
Commit 2310ced

[mlir][NFC] Update textual references of func to func.func in examples+python scripts
The special case parsing of `func` operations is being removed.
1 parent: 186d5c8
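As a rough sketch of what the textual update looks like (the function below is hypothetical and not taken from this commit's diff), the bare `func` spelling that relied on the special-case parser is replaced by the fully qualified `func.func` operation name:

```mlir
// Hypothetical example, not part of this commit's diff.
// Before (special-case spelling being removed from the examples):
//   func @add_one(%arg0: i32) -> i32 { ... }

// After: the fully qualified `func.func` operation name.
func.func @add_one(%arg0: i32) -> i32 {
  %c1 = arith.constant 1 : i32
  %0 = arith.addi %arg0, %c1 : i32
  return %0 : i32
}
```

The hunks below apply exactly this renaming to the MLIR snippets embedded in the documentation.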

37 files changed: 533 additions & 531 deletions

mlir/docs/BufferDeallocationInternals.md

Lines changed: 20 additions & 20 deletions
@@ -39,7 +39,7 @@ writes needs to dominate all buffer reads.
 Example for breaking the invariant:

 ```mlir
-func @condBranch(%arg0: i1, %arg1: memref<2xf32>) {
+func.func @condBranch(%arg0: i1, %arg1: memref<2xf32>) {
 %0 = memref.alloc() : memref<2xf32>
 cf.cond_br %arg0, ^bb1, ^bb2
 ^bb1:
@@ -71,7 +71,7 @@ BufferDeallocation is fully compatible with “hybrid” setups in which tracked
 untracked allocations are mixed:

 ```mlir
-func @mixedAllocation(%arg0: i1) {
+func.func @mixedAllocation(%arg0: i1) {
 %0 = memref.alloca() : memref<2xf32> // aliases: %2
 %1 = memref.alloc() : memref<2xf32> // aliases: %2
 cf.cond_br %arg0, ^bb1, ^bb2
@@ -128,7 +128,7 @@ BufferHoisting pass:
 ![branch_example_pre_move](/includes/img/branch_example_pre_move.svg)

 ```mlir
-func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
+func.func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 cf.cond_br %arg0, ^bb1, ^bb2
 ^bb1:
 cf.br ^bb3(%arg1 : memref<2xf32>)
@@ -148,7 +148,7 @@ of code:
 ![branch_example_post_move](/includes/img/branch_example_post_move.svg)

 ```mlir
-func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
+func.func @condBranch(%arg0: i1, %arg1: memref<2xf32>, %arg2: memref<2xf32>) {
 %0 = memref.alloc() : memref<2xf32> // moved to bb0
 cf.cond_br %arg0, ^bb1, ^bb2
 ^bb1:
@@ -170,7 +170,7 @@ Due to the data dependency of the allocation to %0, we cannot move the
 allocation out of bb2 in this case:

 ```mlir
-func @condBranchDynamicType(
+func.func @condBranchDynamicType(
 %arg0: i1,
 %arg1: memref<?xf32>,
 %arg2: memref<?xf32>,
@@ -199,7 +199,7 @@ copies to eliminate them. Consider the following example in which the
 allocations have already been placed:

 ```mlir
-func @branch(%arg0: i1) {
+func.func @branch(%arg0: i1) {
 %0 = memref.alloc() : memref<2xf32> // aliases: %2
 cf.cond_br %arg0, ^bb1, ^bb2
 ^bb1:
@@ -231,7 +231,7 @@ Applying the BufferDeallocation pass to the program above yields the following
 result:

 ```mlir
-func @branch(%arg0: i1) {
+func.func @branch(%arg0: i1) {
 %0 = memref.alloc() : memref<2xf32>
 cf.cond_br %arg0, ^bb1, ^bb2
 ^bb1:
@@ -268,7 +268,7 @@ and non-critical aliases:
 ![nested_branch_example_pre_move](/includes/img/nested_branch_example_pre_move.svg)

 ```mlir
-func @condBranchDynamicTypeNested(
+func.func @condBranchDynamicTypeNested(
 %arg0: i1,
 %arg1: memref<?xf32>, // aliases: %3, %4
 %arg2: memref<?xf32>,
@@ -301,7 +301,7 @@ Applying BufferDeallocation yields the following output:
 ![nested_branch_example_post_move](/includes/img/nested_branch_example_post_move.svg)

 ```mlir
-func @condBranchDynamicTypeNested(
+func.func @condBranchDynamicTypeNested(
 %arg0: i1,
 %arg1: memref<?xf32>,
 %arg2: memref<?xf32>,
@@ -379,7 +379,7 @@ the `RegionBranchOpInterface` to determine predecessors in order to infer the
 high-level control flow:

 ```mlir
-func @inner_region_control_flow(
+func.func @inner_region_control_flow(
 %arg0 : index,
 %arg1 : index) -> memref<?x?xf32> {
 %0 = memref.alloc(%arg0, %arg0) : memref<?x?xf32>
@@ -403,7 +403,7 @@ dialect-specific operations. BufferDeallocation supports this behavior via the
 operation to determine the value of %2 at runtime which creates an alias:

 ```mlir
-func @nested_region_control_flow(%arg0 : index, %arg1 : index) -> memref<?x?xf32> {
+func.func @nested_region_control_flow(%arg0 : index, %arg1 : index) -> memref<?x?xf32> {
 %0 = arith.cmpi "eq", %arg0, %arg1 : index
 %1 = memref.alloc(%arg0, %arg0) : memref<?x?xf32>
 %2 = scf.if %0 -> (memref<?x?xf32>) {
@@ -424,7 +424,7 @@ block since it cannot be accessed by the remainder of the program. Accessing the
 %1 which does not need to be tracked.

 ```mlir
-func @nested_region_control_flow(%arg0: index, %arg1: index) -> memref<?x?xf32> {
+func.func @nested_region_control_flow(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 %0 = arith.cmpi "eq", %arg0, %arg1 : index
 %1 = memref.alloc(%arg0, %arg0) : memref<?x?xf32>
 %2 = scf.if %0 -> (memref<?x?xf32>) {
@@ -448,7 +448,7 @@ Reconsider a slightly adapted version of the “custom.region_if” example from
 above that uses a nested allocation:

 ```mlir
-func @inner_region_control_flow_div(
+func.func @inner_region_control_flow_div(
 %arg0 : index,
 %arg1 : index) -> memref<?x?xf32> {
 %0 = memref.alloc(%arg0, %arg0) : memref<?x?xf32>
@@ -471,7 +471,7 @@ Furthermore, %arg4 is returned to its parent operation and has an alias %1. This
 causes BufferDeallocation to introduce additional copies:

 ```mlir
-func @inner_region_control_flow_div(
+func.func @inner_region_control_flow_div(
 %arg0 : index,
 %arg1 : index) -> memref<?x?xf32> {
 %0 = memref.alloc(%arg0, %arg0) : memref<?x?xf32>
@@ -509,7 +509,7 @@ Consider the following “scf.for” use case containing a nested structured
 control-flow if:

 ```mlir
-func @loop_nested_if(
+func.func @loop_nested_if(
 %lb: index,
 %ub: index,
 %step: index,
@@ -547,7 +547,7 @@ buffer, we have to free the buffer from the previous iteration to avoid memory
 leaks:

 ```mlir
-func @loop_nested_if(
+func.func @loop_nested_if(
 %lb: index,
 %ub: index,
 %step: index,
@@ -624,7 +624,7 @@ analysis of this sample reveals that the highlighted operations are redundant
 and can be removed.

 ```mlir
-func @dynamic_allocation(%arg0: index, %arg1: index) -> memref<?x?xf32> {
+func.func @dynamic_allocation(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
 %2 = bufferization.clone %1 : (memref<?x?xf32>) -> (memref<?x?xf32>)
 memref.dealloc %1 : memref<?x?xf32>
@@ -635,7 +635,7 @@ func @dynamic_allocation(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 Will be transformed to:

 ```mlir
-func @dynamic_allocation(%arg0: index, %arg1: index) -> memref<?x?xf32> {
+func.func @dynamic_allocation(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
 return %1 : memref<?x?xf32>
 }
@@ -656,7 +656,7 @@ merged into a single step. Canonicalization removes the clone operation and
 %temp, and replaces the uses of %temp with %result:

 ```mlir
-func @reuseTarget(%arg0: memref<2xf32>, %result: memref<2xf32>){
+func.func @reuseTarget(%arg0: memref<2xf32>, %result: memref<2xf32>){
 %temp = memref.alloc() : memref<2xf32>
 test.generic {
 args_in = 1 : i64,
@@ -676,7 +676,7 @@ func @reuseTarget(%arg0: memref<2xf32>, %result: memref<2xf32>){
 Will be transformed to:

 ```mlir
-func @reuseTarget(%arg0: memref<2xf32>, %result: memref<2xf32>){
+func.func @reuseTarget(%arg0: memref<2xf32>, %result: memref<2xf32>){
 test.generic {
 args_in = 1 : i64,
 args_out = 1 : i64,

mlir/docs/Diagnostics.md

Lines changed: 6 additions & 6 deletions
@@ -243,7 +243,7 @@ diagnostic. Example usage of this handler can be seen in the `mlir-opt` tool.
 $ mlir-opt foo.mlir

 /tmp/test.mlir:6:24: error: expected non-function type
-func @foo() -> (index, ind) {
+func.func @foo() -> (index, ind) {
 ^
 ```

@@ -306,12 +306,12 @@ A few examples are shown below:

 ```mlir
 // Expect an error on the same line.
-func @bad_branch() {
+func.func @bad_branch() {
 cf.br ^missing // expected-error {{reference to an undefined block}}
 }

 // Expect an error on an adjacent line.
-func @foo(%a : f32) {
+func.func @foo(%a : f32) {
 // expected-error@+1 {{unknown comparison predicate "foo"}}
 %result = arith.cmpf "foo", %a, %a : f32
 return
@@ -320,10 +320,10 @@ func @foo(%a : f32) {
 // Expect an error on the next line that does not contain a designator.
 // expected-remark@below {{remark on function below}}
 // expected-remark@below {{another remark on function below}}
-func @bar(%a : f32)
+func.func @bar(%a : f32)

 // Expect an error on the previous line that does not contain a designator.
-func @baz(%a : f32)
+func.func @baz(%a : f32)
 // expected-remark@above {{remark on function above}}
 // expected-remark@above {{another remark on function above}}

@@ -336,7 +336,7 @@ any expected diagnostics weren't.
 $ mlir-opt foo.mlir

 /tmp/test.mlir:6:24: error: unexpected error: expected non-function type
-func @foo() -> (index, ind) {
+func.func @foo() -> (index, ind) {
 ^

 /tmp/test.mlir:15:4: error: expected remark "expected some remark" was not produced

mlir/docs/Dialects/Linalg/_index.md

Lines changed: 10 additions & 10 deletions
@@ -102,7 +102,7 @@ layout, and the second one is a `memref` of 4-element vectors with a 2-strided,
 // memory layouts
 #identity = affine_map<(d0) -> (d0)>

-func @example(%A: memref<?xf32, #identity>,
+func.func @example(%A: memref<?xf32, #identity>,
 %B: memref<?xvector<4xf32>, offset: 1, strides: [2]>) {
 linalg.generic #attrs
 ins(%A: memref<?xf32, #identity>)
@@ -124,7 +124,7 @@ materialized by a lowering into a form that will resemble:
 // It's syntax can be found here: https://mlir.llvm.org/docs/Dialects/SCFDialect/
 #map0 = affine_map<(d0) -> (d0 * 2 + 1)>

-func @example(%arg0: memref<?xf32>, %arg1: memref<?xvector<4xf32>, #map0>) {
+func.func @example(%arg0: memref<?xf32>, %arg1: memref<?xvector<4xf32>, #map0>) {
 %c0 = arith.constant 0 : index
 %c1 = arith.constant 1 : index
 %0 = memref.dim %arg0, %c0 : memref<?xf32>
@@ -186,7 +186,7 @@ uses an identity layout.
 iterator_types = ["parallel", "parallel"]
 }

-func @example(%A: memref<8x?xf32, offset: 0, strides: [2, 2]>,
+func.func @example(%A: memref<8x?xf32, offset: 0, strides: [2, 2]>,
 %B: memref<?xvector<4xf32>>) {
 linalg.generic #attrs
 ins(%A: memref<8x?xf32, offset: 0, strides: [2, 2]>)
@@ -206,7 +206,7 @@ materialized by a lowering into a form that will resemble:
 // Run: mlir-opt example2.mlir -allow-unregistered-dialect -convert-linalg-to-loops
 #map0 = affine_map<(d0, d1) -> (d0 * 2 + d1 * 2)>

-func @example(%arg0: memref<8x?xf32, #map0>, %arg1: memref<?xvector<4xf32>>) {
+func.func @example(%arg0: memref<8x?xf32, #map0>, %arg1: memref<?xvector<4xf32>>) {
 %c8 = arith.constant 8 : index
 %c0 = arith.constant 0 : index
 %c1 = arith.constant 1 : index
@@ -309,7 +309,7 @@ be when using a concrete operation `addf`:
 iterator_types = ["parallel", "parallel"]
 }

-func @example(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
+func.func @example(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
 linalg.generic #attrs
 ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
 outs(%C: memref<?x?xf32>) {
@@ -329,7 +329,7 @@ The property "*The Compute Payload is Specified With a Region*" is materialized
 by a lowering into a form that will resemble:

 ```mlir
-func @example(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
+func.func @example(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
 %c0 = arith.constant 0 : index
 %c1 = arith.constant 1 : index
 %0 = memref.dim %arg0, %c0 : memref<?x?xf32>
@@ -382,7 +382,7 @@ call we intend to use:
 library_call = "pointwise_add"
 }

-func @example(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
+func.func @example(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
 linalg.generic #attrs
 ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
 outs(%C: memref<?x?xf32>) {
@@ -402,22 +402,22 @@ into a form that will resemble:

 #map0 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>

-func @example(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
+func.func @example(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
 %0 = memref.cast %arg0 : memref<?x?xf32> to memref<?x?xf32, #map0>
 %1 = memref.cast %arg1 : memref<?x?xf32> to memref<?x?xf32, #map0>
 %2 = memref.cast %arg2 : memref<?x?xf32> to memref<?x?xf32, #map0>
 call @pointwise_add(%0, %1, %2) : (memref<?x?xf32, #map0>, memref<?x?xf32, #map0>, memref<?x?xf32, #map0>) -> ()
 return
 }
-func @pointwise_add(memref<?x?xf32, #map0>, memref<?x?xf32, #map0>, memref<?x?xf32, #map0>) attributes {llvm.emit_c_interface}
+func.func @pointwise_add(memref<?x?xf32, #map0>, memref<?x?xf32, #map0>, memref<?x?xf32, #map0>) attributes {llvm.emit_c_interface}
 ```

 Which, after lowering to LLVM resembles:

 ```mlir
 // Run: mlir-opt example4.mlir -convert-linalg-to-std | mlir-opt -convert-func-to-llvm
 // Some generated code are omitted here.
-func @example(%arg0: !llvm<"float*">, ...) {
+func.func @example(%arg0: !llvm<"float*">, ...) {
 ...
 llvm.call @pointwise_add(...) : (!llvm<"float*">, ...) -> ()
 return

mlir/docs/Dialects/SPIR-V.md

Lines changed: 4 additions & 4 deletions
@@ -422,7 +422,7 @@ the SPIR-V dialect. Instead, we reuse the builtin `func` op to express functions
 more concisely:

 ```mlir
-func @f(%arg: i32) -> i32 {
+func.func @f(%arg: i32) -> i32 {
 "spv.ReturnValue"(%arg) : (i32) -> (i32)
 }
 ```
@@ -580,7 +580,7 @@ void loop(bool cond) {
 It will be represented as

 ```mlir
-func @selection(%cond: i1) -> () {
+func.func @selection(%cond: i1) -> () {
 %zero = spv.Constant 0: i32
 %one = spv.Constant 1: i32
 %two = spv.Constant 2: i32
@@ -668,7 +668,7 @@ void loop(int count) {
 It will be represented as

 ```mlir
-func @loop(%count : i32) -> () {
+func.func @loop(%count : i32) -> () {
 %zero = spv.Constant 0: i32
 %one = spv.Constant 1: i32
 %var = spv.Variable init(%zero) : !spv.ptr<i32, Function>
@@ -728,7 +728,7 @@ example, for the following SPIR-V function `foo`:
 It will be represented as:

 ```mlir
-func @foo() -> () {
+func.func @foo() -> () {
 %var = spv.Variable : !spv.ptr<i32, Function>

 spv.mlir.selection {

mlir/docs/Dialects/ShapeDialect.md

Lines changed: 3 additions & 3 deletions
@@ -97,7 +97,7 @@ separate shape function library, while here we would normally reify it as part
 of lowering, but for simplicity will show as a standalone shape function.

 ```mlir
-func @matmul_shape1(%lhs: tensor<*xf32>, %rhs: tensor<*xindex>) -> tensor<?xindex> {
+func.func @matmul_shape1(%lhs: tensor<*xf32>, %rhs: tensor<*xindex>) -> tensor<?xindex> {
 %c1 = shape.const_size 1
 %c2 = shape.const_size 2
 // We allow `shape.shape_of` to return either a `!shape.shape` or
@@ -136,7 +136,7 @@ We can now hoist computations of constraint were possible (which in the case
 below is not too many as we need to verify the rank before we can split)

 ```mlir
-func @matmul_shape2(%lhs: tensor<*xf32>, %lhs: tensor<*xf32>) -> tensor<?xindex> {
+func.func @matmul_shape2(%lhs: tensor<*xf32>, %lhs: tensor<*xf32>) -> tensor<?xindex> {
 %c1 = shape.const_size 1
 %c2 = shape.const_size 2
 %lhs_shape = shape.shape_of %lhs : tensor<*xf32> -> tensor<?xindex>
@@ -167,7 +167,7 @@ The above form can now be lowered to the fully imperative form (see
 for example).

 ```mlir
-func @matmul_shape3(%lhs: tensor<*xf32>, %lhs: tensor<*xf32>) -> tensor<?xindex> {
+func.func @matmul_shape3(%lhs: tensor<*xf32>, %lhs: tensor<*xf32>) -> tensor<?xindex> {
 %c1 = arith.constant 1 : index
 %c2 = arith.constant 2 : index
 %lhs_shape = shape.shape_of %lhs : tensor<*xf32> -> tensor<?xindex>
