12 changes: 6 additions & 6 deletions mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir
@@ -6,7 +6,7 @@ gpu.module @test_module {
// CHECK-LABEL: func @gpu_wmma_load_op() ->
// CHECK-SAME: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> {
// CHECK32-LABEL: func @gpu_wmma_load_op() ->
builtin.func @gpu_wmma_load_op() -> (!gpu.mma_matrix<16x16xf16, "AOp">) {
func.func @gpu_wmma_load_op() -> (!gpu.mma_matrix<16x16xf16, "AOp">) {
%wg = memref.alloca() {alignment = 32} : memref<32x32xf16, 3>
%i = arith.constant 16 : index
%j = arith.constant 16 : index
@@ -46,7 +46,7 @@ gpu.module @test_module {
// CHECK-SAME: (%[[D:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>) {
// CHECK32-LABEL: func @gpu_wmma_store_op
// CHECK32-SAME: (%[[D:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>) {
builtin.func @gpu_wmma_store_op(%arg0 : !gpu.mma_matrix<16x16xf16, "COp">) -> () {
func.func @gpu_wmma_store_op(%arg0 : !gpu.mma_matrix<16x16xf16, "COp">) -> () {
%sg = memref.alloca(){alignment = 32} : memref<32x32xf16, 3>
%i = arith.constant 16 : index
%j = arith.constant 16 : index
@@ -92,7 +92,7 @@ gpu.module @test_module {

// CHECK-LABEL: func @gpu_wmma_mma_op
// CHECK-SAME: (%[[A:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>, %[[B:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>, %[[C:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>)
builtin.func @gpu_wmma_mma_op(%A : !gpu.mma_matrix<16x16xf16, "AOp">, %B : !gpu.mma_matrix<16x16xf16, "BOp">, %C : !gpu.mma_matrix<16x16xf16, "COp">) -> (!gpu.mma_matrix<16x16xf16, "COp">) {
func.func @gpu_wmma_mma_op(%A : !gpu.mma_matrix<16x16xf16, "AOp">, %B : !gpu.mma_matrix<16x16xf16, "BOp">, %C : !gpu.mma_matrix<16x16xf16, "COp">) -> (!gpu.mma_matrix<16x16xf16, "COp">) {
%D = gpu.subgroup_mma_compute %A, %B, %C : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp">
// CHECK: %[[A1:.*]] = llvm.extractvalue %[[A]][0 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
// CHECK: %[[A2:.*]] = llvm.extractvalue %[[A]][1 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
@@ -163,7 +163,7 @@ gpu.module @test_module {
// CHECK: %[[E3:.+]] = llvm.extractvalue %[[ACC]][3 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
// CHECK: nvvm.wmma.store %{{.*}}, %{{.*}}, %[[E0]], %[[E1]], %[[E2]], %[[E3]] {eltype = #nvvm.mma_type<f16>, k = 16 : i32, layout = #nvvm.mma_layout<row>, m = 16 : i32, n = 16 : i32} : !llvm.ptr<f16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>

builtin.func @gpu_wmma_mma_loop_op(%arg0: memref<128x128xf16>, %arg1: memref<128x128xf16>, %arg2: memref<128x128xf16>) {
func.func @gpu_wmma_mma_loop_op(%arg0: memref<128x128xf16>, %arg1: memref<128x128xf16>, %arg2: memref<128x128xf16>) {
%c0 = arith.constant 0 : index
%c128 = arith.constant 128 : index
%c32 = arith.constant 32 : index
@@ -202,7 +202,7 @@ gpu.module @test_module {
// CHECK: %[[M3:.+]] = llvm.insertvalue %[[V2]], %[[M2]][2 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
// CHECK: %[[M4:.+]] = llvm.insertvalue %[[V2]], %[[M3]][3 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
// CHECK: llvm.return %[[M4]] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
builtin.func @gpu_wmma_constant_op() ->(!gpu.mma_matrix<16x16xf16, "COp">) {
func.func @gpu_wmma_constant_op() ->(!gpu.mma_matrix<16x16xf16, "COp">) {
%cst = arith.constant 1.0 : f16
%C = gpu.subgroup_mma_constant_matrix %cst : !gpu.mma_matrix<16x16xf16, "COp">
return %C : !gpu.mma_matrix<16x16xf16, "COp">
Expand Down Expand Up @@ -232,7 +232,7 @@ gpu.module @test_module {
// CHECK: %[[C3:.*]] = llvm.fadd %[[A3]], %[[B3]] : vector<2xf16>
// CHECK: %[[M4:.*]] = llvm.insertvalue %[[C3]], %[[M3]][3 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
// CHECK: llvm.return %[[M4]] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>
builtin.func @gpu_wmma_elementwise(%A : !gpu.mma_matrix<16x16xf16, "COp">, %B : !gpu.mma_matrix<16x16xf16, "COp">) ->(!gpu.mma_matrix<16x16xf16, "COp">) {
func.func @gpu_wmma_elementwise(%A : !gpu.mma_matrix<16x16xf16, "COp">, %B : !gpu.mma_matrix<16x16xf16, "COp">) ->(!gpu.mma_matrix<16x16xf16, "COp">) {
%C = gpu.subgroup_mma_elementwise addf %A, %B : (!gpu.mma_matrix<16x16xf16, "COp">, !gpu.mma_matrix<16x16xf16, "COp">) -> !gpu.mma_matrix<16x16xf16, "COp">
return %C : !gpu.mma_matrix<16x16xf16, "COp">
}
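
For orientation, a minimal sketch of the renamed op (not part of this patch; the module name, function name, and body below are illustrative assumptions):

gpu.module @example_module {
  // Formerly spelled `builtin.func`; only the op name changes. The signature,
  // body, and `return` terminator are unaffected by the rename.
  func.func @scale_add(%a: f32, %x: f32, %y: f32) -> f32 {
    %0 = arith.mulf %a, %x : f32
    %1 = arith.addf %0, %y : f32
    return %1 : f32
  }
}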
42 changes: 21 additions & 21 deletions mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
@@ -4,7 +4,7 @@
gpu.module @test_module {
// CHECK-LABEL: func @gpu_index_ops()
// CHECK32-LABEL: func @gpu_index_ops()
builtin.func @gpu_index_ops()
func.func @gpu_index_ops()
-> (index, index, index, index, index, index,
index, index, index, index, index, index) {
// CHECK32-NOT: = llvm.sext %{{.*}} : i32 to i64
@@ -61,7 +61,7 @@ gpu.module @test_module {
gpu.module @test_module {
// CHECK-LABEL: func @gpu_index_comp
// CHECK32-LABEL: func @gpu_index_comp
builtin.func @gpu_index_comp(%idx : index) -> index {
func.func @gpu_index_comp(%idx : index) -> index {
// CHECK: = llvm.add %{{.*}}, %{{.*}} : i64
// CHECK32: = llvm.add %{{.*}}, %{{.*}} : i32
%0 = arith.addi %idx, %idx : index
@@ -75,7 +75,7 @@ gpu.module @test_module {

gpu.module @test_module {
// CHECK-LABEL: func @gpu_sync()
builtin.func @gpu_sync() {
func.func @gpu_sync() {
// CHECK: rocdl.barrier
gpu.barrier
func.return
@@ -88,7 +88,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_fabs_f32(f32) -> f32
// CHECK: llvm.func @__ocml_fabs_f64(f64) -> f64
// CHECK-LABEL: func @gpu_fabs
builtin.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.abs %arg_f32 : f32
// CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) : (f32) -> f32
%result64 = math.abs %arg_f64 : f64
@@ -103,7 +103,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_ceil_f32(f32) -> f32
// CHECK: llvm.func @__ocml_ceil_f64(f64) -> f64
// CHECK-LABEL: func @gpu_ceil
builtin.func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.ceil %arg_f32 : f32
// CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) : (f32) -> f32
%result64 = math.ceil %arg_f64 : f64
@@ -118,7 +118,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_floor_f32(f32) -> f32
// CHECK: llvm.func @__ocml_floor_f64(f64) -> f64
// CHECK-LABEL: func @gpu_floor
builtin.func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.floor %arg_f32 : f32
// CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) : (f32) -> f32
%result64 = math.floor %arg_f64 : f64
@@ -133,7 +133,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_cos_f32(f32) -> f32
// CHECK: llvm.func @__ocml_cos_f64(f64) -> f64
// CHECK-LABEL: func @gpu_cos
builtin.func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.cos %arg_f32 : f32
// CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) : (f32) -> f32
%result64 = math.cos %arg_f64 : f64
@@ -148,7 +148,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_exp_f32(f32) -> f32
// CHECK: llvm.func @__ocml_exp_f64(f64) -> f64
// CHECK-LABEL: func @gpu_exp
builtin.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%exp_f32 = math.exp %arg_f32 : f32
// CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32
%result32 = math.exp %exp_f32 : f32
@@ -165,7 +165,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_exp2_f32(f32) -> f32
// CHECK: llvm.func @__ocml_exp2_f64(f64) -> f64
// CHECK-LABEL: func @gpu_exp2
builtin.func @gpu_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%exp2_f32 = math.exp2 %arg_f32 : f32
// CHECK: llvm.call @__ocml_exp2_f32(%{{.*}}) : (f32) -> f32
%result32 = math.exp2 %exp2_f32 : f32
@@ -185,7 +185,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_exp_f32(f32) -> f32
// CHECK: llvm.func @__ocml_exp_f64(f64) -> f64
// CHECK-LABEL: func @gpu_exp
builtin.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%exp_f32 = math.exp %arg_f32 : f32
// CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32
%result32 = math.exp %exp_f32 : f32
@@ -204,7 +204,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_expm1_f32(f32) -> f32
// CHECK: llvm.func @__ocml_expm1_f64(f64) -> f64
// CHECK-LABEL: func @gpu_expm1
builtin.func @gpu_expm1(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_expm1(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%expm1_f32 = math.expm1 %arg_f32 : f32
// CHECK: llvm.call @__ocml_expm1_f32(%{{.*}}) : (f32) -> f32
%result32 = math.expm1 %expm1_f32 : f32
@@ -221,7 +221,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_log_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log
builtin.func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.log %arg_f32 : f32
// CHECK: llvm.call @__ocml_log_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log %arg_f64 : f64
@@ -236,7 +236,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_log1p_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log1p_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log1p
builtin.func @gpu_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.log1p %arg_f32 : f32
// CHECK: llvm.call @__ocml_log1p_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log1p %arg_f64 : f64
@@ -251,7 +251,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_log10_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log10_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log10
builtin.func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.log10 %arg_f32 : f32
// CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log10 %arg_f64 : f64
@@ -266,7 +266,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_log2_f32(f32) -> f32
// CHECK: llvm.func @__ocml_log2_f64(f64) -> f64
// CHECK-LABEL: func @gpu_log2
builtin.func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.log2 %arg_f32 : f32
// CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) : (f32) -> f32
%result64 = math.log2 %arg_f64 : f64
@@ -281,7 +281,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_rsqrt_f32(f32) -> f32
// CHECK: llvm.func @__ocml_rsqrt_f64(f64) -> f64
// CHECK-LABEL: func @gpu_rsqrt
builtin.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
func.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
-> (f16, f32, f64) {
%result16 = math.rsqrt %arg_f16 : f16
// CHECK: llvm.fpext %{{.*}} : f16 to f32
@@ -301,7 +301,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_sqrt_f32(f32) -> f32
// CHECK: llvm.func @__ocml_sqrt_f64(f64) -> f64
// CHECK-LABEL: func @gpu_sqrt
builtin.func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
func.func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
-> (f16, f32, f64) {
%result16 = math.sqrt %arg_f16 : f16
// CHECK: llvm.fpext %{{.*}} : f16 to f32
@@ -321,7 +321,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_tanh_f32(f32) -> f32
// CHECK: llvm.func @__ocml_tanh_f64(f64) -> f64
// CHECK-LABEL: func @gpu_tanh
builtin.func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.tanh %arg_f32 : f32
// CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (f32) -> f32
%result64 = math.tanh %arg_f64 : f64
@@ -336,7 +336,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_atan_f32(f32) -> f32
// CHECK: llvm.func @__ocml_atan_f64(f64) -> f64
// CHECK-LABEL: func @gpu_atan
builtin.func @gpu_atan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_atan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.atan %arg_f32 : f32
// CHECK: llvm.call @__ocml_atan_f32(%{{.*}}) : (f32) -> f32
%result64 = math.atan %arg_f64 : f64
@@ -351,7 +351,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_atan2_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_atan2_f64(f64, f64) -> f64
// CHECK-LABEL: func @gpu_atan2
builtin.func @gpu_atan2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_atan2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.atan2 %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_atan2_f32(%{{.*}}) : (f32, f32) -> f32
%result64 = math.atan2 %arg_f64, %arg_f64 : f64
@@ -366,7 +366,7 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_pow_f32(f32, f32) -> f32
// CHECK: llvm.func @__ocml_pow_f64(f64, f64) -> f64
// CHECK-LABEL: func @gpu_pow
builtin.func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
func.func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
%result32 = math.powf %arg_f32, %arg_f32 : f32
// CHECK: llvm.call @__ocml_pow_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
%result64 = math.powf %arg_f64, %arg_f64 : f64
2 changes: 1 addition & 1 deletion mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(convert-math-to-llvm)" | FileCheck %s
// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(convert-math-to-llvm)" | FileCheck %s

// CHECK-LABEL: @ops
func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) {
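
As a usage sketch of the new pipeline anchor (hypothetical test; the pass choice and FileCheck patterns are assumptions, not part of this patch), function-nested pipelines now name `func.func` where they previously named `builtin.func`:

// RUN: mlir-opt %s -pass-pipeline="func.func(canonicalize)" | FileCheck %s

// CHECK-LABEL: @fold_add_zero
func @fold_add_zero(%arg0: i32) -> i32 {
  // Canonicalization folds `x + 0` to `x`, so only the return is left.
  // CHECK: return %{{.*}} : i32
  %c0 = arith.constant 0 : i32
  %0 = arith.addi %arg0, %c0 : i32
  return %0 : i32
}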
4 changes: 2 additions & 2 deletions mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=0 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-THREADS %s
// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=0})" %s | FileCheck --check-prefix=CHECK-BLOCKS %s
// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=0 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-THREADS %s
// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=0})" %s | FileCheck --check-prefix=CHECK-BLOCKS %s

// CHECK-THREADS-LABEL: @one_d_loop
// CHECK-BLOCKS-LABEL: @one_d_loop
4 changes: 2 additions & 2 deletions mlir/test/Conversion/SCFToGPU/step_one.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-11 %s
// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=2 gpu-thread-dims=2})" %s | FileCheck --check-prefix=CHECK-22 %s
// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-11 %s
// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=2 gpu-thread-dims=2})" %s | FileCheck --check-prefix=CHECK-22 %s

// CHECK-11-LABEL: @step_1
// CHECK-22-LABEL: @step_1
2 changes: 1 addition & 1 deletion mlir/test/Conversion/SCFToGPU/step_positive.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck %s
// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck %s

// CHECK-LABEL: @step_var
func @step_var(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
@@ -1,4 +1,4 @@
// RUN: mlir-opt -pass-pipeline="builtin.func(convert-shape-constraints)" <%s | FileCheck %s
// RUN: mlir-opt -pass-pipeline="func.func(convert-shape-constraints)" <%s | FileCheck %s

// There's not very much useful to check here other than pasting the output.
// CHECK-LABEL: func @cstr_broadcastable(
@@ -1,4 +1,4 @@
// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg-named)" %s -verify-diagnostics -o -| FileCheck %s
// RUN: mlir-opt --split-input-file -pass-pipeline="func.func(tosa-to-linalg-named)" %s -verify-diagnostics -o -| FileCheck %s

// CHECK-LABEL: @matmul
func @matmul(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x6xf32>) -> (tensor<1x5x6xf32>) {
2 changes: 1 addition & 1 deletion mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg)" %s -verify-diagnostics -o -| FileCheck %s
// RUN: mlir-opt --split-input-file -pass-pipeline="func.func(tosa-to-linalg)" %s -verify-diagnostics -o -| FileCheck %s

// CHECK: #[[$MAP0:.*]] = affine_map<() -> ()>

2 changes: 1 addition & 1 deletion mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-gpu)" -canonicalize | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-gpu)" -canonicalize | FileCheck %s

#map0 = affine_map<(d0, d1) -> (d1, d0)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
12 changes: 6 additions & 6 deletions mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -convert-vector-to-rocdl | FileCheck %s

gpu.module @test_read{
builtin.func @transfer_readx2(%A : memref<?xf32>, %base: index) -> vector<2xf32> {
func.func @transfer_readx2(%A : memref<?xf32>, %base: index) -> vector<2xf32> {
%f0 = arith.constant 0.0: f32
%f = vector.transfer_read %A[%base], %f0
{permutation_map = affine_map<(d0) -> (d0)>} :
@@ -11,7 +11,7 @@ builtin.func @transfer_readx2(%A : memref<?xf32>, %base: index) -> vector<2xf32>
// CHECK-LABEL: @transfer_readx2
// CHECK: rocdl.buffer.load {{.*}} vector<2xf32>

builtin.func @transfer_readx4(%A : memref<?xf32>, %base: index) -> vector<4xf32> {
func.func @transfer_readx4(%A : memref<?xf32>, %base: index) -> vector<4xf32> {
%f0 = arith.constant 0.0: f32
%f = vector.transfer_read %A[%base], %f0
{permutation_map = affine_map<(d0) -> (d0)>} :
@@ -21,7 +21,7 @@ builtin.func @transfer_readx4(%A : memref<?xf32>, %base: index) -> vector<4xf32>
// CHECK-LABEL: @transfer_readx4
// CHECK: rocdl.buffer.load {{.*}} vector<4xf32>

builtin.func @transfer_read_dwordConfig(%A : memref<?xf32>, %base: index) -> vector<4xf32> {
func.func @transfer_read_dwordConfig(%A : memref<?xf32>, %base: index) -> vector<4xf32> {
%f0 = arith.constant 0.0: f32
%f = vector.transfer_read %A[%base], %f0
{permutation_map = affine_map<(d0) -> (d0)>} :
@@ -36,7 +36,7 @@ builtin.func @transfer_read_dwordConfig(%A : memref<?xf32>, %base: index) -> vec
}

gpu.module @test_write{
builtin.func @transfer_writex2(%A : memref<?xf32>, %B : vector<2xf32>, %base: index) {
func.func @transfer_writex2(%A : memref<?xf32>, %B : vector<2xf32>, %base: index) {
vector.transfer_write %B, %A[%base]
{permutation_map = affine_map<(d0) -> (d0)>} :
vector<2xf32>, memref<?xf32>
@@ -45,7 +45,7 @@ builtin.func @transfer_writex2(%A : memref<?xf32>, %B : vector<2xf32>, %base: in
// CHECK-LABEL: @transfer_writex2
// CHECK: rocdl.buffer.store {{.*}} vector<2xf32>

builtin.func @transfer_writex4(%A : memref<?xf32>, %B : vector<4xf32>, %base: index) {
func.func @transfer_writex4(%A : memref<?xf32>, %B : vector<4xf32>, %base: index) {
vector.transfer_write %B, %A[%base]
{permutation_map = affine_map<(d0) -> (d0)>} :
vector<4xf32>, memref<?xf32>
@@ -54,7 +54,7 @@ builtin.func @transfer_writex4(%A : memref<?xf32>, %B : vector<4xf32>, %base: in
// CHECK-LABEL: @transfer_writex4
// CHECK: rocdl.buffer.store {{.*}} vector<4xf32>

builtin.func @transfer_write_dwordConfig(%A : memref<?xf32>, %B : vector<2xf32>, %base: index) {
func.func @transfer_write_dwordConfig(%A : memref<?xf32>, %B : vector<2xf32>, %base: index) {
vector.transfer_write %B, %A[%base]
{permutation_map = affine_map<(d0) -> (d0)>} :
vector<2xf32>, memref<?xf32>
2 changes: 1 addition & 1 deletion mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s

// CHECK-LABEL: func @transfer_read_2d(
// CHECK: %[[ALLOC:.*]] = memref.alloca() : memref<vector<4x9xf32>>
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s

// CHECK-LABEL: func @transfer_read_2d(
// CHECK: %[[V_INIT:.*]] = arith.constant dense<-4.200000e+01> : vector<4x9xf32>
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s

// CHECK-LABEL: func @transfer_read_inbounds
func @transfer_read_inbounds(%A : memref<?x?x?xf32>) -> (vector<2x3x4xf32>) {
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true})" -split-input-file | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true})" -split-input-file | FileCheck %s

// Ensure that the permutation map is lowered (by inserting a transpose op)
// before lowering the vector.transfer_read.
4 changes: 2 additions & 2 deletions mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf)" -split-input-file -allow-unregistered-dialect | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf)" -split-input-file -allow-unregistered-dialect | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL

// CHECK-LABEL: func @vector_transfer_ops_0d(
func @vector_transfer_ops_0d(%M: memref<f32>) {
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Affine/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s

// -----

2 changes: 1 addition & 1 deletion mlir/test/Dialect/Affine/loop-unswitch.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(test-affine-loop-unswitch)" | FileCheck %s
// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(test-affine-loop-unswitch)" | FileCheck %s

// CHECK-DAG: #[[$SET:.*]] = affine_set<(d0) : (d0 - 2 >= 0)>

2 changes: 1 addition & 1 deletion mlir/test/Dialect/Affine/memref-stride-calculation.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-memref-stride-calculation)" -o /dev/null | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(test-memref-stride-calculation)" -o /dev/null | FileCheck %s

func @f(%0: index) {
// CHECK-LABEL: Testing: f
2 changes: 1 addition & 1 deletion mlir/test/Dialect/ControlFlow/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck --dump-input-context 20 %s
// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck --dump-input-context 20 %s

/// Test the folding of BranchOp.

2 changes: 1 addition & 1 deletion mlir/test/Dialect/LLVMIR/terminator.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -pass-pipeline='builtin.func(canonicalize)' %s | FileCheck %s
// RUN: mlir-opt -pass-pipeline='func.func(canonicalize)' %s | FileCheck %s
// verify that terminators survive the canonicalizer

// CHECK-LABEL: @return
@@ -784,7 +784,7 @@ func @write_into_constant_via_alias(%v : vector<5xi32>,

// -----

builtin.func @matmul_on_tensors(
func.func @matmul_on_tensors(
%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
%arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
%arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -822,7 +822,7 @@ builtin.func @matmul_on_tensors(

// -----

builtin.func @matmul_on_tensors(
func.func @matmul_on_tensors(
%arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
%arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false},
%arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true})
@@ -1,4 +1,4 @@
// RUN: mlir-opt -pass-pipeline="builtin.func(convert-elementwise-to-linalg)" -split-input-file %s | FileCheck %s
// RUN: mlir-opt -pass-pipeline="func.func(convert-elementwise-to-linalg)" -split-input-file %s | FileCheck %s

// In-depth checking of the linalg.generic op for a very trivial case.
// CHECK: #[[$MAP:.*]] = affine_map<() -> ()>
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/detensorize_0d.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s
// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s

#map = affine_map<() -> ()>

2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/detensorize_br_operands.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s
// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s

// TODO: Detensoring breaks if %arg0 or %arg1 are passed directly as tensors. Fix that.
func @if_true_test(%arg0: i1, %arg1: i32) -> tensor<i32> attributes {} {
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/detensorize_if.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s
// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s

#map0 = affine_map<() -> ()>

4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/detensorize_trivial.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL
// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF
// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL
// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF


#map0 = affine_map<() -> ()>
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/detensorize_while.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL
// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF
// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL
// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF

#map0 = affine_map<() -> ()>

4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL
// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF
// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL
// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF

#map0 = affine_map<() -> ()>
#map1 = affine_map<(i) -> ()>
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s
// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s

#map0 = affine_map<() -> ()>

2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(linalg-fold-unit-extent-dims)" | FileCheck %s
// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(linalg-fold-unit-extent-dims)" | FileCheck %s

#accesses = [
affine_map<(i, j, k, l, m) -> (i, k, m)>,
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(linalg-fold-unit-extent-dims{fold-one-trip-loops-only})" | FileCheck %s
// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(linalg-fold-unit-extent-dims{fold-one-trip-loops-only})" | FileCheck %s

#accesses = [
affine_map<(i, j, k, l, m) -> (i, k, m)>,
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/fusion-sequence.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -pass-pipeline="builtin.func(test-linalg-tile-and-fuse{tile-sizes=16,32,64}),resolve-shaped-type-result-dims,canonicalize,cse" -split-input-file %s | FileCheck %s
// RUN: mlir-opt -pass-pipeline="func.func(test-linalg-tile-and-fuse{tile-sizes=16,32,64}),resolve-shaped-type-result-dims,canonicalize,cse" -split-input-file %s | FileCheck %s

module {
func @three_op_fusion(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>,
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/tile-and-fuse-no-fuse.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-op=linalg.matmul fuse tile-sizes=0,0,0 run-enable-pass=false" -split-input-file | FileCheck %s

builtin.func @no_fuse_gemm(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32> {
func.func @no_fuse_gemm(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) -> tensor<?x?xf32> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%cst = arith.constant 0.0 : f32
12 changes: 6 additions & 6 deletions mlir/test/Dialect/Linalg/tile-and-fuse-on-tensors.mlir
@@ -8,7 +8,7 @@

// MATMUL: fuse_input
// MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<24x12xf32>
builtin.func @fuse_input(%arg0: tensor<24x12xf32>,
func.func @fuse_input(%arg0: tensor<24x12xf32>,
%arg1: tensor<12x25xf32>,
%arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {
%c0 = arith.constant 0 : index
@@ -44,7 +44,7 @@ builtin.func @fuse_input(%arg0: tensor<24x12xf32>,

// MATMUL: fuse_output
// MATMUL-SAME: %[[ARG2:[0-9a-zA-Z]*]]: tensor<24x25xf32>
builtin.func @fuse_output(%arg0: tensor<24x12xf32>,
func.func @fuse_output(%arg0: tensor<24x12xf32>,
%arg1: tensor<12x25xf32>,
%arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {
// MATMUL-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -96,7 +96,7 @@ builtin.func @fuse_output(%arg0: tensor<24x12xf32>,
// MATMUL: fuse_reduction
// MATMUL-SAME: %[[ARG1:[0-9a-zA-Z]*]]: tensor<12x25xf32>
// MATMUL-SAME: %[[ARG3:[0-9a-zA-Z]*]]: tensor<12x7x25xf32>
builtin.func @fuse_reduction(%arg0: tensor<24x12xf32>,
func.func @fuse_reduction(%arg0: tensor<24x12xf32>,
%arg1: tensor<12x25xf32>,
%arg2: tensor<24x25xf32>,
%arg3: tensor<12x7x25xf32>) -> tensor<24x25xf32> {
@@ -140,7 +140,7 @@ builtin.func @fuse_reduction(%arg0: tensor<24x12xf32>,
// MATMUL: fuse_transposed
// MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<24x12xf32>
// MATMUL-SAME: %[[ARG3:[0-9a-zA-Z]*]]: tensor<12x24xf32>
builtin.func @fuse_transposed(%arg0: tensor<24x12xf32>,
func.func @fuse_transposed(%arg0: tensor<24x12xf32>,
%arg1: tensor<12x25xf32>,
%arg2: tensor<24x25xf32>,
%arg3: tensor<12x24xf32>) -> tensor<24x25xf32> {
@@ -175,7 +175,7 @@ builtin.func @fuse_transposed(%arg0: tensor<24x12xf32>,
// MATMUL: fuse_input_and_output
// MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<24x12xf32>
// MATMUL-SAME: %[[ARG2:[0-9a-zA-Z]*]]: tensor<24x25xf32>
builtin.func @fuse_input_and_output(%arg0: tensor<24x12xf32>,
func.func @fuse_input_and_output(%arg0: tensor<24x12xf32>,
%arg1: tensor<12x25xf32>,
%arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {
%c0 = arith.constant 0 : index
@@ -210,7 +210,7 @@ builtin.func @fuse_input_and_output(%arg0: tensor<24x12xf32>,

// MATMUL: fuse_indexed
// MATMUL-SAME: %[[ARG1:[0-9a-zA-Z]*]]: tensor<12x25xi32>
builtin.func @fuse_indexed(%arg0: tensor<24x12xi32>,
func.func @fuse_indexed(%arg0: tensor<24x12xi32>,
%arg1: tensor<12x25xi32>,
%arg2: tensor<24x25xi32>) -> tensor<24x25xi32> {
%c0 = arith.constant 0 : index
@@ -7,7 +7,7 @@
// CONV-SAME: %[[ARG2:[0-9a-zA-Z]*]]: tensor<10x10xf32>
// CONV-SAME: %[[ARG3:[0-9a-zA-Z]*]]: tensor<9x9xf32>
// CONV-SAME: %[[ARG4:[0-9a-zA-Z]*]]: tensor<8x8xf32>
builtin.func @fuse_conv_chain(%arg0: tensor<2x2xf32>,
func.func @fuse_conv_chain(%arg0: tensor<2x2xf32>,
%arg1: tensor<11x11xf32>,
%arg2: tensor<10x10xf32>,
%arg3: tensor<9x9xf32>,
@@ -52,7 +52,7 @@ builtin.func @fuse_conv_chain(%arg0: tensor<2x2xf32>,

// MATMUL: fuse_matmul_chain
// MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<8x8xf32>
builtin.func @fuse_matmul_chain(%arg0: tensor<8x8xf32>) -> tensor<8x8xf32> {
func.func @fuse_matmul_chain(%arg0: tensor<8x8xf32>) -> tensor<8x8xf32> {
%c0 = arith.constant 0 : index
%c12 = arith.constant 12 : index
%c25 = arith.constant 25 : index
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Quant/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s
// RUN: mlir-opt %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s

// -----
// CHECK-LABEL: redundant_scast
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SCF/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck %s


// -----
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(scf-for-to-while)' -split-input-file | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline='func.func(scf-for-to-while)' -split-input-file | FileCheck %s
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py

// CHECK-LABEL: func @single_loop(
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SCF/loop-range.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(scf-for-loop-range-folding)' -split-input-file | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline='func.func(scf-for-loop-range-folding)' -split-input-file | FileCheck %s

func @fold_one_loop(%arg0: memref<?xi32>, %arg1: index, %arg2: index) {
%c0 = arith.constant 0 : index
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SCF/parallel-loop-fusion.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(scf-parallel-loop-fusion)' -split-input-file | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(scf-parallel-loop-fusion)' -split-input-file | FileCheck %s

func @fuse_empty_loops() {
%c2 = arith.constant 2 : index
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4 no-min-max-bounds=true})' -split-input-file | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline='func.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4 no-min-max-bounds=true})' -split-input-file | FileCheck %s

func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index, %arg5 : index,
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SCF/parallel-loop-tiling.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4})' -split-input-file | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline='func.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4})' -split-input-file | FileCheck %s

func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
%arg3 : index, %arg4 : index, %arg5 : index,
2 changes: 1 addition & 1 deletion mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s
// RUN: mlir-opt %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s

//===----------------------------------------------------------------------===//
// spv.AccessChain
6 changes: 3 additions & 3 deletions mlir/test/Dialect/Shape/invalid.mlir
@@ -172,7 +172,7 @@ module attributes {shape.lib = [@shape_lib, "shape_lib"]} {

shape.function_library @shape_lib {
// Test shape function that returns the shape of input arg as result shape.
builtin.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
%0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape
return %0 : !shape.shape
}
@@ -192,7 +192,7 @@ module attributes {shape.lib = [@shape_lib, @shape_lib]} {

shape.function_library @shape_lib {
// Test shape function that returns the shape of input arg as result shape.
builtin.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
%0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape
return %0 : !shape.shape
}
@@ -212,7 +212,7 @@ module attributes {shape.lib = [@shape_lib]} {

shape.function_library @shape_lib {
// Test shape function that returns the shape of input arg as result shape.
builtin.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape {
%0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape
return %0 : !shape.shape
}
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Tensor/invalid.mlir
@@ -91,7 +91,7 @@ func @tensor.generate(%m : index, %n : index)

func @tensor.generate(%m : index, %n : index)
-> tensor<?x3x?xf32> {
// expected-error @+4 {{'func.return' op expects parent op 'builtin.func'}}
// expected-error @+4 {{'func.return' op expects parent op 'func.func'}}
%tnsr = tensor.generate %m, %n {
^bb0(%i : index, %j : index, %k : index):
%elem = arith.constant 8.0 : f32
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Vector/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file -allow-unregistered-dialect | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' -split-input-file -allow-unregistered-dialect | FileCheck %s

// -----

2 changes: 1 addition & 1 deletion mlir/test/IR/diagnostic-handler-filter.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-diagnostic-filter{filters=mysource1})" -split-input-file -o - 2>&1 | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="func.func(test-diagnostic-filter{filters=mysource1})" -split-input-file -o - 2>&1 | FileCheck %s
// This test verifies that diagnostic handler can emit the call stack successfully.

// CHECK-LABEL: Test 'test1'
22 changes: 11 additions & 11 deletions mlir/test/IR/generic-visitors-interrupt.mlir
@@ -8,7 +8,7 @@ func @main(%arg0: f32) -> f32 {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 walk was interrupted

// -----
Expand All @@ -21,7 +21,7 @@ func @main(%arg0: f32) -> f32 {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'foo' before all regions
// CHECK: step 3 op 'bar' before all regions
// CHECK: step 4 walk was interrupted
Expand All @@ -40,7 +40,7 @@ func @main(%arg0: f32) -> f32 {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'foo' before all regions
// CHECK: step 3 op 'bar0' before all regions
// CHECK: step 4 walk was interrupted
Expand All @@ -59,7 +59,7 @@ func @main() {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'foo' before all regions
// CHECK: step 3 op 'test.two_region_op' before all regions
// CHECK: step 4 op 'work' before all regions
Expand All @@ -83,7 +83,7 @@ func @main() {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'foo' before all regions
// CHECK: step 3 op 'test.two_region_op' before all regions
// CHECK: step 4 op 'work' before all regions
Expand All @@ -106,10 +106,10 @@ func @main(%arg0: f32) -> f32 {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'arith.addf' before all regions
// CHECK: step 3 op 'func.return' before all regions
// CHECK: step 4 op 'builtin.func' after all regions
// CHECK: step 4 op 'func.func' after all regions
// CHECK: step 5 op 'builtin.module' after all regions

// -----
Expand All @@ -125,14 +125,14 @@ func @main(%arg0: f32) -> f32 {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'foo' before all regions
// CHECK: step 3 op 'bar0' before all regions
// CHECK: step 4 op 'foo' before region #1
// CHECK: step 5 op 'bar1' before all regions
// CHECK: step 6 op 'arith.addf' before all regions
// CHECK: step 7 op 'func.return' before all regions
// CHECK: step 8 op 'builtin.func' after all regions
// CHECK: step 8 op 'func.func' after all regions
// CHECK: step 9 op 'builtin.module' after all regions

// -----
Expand All @@ -148,10 +148,10 @@ func @main(%arg0: f32) -> f32 {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'foo' before all regions
// CHECK: step 3 op 'bar0' before all regions
// CHECK: step 4 op 'arith.addf' before all regions
// CHECK: step 5 op 'func.return' before all regions
// CHECK: step 6 op 'builtin.func' after all regions
// CHECK: step 6 op 'func.func' after all regions
// CHECK: step 7 op 'builtin.module' after all regions
4 changes: 2 additions & 2 deletions mlir/test/IR/generic-visitors.mlir
@@ -20,7 +20,7 @@ func @structured_cfg() {
}

// CHECK: step 0 op 'builtin.module' before all regions
// CHECK: step 1 op 'builtin.func' before all regions
// CHECK: step 1 op 'func.func' before all regions
// CHECK: step 2 op 'arith.constant' before all regions
// CHECK: step 3 op 'arith.constant' before all regions
// CHECK: step 4 op 'arith.constant' before all regions
@@ -37,7 +37,7 @@ func @structured_cfg() {
// CHECK: step 15 op 'scf.yield' before all regions
// CHECK: step 16 op 'scf.for' after all regions
// CHECK: step 17 op 'func.return' before all regions
// CHECK: step 18 op 'builtin.func' after all regions
// CHECK: step 18 op 'func.func' after all regions
// CHECK: step 19 op 'builtin.module' after all regions

// -----
10 changes: 5 additions & 5 deletions mlir/test/IR/invalid-func-op.mlir
@@ -4,15 +4,15 @@

func @func_op() {
// expected-error@+1 {{expected valid '@'-identifier for symbol name}}
builtin.func missingsigil() -> (i1, index, f32)
func.func missingsigil() -> (i1, index, f32)
return
}

// -----

func @func_op() {
// expected-error@+1 {{expected type instead of SSA identifier}}
builtin.func @mixed_named_arguments(f32, %a : i32) {
func.func @mixed_named_arguments(f32, %a : i32) {
return
}
return
@@ -22,7 +22,7 @@ func @func_op() {

func @func_op() {
// expected-error@+1 {{expected SSA identifier}}
builtin.func @mixed_named_arguments(%a : i32, f32) -> () {
func.func @mixed_named_arguments(%a : i32, f32) -> () {
return
}
return
@@ -32,7 +32,7 @@ func @func_op() {

func @func_op() {
// expected-error@+1 {{entry block must have 1 arguments to match function signature}}
builtin.func @mixed_named_arguments(f32) {
func.func @mixed_named_arguments(f32) {
^entry:
return
}
@@ -43,7 +43,7 @@ func @func_op() {

func @func_op() {
// expected-error@+1 {{type of entry block argument #0('i32') must match the type of the corresponding argument in function signature('f32')}}
builtin.func @mixed_named_arguments(f32) {
func.func @mixed_named_arguments(f32) {
^entry(%arg : i32):
return
}
2 changes: 1 addition & 1 deletion mlir/test/IR/invalid-ops.mlir
@@ -98,7 +98,7 @@ func @func_with_ops(tensor<12xi1>, tensor<42xi32>, tensor<42xi32>) {

func @return_not_in_function() {
"foo.region"() ({
// expected-error@+1 {{'func.return' op expects parent op 'builtin.func'}}
// expected-error@+1 {{'func.return' op expects parent op 'func.func'}}
return
}): () -> ()
return
2 changes: 1 addition & 1 deletion mlir/test/IR/invalid.mlir
@@ -542,7 +542,7 @@ func @return_type_mismatch() -> i32 {

func @return_inside_loop() {
affine.for %i = 1 to 100 {
// expected-error@+1 {{'func.return' op expects parent op 'builtin.func'}}
// expected-error@+1 {{'func.return' op expects parent op 'func.func'}}
return
}
return
2 changes: 1 addition & 1 deletion mlir/test/IR/print-ir-invalid.mlir
@@ -8,7 +8,7 @@ module {}
// The operation is invalid because the body does not have a terminator, print
// the generic form.
// CHECK: Invalid operation:
// CHECK-NEXT: "builtin.func"() ({
// CHECK-NEXT: "func.func"() ({
// CHECK-NEXT: ^bb0:
// CHECK-NEXT: })
// CHECK-SAME: sym_name = "test"
2 changes: 1 addition & 1 deletion mlir/test/IR/test-matchers.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline="builtin.func(test-matchers)" -o /dev/null 2>&1 | FileCheck %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline="func.func(test-matchers)" -o /dev/null 2>&1 | FileCheck %s

func @test1(%a: f32, %b: f32, %c: f32) {
%0 = arith.addf %a, %b: f32
4 changes: 2 additions & 2 deletions mlir/test/IR/traits.mlir
@@ -575,7 +575,7 @@ func @failedHasDominanceScopeOutsideDominanceFreeScope() -> () {
// checked for dominance
func @illegalInsideDominanceFreeScope() -> () {
test.graph_region {
builtin.func @test() -> i1 {
func.func @test() -> i1 {
^bb1:
// expected-error @+1 {{operand #0 does not dominate this use}}
%2:3 = "bar"(%1) : (i64) -> (i1,i1,i1)
@@ -594,7 +594,7 @@ func @illegalInsideDominanceFreeScope() -> () {
// checked for dominance
func @illegalCDFGInsideDominanceFreeScope() -> () {
test.graph_region {
builtin.func @test() -> i1 {
func.func @test() -> i1 {
^bb1:
// expected-error @+1 {{operand #0 does not dominate this use}}
%2:3 = "bar"(%1) : (i64) -> (i1,i1,i1)
30 changes: 15 additions & 15 deletions mlir/test/IR/visitors.mlir
@@ -23,7 +23,7 @@ func @structured_cfg() {

// CHECK-LABEL: Op pre-order visit
// CHECK: Visiting op 'builtin.module'
// CHECK: Visiting op 'builtin.func'
// CHECK: Visiting op 'func.func'
// CHECK: Visiting op 'scf.for'
// CHECK: Visiting op 'use0'
// CHECK: Visiting op 'scf.if'
@@ -34,14 +34,14 @@ func @structured_cfg() {

// CHECK-LABEL: Block pre-order visits
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module'
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'func.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'scf.for'
// CHECK: Visiting block ^bb0 from region 0 from operation 'scf.if'
// CHECK: Visiting block ^bb0 from region 1 from operation 'scf.if'

// CHECK-LABEL: Region pre-order visits
// CHECK: Visiting region 0 from operation 'builtin.module'
// CHECK: Visiting region 0 from operation 'builtin.func'
// CHECK: Visiting region 0 from operation 'func.func'
// CHECK: Visiting region 0 from operation 'scf.for'
// CHECK: Visiting region 0 from operation 'scf.if'
// CHECK: Visiting region 1 from operation 'scf.if'
@@ -54,21 +54,21 @@ func @structured_cfg() {
// CHECK: Visiting op 'use3'
// CHECK: Visiting op 'scf.for'
// CHECK: Visiting op 'func.return'
// CHECK: Visiting op 'builtin.func'
// CHECK: Visiting op 'func.func'
// CHECK: Visiting op 'builtin.module'

// CHECK-LABEL: Block post-order visits
// CHECK: Visiting block ^bb0 from region 0 from operation 'scf.if'
// CHECK: Visiting block ^bb0 from region 1 from operation 'scf.if'
// CHECK: Visiting block ^bb0 from region 0 from operation 'scf.for'
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'func.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module'

// CHECK-LABEL: Region post-order visits
// CHECK: Visiting region 0 from operation 'scf.if'
// CHECK: Visiting region 1 from operation 'scf.if'
// CHECK: Visiting region 0 from operation 'scf.for'
// CHECK: Visiting region 0 from operation 'builtin.func'
// CHECK: Visiting region 0 from operation 'func.func'
// CHECK: Visiting region 0 from operation 'builtin.module'

// CHECK-LABEL: Op pre-order erasures
@@ -100,14 +100,14 @@ func @structured_cfg() {
// CHECK: Erasing op 'use3'
// CHECK: Erasing op 'scf.for'
// CHECK: Erasing op 'func.return'
// CHECK: Erasing op 'builtin.func'
// CHECK: Erasing op 'func.func'
// CHECK: Erasing op 'builtin.module'

// CHECK-LABEL: Block post-order erasures (no skip)
// CHECK: Erasing block ^bb0 from region 0 from operation 'scf.if'
// CHECK: Erasing block ^bb0 from region 1 from operation 'scf.if'
// CHECK: Erasing block ^bb0 from region 0 from operation 'scf.for'
// CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.func'
// CHECK: Erasing block ^bb0 from region 0 from operation 'func.func'
// CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.module'

// -----
@@ -128,7 +128,7 @@ func @unstructured_cfg() {

// CHECK-LABEL: Op pre-order visits
// CHECK: Visiting op 'builtin.module'
// CHECK: Visiting op 'builtin.func'
// CHECK: Visiting op 'func.func'
// CHECK: Visiting op 'regionOp0'
// CHECK: Visiting op 'op0'
// CHECK: Visiting op 'cf.br'
@@ -139,14 +139,14 @@ func @unstructured_cfg() {

// CHECK-LABEL: Block pre-order visits
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module'
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'func.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'regionOp0'
// CHECK: Visiting block ^bb1 from region 0 from operation 'regionOp0'
// CHECK: Visiting block ^bb2 from region 0 from operation 'regionOp0'

// CHECK-LABEL: Region pre-order visits
// CHECK: Visiting region 0 from operation 'builtin.module'
// CHECK: Visiting region 0 from operation 'builtin.func'
// CHECK: Visiting region 0 from operation 'func.func'
// CHECK: Visiting region 0 from operation 'regionOp0'

// CHECK-LABEL: Op post-order visits
@@ -157,19 +157,19 @@ func @unstructured_cfg() {
// CHECK: Visiting op 'op2'
// CHECK: Visiting op 'regionOp0'
// CHECK: Visiting op 'func.return'
// CHECK: Visiting op 'builtin.func'
// CHECK: Visiting op 'func.func'
// CHECK: Visiting op 'builtin.module'

// CHECK-LABEL: Block post-order visits
// CHECK: Visiting block ^bb0 from region 0 from operation 'regionOp0'
// CHECK: Visiting block ^bb1 from region 0 from operation 'regionOp0'
// CHECK: Visiting block ^bb2 from region 0 from operation 'regionOp0'
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'func.func'
// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module'

// CHECK-LABEL: Region post-order visits
// CHECK: Visiting region 0 from operation 'regionOp0'
// CHECK: Visiting region 0 from operation 'builtin.func'
// CHECK: Visiting region 0 from operation 'func.func'
// CHECK: Visiting region 0 from operation 'builtin.module'

// CHECK-LABEL: Op pre-order erasures (skip)
@@ -208,5 +208,5 @@ func @unstructured_cfg() {
// CHECK: Erasing block ^bb0 from region 0 from operation 'regionOp0'
// CHECK: Erasing block ^bb0 from region 0 from operation 'regionOp0'
// CHECK: Erasing block ^bb0 from region 0 from operation 'regionOp0'
// CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.func'
// CHECK: Erasing block ^bb0 from region 0 from operation 'func.func'
// CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.module'
2 changes: 1 addition & 1 deletion mlir/test/IR/wrapping_op.mlir
@@ -2,7 +2,7 @@
// RUN: mlir-opt -allow-unregistered-dialect -mlir-print-op-generic -mlir-print-debuginfo -mlir-print-local-scope %s | FileCheck %s --check-prefix=CHECK-GENERIC

// CHECK-LABEL: func @wrapping_op
// CHECK-GENERIC: "builtin.func"
// CHECK-GENERIC: "func.func"
func @wrapping_op(%arg0 : i32, %arg1 : f32) -> (i3, i2, i1) {
// CHECK: %0:3 = test.wrapping_region wraps "some.op"(%arg1, %arg0) {test.attr = "attr"} : (f32, i32) -> (i1, i2, i3)
// CHECK-GENERIC: "test.wrapping_region"() ({
@@ -4,8 +4,8 @@
// RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.fill register-tile-sizes=4,32 vectorize" | \
// RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=memref.copy register-tile-sizes=4,32 vectorize" | \

// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" | \
// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt -pass-pipeline="func.func(canonicalize,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" | \
// RUN: mlir-opt -pass-pipeline="func.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \
// Activate to dump assembly
// R_UN: -dump-object-file -object-filename=/tmp/a.o \
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(canonicalize,cse),linalg-comprehensive-module-bufferize" |\
// RUN: mlir-opt -pass-pipeline="builtin.func(buffer-deallocation,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" |\
// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(canonicalize,cse),linalg-comprehensive-module-bufferize" |\
// RUN: mlir-opt -pass-pipeline="func.func(buffer-deallocation,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" |\
// RUN: mlir-opt -pass-pipeline="func.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \

// RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext |\
@@ -12,6 +12,7 @@

from mlir.dialects import sparse_tensor as st
from mlir.dialects import builtin
from mlir.dialects import func
from mlir.dialects.linalg.opdsl import lang as dsl

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
@@ -44,7 +45,7 @@ def build_SDDMM(attr: st.EncodingAttr):
arguments = [a, b, s, c]
with ir.InsertionPoint(module.body):

@builtin.FuncOp.from_py_func(*arguments)
@func.FuncOp.from_py_func(*arguments)
def sddmm(*args):
return sddmm_dsl(args[0], args[1], args[2], outs=[args[3]])

@@ -12,6 +12,7 @@

from mlir.dialects import sparse_tensor as st
from mlir.dialects import builtin
from mlir.dialects import func
from mlir.dialects.linalg.opdsl import lang as dsl

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
@@ -41,7 +42,7 @@ def build_SpMM(attr: st.EncodingAttr):
arguments = [a, b, c]
with ir.InsertionPoint(module.body):

@builtin.FuncOp.from_py_func(*arguments)
@func.FuncOp.from_py_func(*arguments)
def spMxM(*args):
return matmul_dsl(args[0], args[1], outs=[args[2]])

@@ -110,7 +110,7 @@ def build(self, types: List[ir.Type]):
# TODO: assert dense? assert element type is recognised by the TypeConverter?
types.append(tp0)
funcTp = ir.FunctionType.get(inputs=[tp0], results=[tp0])
funcOp = builtin.FuncOp(name='main', type=funcTp)
funcOp = func.FuncOp(name='main', type=funcTp)
funcOp.attributes['llvm.emit_c_interface'] = ir.UnitAttr.get()
with ir.InsertionPoint(funcOp.add_entry_block()):
arg0 = funcOp.entry_block.arguments[0]
@@ -670,7 +670,7 @@ def _emit_assignment(
# Build the kernel for the operations.
with ir.InsertionPoint(module.body):

@builtin.FuncOp.from_py_func(*input_types, name=_ENTRY_NAME)
@func.FuncOp.from_py_func(*input_types, name=_ENTRY_NAME)
def linalg_funcop(*args):
# Set up the mapping from the Access nodes to their MLIR values.
for e, mlir in zip(input_accesses, args):
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf,memref-expand,arith-expand),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf,memref-expand,arith-expand),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -1,19 +1,19 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -1,19 +1,19 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -1,19 +1,19 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
@@ -1,14 +1,14 @@
// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop,convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-opt %s -pass-pipeline="func.func(test-vector-to-forloop,convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main \
// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main \
// RUN: -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop)" | FileCheck %s -check-prefix=TRANSFORM
// RUN: mlir-opt %s -pass-pipeline="func.func(test-vector-to-forloop)" | FileCheck %s -check-prefix=TRANSFORM


func private @print_memref_f32(memref<*xf32>)
8 changes: 4 additions & 4 deletions mlir/test/Pass/dynamic-pipeline.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1, dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD1-ONLY --check-prefix=CHECK
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod2, dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD2 --check-prefix=MOD2-ONLY --check-prefix=CHECK
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1,inner_mod2, dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1, dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD1-ONLY --check-prefix=CHECK
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod2, dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD2 --check-prefix=MOD2-ONLY --check-prefix=CHECK
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1,inner_mod2, dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK
// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK


func @f() {
2 changes: 1 addition & 1 deletion mlir/test/Pass/interface-pass.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -verify-diagnostics -pass-pipeline='builtin.func(test-interface-pass)' -o /dev/null
// RUN: mlir-opt %s -verify-diagnostics -pass-pipeline='func.func(test-interface-pass)' -o /dev/null

// Test that we run the interface pass on the function.

2 changes: 1 addition & 1 deletion mlir/test/Pass/invalid-parent.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(test-pass-invalid-parent)' -verify-diagnostics
// RUN: mlir-opt %s -pass-pipeline='func.func(test-pass-invalid-parent)' -verify-diagnostics

// Test that we properly report errors when the parent becomes invalid after running a pass
// on a child operation.
18 changes: 9 additions & 9 deletions mlir/test/Pass/ir-printing.mlir
@@ -1,10 +1,10 @@
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before=cse -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before-all -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_ALL %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-after=cse -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-after-all -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before=cse -print-ir-module-scope -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_MODULE %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,cse)' -print-ir-after-all -print-ir-after-change -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL_CHANGE %s
// RUN: not mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,test-pass-failure)' -print-ir-after-failure -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_FAILURE %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before=cse -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before-all -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_ALL %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-after=cse -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-after-all -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before=cse -print-ir-module-scope -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_MODULE %s
// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,cse)' -print-ir-after-all -print-ir-after-change -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL_CHANGE %s
// RUN: not mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,test-pass-failure)' -print-ir-after-failure -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_FAILURE %s

func @foo() {
%0 = arith.constant 0 : i32
@@ -49,10 +49,10 @@ func @bar() {
// AFTER_ALL: // -----// IR Dump After{{.*}}Canonicalizer //----- //
// AFTER_ALL-NEXT: func @bar()

// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('builtin.func' operation: @foo) //----- //
// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('func.func' operation: @foo) //----- //
// BEFORE_MODULE: func @foo()
// BEFORE_MODULE: func @bar()
// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('builtin.func' operation: @bar) //----- //
// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('func.func' operation: @bar) //----- //
// BEFORE_MODULE: func @foo()
// BEFORE_MODULE: func @bar()

18 changes: 9 additions & 9 deletions mlir/test/Pass/pass-timing.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=LIST %s
// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=PIPELINE %s
// RUN: mlir-opt %s -mlir-disable-threading=false -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=MT_LIST %s
// RUN: mlir-opt %s -mlir-disable-threading=false -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=MT_PIPELINE %s
// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=LIST %s
// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=PIPELINE %s
// RUN: mlir-opt %s -mlir-disable-threading=false -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=MT_LIST %s
// RUN: mlir-opt %s -mlir-disable-threading=false -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=MT_PIPELINE %s
// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=false -test-pm-nested-pipeline -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=NESTED_PIPELINE %s

// LIST: Execution time report
@@ -16,7 +16,7 @@
// PIPELINE: Total Execution Time:
// PIPELINE: Name
// PIPELINE-NEXT: Parser
// PIPELINE-NEXT: 'builtin.func' Pipeline
// PIPELINE-NEXT: 'func.func' Pipeline
// PIPELINE-NEXT: CSE
// PIPELINE-NEXT: (A) DominanceInfo
// PIPELINE-NEXT: Canonicalizer
@@ -38,7 +38,7 @@
// MT_PIPELINE: Total Execution Time:
// MT_PIPELINE: Name
// MT_PIPELINE-NEXT: Parser
// MT_PIPELINE-NEXT: 'builtin.func' Pipeline
// MT_PIPELINE-NEXT: 'func.func' Pipeline
// MT_PIPELINE-NEXT: CSE
// MT_PIPELINE-NEXT: (A) DominanceInfo
// MT_PIPELINE-NEXT: Canonicalizer
@@ -52,12 +52,12 @@
// NESTED_PIPELINE: Total Execution Time:
// NESTED_PIPELINE: Name
// NESTED_PIPELINE-NEXT: Parser
// NESTED_PIPELINE-NEXT: Pipeline Collection : ['builtin.func', 'builtin.module']
// NESTED_PIPELINE-NEXT: 'builtin.func' Pipeline
// NESTED_PIPELINE-NEXT: Pipeline Collection : ['builtin.module', 'func.func']
// NESTED_PIPELINE-NEXT: 'func.func' Pipeline
// NESTED_PIPELINE-NEXT: TestFunctionPass
// NESTED_PIPELINE-NEXT: 'builtin.module' Pipeline
// NESTED_PIPELINE-NEXT: TestModulePass
// NESTED_PIPELINE-NEXT: 'builtin.func' Pipeline
// NESTED_PIPELINE-NEXT: 'func.func' Pipeline
// NESTED_PIPELINE-NEXT: TestFunctionPass
// NESTED_PIPELINE-NEXT: Output
// NESTED_PIPELINE-NEXT: Rest
10 changes: 5 additions & 5 deletions mlir/test/Pass/pipeline-options-parsing.mlir
@@ -1,11 +1,11 @@
// RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass{)' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_1 %s
// RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass{test-option=3})' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_2 %s
// RUN: not mlir-opt %s -pass-pipeline='builtin.module(builtin.func(test-options-pass{list=3}), test-module-pass{invalid-option=3})' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_3 %s
// RUN: not mlir-opt %s -pass-pipeline='builtin.module(func.func(test-options-pass{list=3}), test-module-pass{invalid-option=3})' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_3 %s
// RUN: not mlir-opt %s -pass-pipeline='test-options-pass{list=3 list=notaninteger}' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_4 %s
// RUN: mlir-opt %s -pass-pipeline='builtin.func(test-options-pass{list=1,2,3,4 list=5 string=value1 string=value2})'
// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.func(test-options-pass{string-list=a list=1,2,3,4 string-list=b,c list=5 string-list=d string=nested_pipeline{arg1=10 arg2=" {} " arg3=true}})' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_1 %s
// RUN: mlir-opt %s -pass-pipeline='func.func(test-options-pass{list=1,2,3,4 list=5 string=value1 string=value2})'
// RUN: mlir-opt %s -verify-each=false -pass-pipeline='func.func(test-options-pass{string-list=a list=1,2,3,4 string-list=b,c list=5 string-list=d string=nested_pipeline{arg1=10 arg2=" {} " arg3=true}})' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_1 %s
// RUN: mlir-opt %s -verify-each=false -test-options-pass-pipeline='list=1 string-list=a,b' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_2 %s
// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.module(builtin.func(test-options-pass{list=3}), builtin.func(test-options-pass{list=1,2,3,4}))' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_3 %s
// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.module(func.func(test-options-pass{list=3}), func.func(test-options-pass{list=1,2,3,4}))' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_3 %s

// CHECK_ERROR_1: missing closing '}' while processing pass options
// CHECK_ERROR_2: no such option test-option
@@ -14,4 +14,4 @@

// CHECK_1: test-options-pass{list=1,2,3,4,5 string=nested_pipeline{arg1=10 arg2=" {} " arg3=true} string-list=a,b,c,d}
// CHECK_2: test-options-pass{list=1 string= string-list=a,b}
// CHECK_3: builtin.module(builtin.func(test-options-pass{list=3 string= }), builtin.func(test-options-pass{list=1,2,3,4 string= }))
// CHECK_3: builtin.module(func.func(test-options-pass{list=3 string= }), func.func(test-options-pass{list=1,2,3,4 string= }))
18 changes: 9 additions & 9 deletions mlir/test/Pass/pipeline-parsing.mlir
@@ -1,16 +1,16 @@
// RUN: mlir-opt %s -mlir-disable-threading -pass-pipeline='builtin.module(test-module-pass,builtin.func(test-function-pass)),builtin.func(test-function-pass)' -pass-pipeline="builtin.func(cse,canonicalize)" -verify-each=false -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck %s
// RUN: mlir-opt %s -mlir-disable-threading -pass-pipeline='builtin.module(test-module-pass,func.func(test-function-pass)),func.func(test-function-pass)' -pass-pipeline="func.func(cse,canonicalize)" -verify-each=false -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck %s
// RUN: mlir-opt %s -mlir-disable-threading -test-textual-pm-nested-pipeline -verify-each=false -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck %s --check-prefix=TEXTUAL_CHECK
// RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_1 %s
// RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass))' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_2 %s
// RUN: not mlir-opt %s -pass-pipeline='builtin.module()(' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_3 %s
// RUN: not mlir-opt %s -pass-pipeline=',' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_4 %s
// RUN: not mlir-opt %s -pass-pipeline='builtin.func(test-module-pass)' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_5 %s
// RUN: not mlir-opt %s -pass-pipeline='func.func(test-module-pass)' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_5 %s

// CHECK_ERROR_1: encountered unbalanced parentheses while parsing pipeline
// CHECK_ERROR_2: encountered extra closing ')' creating unbalanced parentheses while parsing pipeline
// CHECK_ERROR_3: expected ',' after parsing pipeline
// CHECK_ERROR_4: does not refer to a registered pass or pass pipeline
// CHECK_ERROR_5: Can't add pass '{{.*}}TestModulePass' restricted to 'builtin.module' on a PassManager intended to run on 'builtin.func', did you intend to nest?
// CHECK_ERROR_5: Can't add pass '{{.*}}TestModulePass' restricted to 'builtin.module' on a PassManager intended to run on 'func.func', did you intend to nest?
func @foo() {
return
}
@@ -21,21 +21,21 @@ module {
}
}

// CHECK: Pipeline Collection : ['builtin.func', 'builtin.module']
// CHECK-NEXT: 'builtin.func' Pipeline
// CHECK: Pipeline Collection : ['builtin.module', 'func.func']
// CHECK-NEXT: 'func.func' Pipeline
// CHECK-NEXT: TestFunctionPass
// CHECK-NEXT: CSE
// CHECK-NEXT: DominanceInfo
// CHECK-NEXT: Canonicalizer
// CHECK-NEXT: 'builtin.module' Pipeline
// CHECK-NEXT: TestModulePass
// CHECK-NEXT: 'builtin.func' Pipeline
// CHECK-NEXT: 'func.func' Pipeline
// CHECK-NEXT: TestFunctionPass

// TEXTUAL_CHECK: Pipeline Collection : ['builtin.func', 'builtin.module']
// TEXTUAL_CHECK-NEXT: 'builtin.func' Pipeline
// TEXTUAL_CHECK: Pipeline Collection : ['builtin.module', 'func.func']
// TEXTUAL_CHECK-NEXT: 'func.func' Pipeline
// TEXTUAL_CHECK-NEXT: TestFunctionPass
// TEXTUAL_CHECK-NEXT: 'builtin.module' Pipeline
// TEXTUAL_CHECK-NEXT: TestModulePass
// TEXTUAL_CHECK-NEXT: 'builtin.func' Pipeline
// TEXTUAL_CHECK-NEXT: 'func.func' Pipeline
// TEXTUAL_CHECK-NEXT: TestFunctionPass
6 changes: 3 additions & 3 deletions mlir/test/Pass/pipeline-stats.mlir
@@ -1,14 +1,14 @@
// REQUIRES: asserts
// RUN: mlir-opt %s -verify-each=true -pass-pipeline='builtin.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=list 2>&1 | FileCheck -check-prefix=LIST %s
// RUN: mlir-opt %s -verify-each=true -pass-pipeline='builtin.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=pipeline 2>&1 | FileCheck -check-prefix=PIPELINE %s
// RUN: mlir-opt %s -verify-each=true -pass-pipeline='func.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=list 2>&1 | FileCheck -check-prefix=LIST %s
// RUN: mlir-opt %s -verify-each=true -pass-pipeline='func.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=pipeline 2>&1 | FileCheck -check-prefix=PIPELINE %s

// LIST: Pass statistics report
// LIST: TestStatisticPass
// LIST-NEXT: (S) {{0|8}} num-ops - Number of operations counted
// LIST-NOT: Verifier

// PIPELINE: Pass statistics report
// PIPELINE: 'builtin.func' Pipeline
// PIPELINE: 'func.func' Pipeline
// PIPELINE-NEXT: TestStatisticPass
// PIPELINE-NEXT: (S) {{0|4}} num-ops - Number of operations counted
// PIPELINE-NEXT: TestStatisticPass
2 changes: 1 addition & 1 deletion mlir/test/Pass/run-reproducer.mlir
@@ -1,4 +1,4 @@
// configuration: -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before=cse
// configuration: -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before=cse

// Test of the reproducer run option. The first line has to be the
// configuration (matching what is produced by reproducer).
2 changes: 1 addition & 1 deletion mlir/test/Target/Cpp/invalid.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-translate -split-input-file -mlir-to-cpp -verify-diagnostics %s

// expected-error@+1 {{'builtin.func' op with multiple blocks needs variables declared at top}}
// expected-error@+1 {{'func.func' op with multiple blocks needs variables declared at top}}
func @multiple_blocks() {
^bb1:
cf.br ^bb2
2 changes: 1 addition & 1 deletion mlir/test/Target/LLVMIR/arm-neon-2d.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -pass-pipeline="builtin.func(arm-neon-2d-to-intr)" %s | FileCheck %s
// RUN: mlir-opt -pass-pipeline="func.func(arm-neon-2d-to-intr)" %s | FileCheck %s

// CHECK-LABEL: arm_neon_sdot2d_4x4_i8i8
func @arm_neon_sdot2d_4x4_i8i8(%a: vector<4xi32>, %b: vector<4x4xi8>, %c: vector<4x4xi8>) -> vector<4xi32> {
2 changes: 1 addition & 1 deletion mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | mlir-translate -mlir-to-llvmir | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline="convert-vector-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | mlir-translate -mlir-to-llvmir | FileCheck %s

func @genbool_1d() -> vector<8xi1> {
%0 = vector.constant_mask [4] : vector<8xi1>
2 changes: 1 addition & 1 deletion mlir/test/Transforms/canonicalize-block-merge.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck %s

// Check the simple case of single operation blocks with a return.

4 changes: 2 additions & 2 deletions mlir/test/Transforms/canonicalize-dce.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s

// Test case: Simple case of deleting a dead pure op.

@@ -82,7 +82,7 @@ func @f(%arg0: f32, %pred: i1) {
// CHECK-NEXT: return

func @f(%arg0: f32) {
builtin.func @g(%arg1: f32) {
func.func @g(%arg1: f32) {
%0 = "arith.addf"(%arg1, %arg1) : (f32, f32) -> f32
return
}
4 changes: 2 additions & 2 deletions mlir/test/Transforms/canonicalize-td.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize{top-down=true})' | FileCheck %s --check-prefix=TD
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s --check-prefix=BU
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize{top-down=true})' | FileCheck %s --check-prefix=TD
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize)' | FileCheck %s --check-prefix=BU


// BU-LABEL: func @default_insertion_position
4 changes: 2 additions & 2 deletions mlir/test/Transforms/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck %s

// CHECK-LABEL: func @test_subi_zero
func @test_subi_zero(%arg0: i32) -> i32 {
@@ -424,7 +424,7 @@ func @write_only_alloca_fold(%v: f32) {
// CHECK-LABEL: func @dead_block_elim
func @dead_block_elim() {
// CHECK-NOT: ^bb
builtin.func @nested() {
func.func @nested() {
return

^bb1:
2 changes: 1 addition & 1 deletion mlir/test/Transforms/constant-fold.mlir
@@ -758,7 +758,7 @@ func @cmpf_inf() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1,
func @nested_isolated_region() {
// CHECK-NEXT: func @isolated_op
// CHECK-NEXT: arith.constant 2
builtin.func @isolated_op() {
func.func @isolated_op() {
%0 = arith.constant 1 : i32
%2 = arith.addi %0, %0 : i32
"foo.yield"(%2) : (i32) -> ()
4 changes: 2 additions & 2 deletions mlir/test/Transforms/cse.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(cse)' | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(cse)' | FileCheck %s

// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 mod 2)>
#map0 = affine_map<(d0) -> (d0 mod 2)>
@@ -229,7 +229,7 @@ func @nested_isolated() -> i32 {
%0 = arith.constant 1 : i32

// CHECK-NEXT: @nested_func
builtin.func @nested_func() {
func.func @nested_func() {
// CHECK-NEXT: arith.constant 1
%foo = arith.constant 1 : i32
"foo.yield"(%foo) : (i32) -> ()
2 changes: 1 addition & 1 deletion mlir/test/Transforms/parallel-loop-collapsing.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,3 collapsed-indices-1=1,4 collapsed-indices-2=2}, canonicalize)' | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,3 collapsed-indices-1=1,4 collapsed-indices-2=2}, canonicalize)' | FileCheck %s

// CHECK-LABEL: func @parallel_many_dims() {
func @parallel_many_dims() {
2 changes: 1 addition & 1 deletion mlir/test/Transforms/parametric-mapping.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect -pass-pipeline="builtin.func(test-mapping-to-processing-elements)" %s | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect -pass-pipeline="func.func(test-mapping-to-processing-elements)" %s | FileCheck %s

// CHECK: #[[mul_map:.+]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK: #[[add_map:.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
4 changes: 2 additions & 2 deletions mlir/test/Transforms/print-op-graph.mlir
@@ -4,7 +4,7 @@
// DFG-LABEL: digraph G {
// DFG: subgraph {{.*}} {
// DFG: subgraph {{.*}}
// DFG: label = "builtin.func{{.*}}merge_blocks
// DFG: label = "func.func{{.*}}merge_blocks
// DFG: subgraph {{.*}} {
// DFG: v[[ARG0:.*]] [label = "arg0"
// DFG: v[[CONST10:.*]] [label ={{.*}}10 : i32
@@ -26,7 +26,7 @@
// CFG-LABEL: digraph G {
// CFG: subgraph {{.*}} {
// CFG: subgraph {{.*}}
// CFG: label = "builtin.func{{.*}}merge_blocks
// CFG: label = "func.func{{.*}}merge_blocks
// CFG: subgraph {{.*}} {
// CFG: v[[C1:.*]] [label = "arith.constant
// CFG: v[[C2:.*]] [label = "arith.constant
2 changes: 1 addition & 1 deletion mlir/test/Transforms/sccp-structured.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="builtin.func(sccp)" -split-input-file | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="func.func(sccp)" -split-input-file | FileCheck %s

/// Check that a constant is properly propagated when only one edge is taken.

2 changes: 1 addition & 1 deletion mlir/test/Transforms/sccp.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="builtin.func(sccp)" -split-input-file | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="func.func(sccp)" -split-input-file | FileCheck %s

/// Check simple forward constant propagation without any control flow.

2 changes: 1 addition & 1 deletion mlir/test/Transforms/single-parallel-loop-collapsing.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,1}, canonicalize)' | FileCheck %s
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,1}, canonicalize)' | FileCheck %s

func @collapse_to_single() {
%c0 = arith.constant 3 : index
6 changes: 3 additions & 3 deletions mlir/test/Transforms/test-canonicalize-filter.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s --check-prefix=NO_FILTER
// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize{enable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_ENABLE
// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize{disable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_DISABLE
// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' | FileCheck %s --check-prefix=NO_FILTER
// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize{enable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_ENABLE
// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize{disable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_DISABLE

// NO_FILTER-LABEL: func @remove_op_with_inner_ops_pattern
// NO_FILTER-NEXT: return
2 changes: 1 addition & 1 deletion mlir/test/Transforms/test-canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s
// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' | FileCheck %s

// CHECK-LABEL: func @remove_op_with_inner_ops_pattern
func @remove_op_with_inner_ops_pattern() {
2 changes: 1 addition & 1 deletion mlir/test/Transforms/test-legalizer-analysis.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt -allow-unregistered-dialect -test-legalize-patterns -verify-diagnostics -test-legalize-mode=analysis %s | FileCheck %s
// expected-remark@-2 {{op 'builtin.module' is legalizable}}

// expected-remark@+1 {{op 'builtin.func' is legalizable}}
// expected-remark@+1 {{op 'func.func' is legalizable}}
func @test(%arg0: f32) {
// expected-remark@+1 {{op 'test.illegal_op_a' is legalizable}}
%result = "test.illegal_op_a"() : () -> (i32)
2 changes: 1 addition & 1 deletion mlir/test/Transforms/test-legalizer-full.mlir
@@ -37,7 +37,7 @@ func @recursively_legal_invalid_op() {
}
/// Operation that is dynamically legal, i.e. the function has a pattern
/// applied to legalize the argument type before it becomes recursively legal.
builtin.func @dynamic_func(%arg: i64) attributes {test.recursively_legal} {
func.func @dynamic_func(%arg: i64) attributes {test.recursively_legal} {
%ignored = "test.illegal_op_f"() : () -> (i32)
"test.return"() : () -> ()
}
3 changes: 1 addition & 2 deletions mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
@@ -14,6 +14,7 @@
#include "mlir/Dialect/Affine/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
@@ -23,8 +24,6 @@

using namespace mlir;

static llvm::cl::OptionCategory clOptionsCategory(PASS_NAME " options");

namespace {

struct TestAffineDataCopy
@@ -14,6 +14,7 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

using namespace mlir;

1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
@@ -14,6 +14,7 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopFusionUtils.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"

#define DEBUG_TYPE "test-loop-fusion"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
@@ -16,6 +16,7 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/Builders.h"
3 changes: 2 additions & 1 deletion mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
@@ -13,6 +13,7 @@
#include <utility>

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
@@ -217,7 +218,7 @@ void TestLinalgCodegenStrategy::runStrategy(
.enableTransferToSCFConversion());
// Created a nested OpPassManager and run.
FuncOp funcOp = getOperation();
OpPassManager dynamicPM("builtin.func");
OpPassManager dynamicPM("func.func");
strategy.configurePassPipeline(dynamicPM, funcOp.getContext(), runEnablePass);
if (failed(runPipeline(dynamicPM, funcOp)))
return signalPassFailure();
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/Transforms.h"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Linalg/TestLinalgHoisting.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
#include "mlir/Pass/Pass.h"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -12,6 +12,7 @@

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/SCF/TestSCFUtils.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/SCF/Utils/Utils.h"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVTypes.h"
#include "mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Test/TestDialect.h
@@ -18,6 +18,7 @@
#include "TestInterfaces.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/DLTI/Traits.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Traits.h"
#include "mlir/IR/BuiltinOps.h"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Tosa/CMakeLists.txt
@@ -10,6 +10,7 @@ add_mlir_dialect_library(MLIRTosaTestPasses
MLIRTosaPassIncGen

LINK_LIBS PUBLIC
MLIRFunc
MLIRPass
MLIRTosa
MLIRTransformUtils
2 changes: 1 addition & 1 deletion mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -239,7 +239,7 @@ struct TestVectorTransposeLowering
.lower8x8xf32()));
}

OpPassManager dynamicPM("builtin.func");
OpPassManager dynamicPM("func.func");
dynamicPM.addPass(createLinalgStrategyLowerVectorsPass(options));
if (failed(runPipeline(dynamicPM, getOperation())))
return signalPassFailure();
1 change: 1 addition & 0 deletions mlir/test/lib/Pass/TestPassManager.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//

#include "TestDialect.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
2 changes: 1 addition & 1 deletion mlir/test/mlir-cpu-runner/async-error.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-vector-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | mlir-cpu-runner \
// RUN: -e main -entry-point-result=void -O0 \
// RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \