From 219f4bc4153642864c3f22ae4b15c9a4b1fac837 Mon Sep 17 00:00:00 2001
From: Jakub Kuderski
Date: Wed, 12 Apr 2023 17:19:20 -0400
Subject: [PATCH] [StableHLO][NFC] Enable FileCheck variable scope in linalg
 lowering tests

This option invalidates all local variable bindings whenever a `CHECK-LABEL`
directive is matched. Enabling it revealed a couple of issues in the dot
product tests, where stale variable bindings from previous functions were
silently reused. Tests that bind variables (e.g., affine map names) before
the function are switched from `CHECK-LABEL` to plain `CHECK` so that those
bindings remain visible.

Issue: https://github.com/openxla/iree/issues/12678
---
 .../StableHLO/test/stablehlo_to_linalg.mlir   | 34 ++++++++++---------
 .../test/stablehlo_to_linalg_dot_prod.mlir    | 16 +++++----
 .../test/stablehlo_to_linalg_pointwise.mlir   | 20 +++++------
 3 files changed, 38 insertions(+), 32 deletions(-)

diff --git a/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg.mlir b/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg.mlir
index bf19fe26d01f..7d36d8ceea79 100644
--- a/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg.mlir
+++ b/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg.mlir
@@ -1,5 +1,5 @@
// RUN: iree-opt %s --iree-stablehlo-to-linalg --split-input-file \
-// RUN: --canonicalize | FileCheck --enable-var-scope=false %s
+// RUN: --canonicalize | FileCheck %s

// CHECK-LABEL: func @concatenate(
// CHECK-SAME: %[[VAL_0:[a-zA-Z0-9_]*]]
@@ -227,7 +227,7 @@ func.func @einsum_dynamic_size_broadcast_dot(%arg0: tensor<?x?x4xf32>, %arg1: te
// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, 0)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>

-// CHECK-LABEL: func @broadcast_in_dim
+// CHECK: func @broadcast_in_dim
func.func @broadcast_in_dim(%operand: tensor<5x7x1xf32>) -> tensor<7x10x6x4x5xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
{broadcast_dimensions = dense<[4,0,2]> : tensor<3xi64>}
@@ -251,7 +251,7 @@
// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, 0)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>

-// CHECK-LABEL: func @broadcast_in_dim_ui32
+// CHECK: func @broadcast_in_dim_ui32
func.func @broadcast_in_dim_ui32(%operand: tensor<5x7x1xui32>) -> tensor<7x10x6x4x5xui32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
{broadcast_dimensions = dense<[4,0,2]> : tensor<3xi64>}
@@ -278,7 +278,7 @@ func.func @broadcast_in_dim_ui32(%operand: tensor<5x7x1xui32>) -> tensor<7x10x6x
// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1) -> (d0)>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1) -> (d0, d1)>

-// CHECK-LABEL: func @broadcast_in_dim_with_one_to_one
+// CHECK: func @broadcast_in_dim_with_one_to_one
func.func @broadcast_in_dim_with_one_to_one(
%operand: tensor<1xf32>) -> tensor<1x5xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
@@ -301,7 +301,7 @@
// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d0, d1)>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>

-// CHECK-LABEL: func @broadcast_in_dim_with_transpose
+// CHECK: func @broadcast_in_dim_with_transpose
func.func @broadcast_in_dim_with_transpose(
%operand: tensor<2x3x4xf32>) -> tensor<3x4x2x5xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
@@ -326,7 +326,7 @@ func.func @broadcast_in_dim_with_transpose(
// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2) -> ()>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @broadcast_in_dim_scalar
+// CHECK: func @broadcast_in_dim_scalar
func.func @broadcast_in_dim_scalar(%operand: tensor<f32>) -> tensor<7x10x6xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
{broadcast_dimensions = dense<[]> : tensor<0xi64>}
@@ -347,7 +347,7 @@ func.func @broadcast_in_dim_scalar(%operand: tensor<f32>) -> tensor<7x10x6xf32>
// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1, d2) -> ()>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>

-// CHECK-LABEL: func @broadcast_scalar
+// CHECK: func @broadcast_scalar
func.func @broadcast_scalar(%arg: tensor<f32>) -> tensor<4x2x1xf32> {
%0 = "stablehlo.broadcast"(%arg) {broadcast_sizes = dense<[4, 2, 1]> : tensor<3xi64>} : (tensor<f32>) -> tensor<4x2x1xf32>
func.return %0: tensor<4x2x1xf32>
@@ -368,7 +368,7 @@ func.func @broadcast_scalar(%arg: tensor<f32>) -> tensor<4x2x1xf32> {
// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>

-// CHECK-LABEL: func @broadcast
+// CHECK: func @broadcast
func.func @broadcast(%arg: tensor<4x?x16xf32>) -> tensor<4x2x1x4x?x16xf32> {
%0 = "stablehlo.broadcast"(%arg) {broadcast_sizes = dense<[4, 2, 1]> : tensor<3xi64>} : (tensor<4x?x16xf32>) -> tensor<4x2x1x4x?x16xf32>
func.return %0: tensor<4x2x1x4x?x16xf32>
@@ -390,7 +390,7 @@ func.func @broadcast(%arg: tensor<4x?x16xf32>) -> tensor<4x2x1x4x?x16xf32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_f32
+// CHECK: func @iota_f32
func.func @iota_f32() -> tensor<7x10xf32> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64, someattr} : () -> (tensor<7x10xf32>)
func.return %result : tensor<7x10xf32>
@@ -417,7 +417,7 @@ func.func @iota_f32() -> tensor<7x10xf32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_i32
+// CHECK: func @iota_i32
func.func @iota_i32() -> tensor<7x10xi32> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64} : () -> (tensor<7x10xi32>)
func.return %result : tensor<7x10xi32>
@@ -433,7 +433,7 @@ func.func @iota_i32() -> tensor<7x10xi32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_ui32
+// CHECK: func @iota_ui32
func.func @iota_ui32() -> tensor<7x10xui32> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64} : () -> (tensor<7x10xui32>)
func.return %result : tensor<7x10xui32>
@@ -450,7 +450,7 @@ func.func @iota_ui32() -> tensor<7x10xui32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_complexf32
+// CHECK: func @iota_complexf32
func.func @iota_complexf32() -> tensor<7x10xcomplex<f32>> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64} : () -> (tensor<7x10xcomplex<f32>>)
func.return %result : tensor<7x10xcomplex<f32>>
@@ -469,7 +469,7 @@ func.func @iota_complexf32() -> tensor<7x10xcomplex<f32>> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @dynamic_iota_f32
+// CHECK: func @dynamic_iota_f32
// CHECK-SAME: %[[SHAPE:.*]]: tensor<?xi32>
func.func @dynamic_iota_f32(%shape: tensor<?xi32>) -> tensor<?x?x?xf32> {
%result = "stablehlo.dynamic_iota"(%shape) {iota_dimension = 1 : i64} : (tensor<?xi32>) -> (tensor<?x?x?xf32>)
@@ -491,7 +491,7 @@ func.func @dynamic_iota_f32(%shape: tensor<?xi32>) -> tensor<?x?x?xf32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @dyanmic_iota_ui32
+// CHECK: func @dyanmic_iota_ui32
// CHECK-SAME: %[[SHAPE:.*]]: tensor<?xi32>
func.func @dyanmic_iota_ui32(%shape: tensor<?xi32>) -> tensor<?x?x?xui32> {
%result = "stablehlo.dynamic_iota"(%shape) {iota_dimension = 1 : i64} : (tensor<?xi32>) -> (tensor<?x?x?xui32>)
@@ -704,7 +704,7 @@ func.func @set_dimension_size(
// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d0, d3, d2)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>

-// CHECK-LABEL: func @transpose
+// CHECK: func @transpose
func.func @transpose(%arg0: tensor<2x3x9x5xi32>) -> tensor<3x2x5x9xi32> {
%0 = "stablehlo.transpose"(%arg0) {permutation = dense<[1, 0, 3, 2]> : tensor<4xi64>}
: (tensor<2x3x9x5xi32>) -> tensor<3x2x5x9xi32>
@@ -719,7 +719,7 @@
// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d0, d3, d2)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>

-// CHECK-LABEL: func @transpose_dynamic
+// CHECK: func @transpose_dynamic
func.func @transpose_dynamic(%arg0: tensor<?x?x?x?xi32>) -> tensor<?x?x?x?xi32> {
%0 = "stablehlo.transpose"(%arg0) {permutation = dense<[1, 0, 3, 2]> : tensor<4xi64>, someattr}
: (tensor<?x?x?x?xi32>) -> tensor<?x?x?x?xi32>
@@ -750,6 +750,8 @@ func.func @transpose_dynamic(%arg0: tensor<?x?x?x?xi32>) -> tensor<?x?x?x?xi32>
// CHECK-PRIMITIVE-SAME: permutation = [1, 0, 3, 2]
// CHECK-PRIMITIVE-SAME: {someattr}

+// -----
+
func.func @transpose_unsigned(%arg0: tensor<2x2xui32>) -> tensor<2x2xui32> {
%0 = "stablehlo.transpose"(%arg0) {
permutation = dense<[1, 0]> : tensor<2xi64>,
diff --git a/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_dot_prod.mlir b/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_dot_prod.mlir
index 13120ebf1e01..1c4f3e2bfcbb 100644
--- a/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_dot_prod.mlir
+++ b/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_dot_prod.mlir
@@ -1,5 +1,8 @@
// RUN: iree-opt %s --iree-stablehlo-to-linalg --split-input-file \
-// RUN: --canonicalize | FileCheck --enable-var-scope=false %s
+// RUN: --canonicalize | FileCheck %s
+
+// Note: We need the canonicalization pass to deduplicate constants. This test
+// does not rely on it to simplify arithmetic, etc.

func.func @dot_general(%arg0: tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
@@ -105,8 +108,9 @@ func.func @dot_general_multiple_batch_dimensions(%arg0: tensor<3x4x2x4xi32>,
// CHECK: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d4, d2)>
// CHECK: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d3, d4)>
// CHECK: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3)>
-// CHECK-LABEL: func @dot_general_multiple_batch_dimensions(
-// CHECK: linalg.generic
+// CHECK: func @dot_general_multiple_batch_dimensions
+// CHECK-SAME: (%[[ARG0:.+]]: tensor<3x4x2x4xi32>, %[[ARG1:.+]]: tensor<3x4x3x2xi32>)
+// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]}
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]] : tensor<3x4x2x4xi32>, tensor<3x4x3x2xi32>)
@@ -121,8 +125,8 @@ func.func @dot_matmul(%arg0: tensor<2x3xf32>,
: (tensor<2x3xf32>, tensor<3x?xf32>) -> tensor<2x?xf32>
func.return %0 : tensor<2x?xf32>
}
-// CHECK-LABEL: func @dot_matmul(
-// CHECK-SAME: %[[ARG0:.*]]: tensor<2x3xf32>, %[[ARG1:.*]]: tensor<3x?xf32>)
+// CHECK-LABEL: func @dot_matmul
+// CHECK-SAME: (%[[ARG0:.*]]: tensor<2x3xf32>, %[[ARG1:.*]]: tensor<3x?xf32>)
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[D1:.*]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[INIT:.*]] = tensor.empty(%[[D1]])
@@ -236,7 +240,7 @@ func.func @dot_vecmat(%arg0: tensor<3xf32>,
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[D1:.*]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[INIT:.*]] = tensor.empty(%[[D1]])
-// CHECK: linalg.fill ins(%{{.*}}{{.*}}outs(%[[INIT]]
+// CHECK: %[[FILL:.*]] = linalg.fill ins(%{{.*}}{{.*}}outs(%[[INIT]]
// CHECK: linalg.vecmat
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]] : tensor<3xf32>, tensor<3x?xf32>)
// CHECK-SAME: outs(%[[FILL]] : tensor<?xf32>)
diff --git a/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_pointwise.mlir b/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_pointwise.mlir
index f309a8f48e3f..bfca74817fed 100644
--- a/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_pointwise.mlir
+++ b/compiler/src/iree/compiler/InputConversion/StableHLO/test/stablehlo_to_linalg_pointwise.mlir
@@ -1,9 +1,9 @@
// RUN: iree-opt %s --iree-stablehlo-to-linalg --split-input-file \
-// RUN: --canonicalize | FileCheck --enable-var-scope=false %s
+// RUN: --canonicalize | FileCheck %s

// RUN: iree-opt %s --iree-stablehlo-to-linalg="enable-primitive-ops=true" \
// RUN: --split-input-file --canonicalize | \
-// RUN: FileCheck %s --enable-var-scope=false --check-prefix=CHECK-PRIMITIVE
+// RUN: FileCheck %s --check-prefix=CHECK-PRIMITIVE

// CHECK: #map = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @float_add
@@ -875,7 +875,7 @@ func.func @select(%pred: tensor<2x2xi1>, %lhs: tensor<2x2xf32>,
// CHECK-DAG: #[[SCALAR_MAP:.*]] = affine_map<(d0, d1) -> ()>
// CHECK-DAG: #[[ID_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @select_scalar_pred_dyn
+// CHECK: func @select_scalar_pred_dyn
// CHECK-SAME: (%[[PRED:.*]]: tensor<i1>, %[[LHS:.*]]: tensor<2x?xf32>, %[[RHS:.*]]: tensor<2x?xf32>)
func.func @select_scalar_pred_dyn(%pred : tensor<i1>, %lhs: tensor<2x?xf32>, %rhs: tensor<2x?xf32>) -> tensor<2x?xf32> {
%0 = "stablehlo.select"(%pred, %lhs, %rhs) {someattr} : (tensor<i1>, tensor<2x?xf32>, tensor<2x?xf32>) -> (tensor<2x?xf32>)
@@ -954,7 +954,7 @@ func.func @bitcast_convert_dynamic(%input: tensor<?xi32>) -> tensor<?xf32> {
// CHECK: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0)>
// CHECK: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @bitcast_convert_expand
+// CHECK: func @bitcast_convert_expand
func.func @bitcast_convert_expand(%input: tensor<6xi32>) -> tensor<6x4xi8> {
%result = "stablehlo.bitcast_convert"(%input) : (tensor<6xi32>) -> tensor<6x4xi8>
func.return %result : tensor<6x4xi8>
@@ -979,7 +979,7 @@ func.func @bitcast_convert_expand(%input: tensor<6xi32>) -> tensor<6x4xi8> {
// CHECK: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
-// CHECK-LABEL: func @bitcast_convert_contract
+// CHECK: func @bitcast_convert_contract
func.func @bitcast_convert_contract(%input: tensor<7x4xi8>) -> tensor<7xi32> {
%result = "stablehlo.bitcast_convert"(%input) : (tensor<7x4xi8>) -> tensor<7xi32>
func.return %result : tensor<7xi32>
@@ -1118,7 +1118,7 @@ func.func @shift_right_logical(%lhs: tensor<2x2xi32>,
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
-// CHECK-LABEL: func @einsum_basic
+// CHECK: func @einsum_basic
func.func @einsum_basic(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x5x6xf32>) -> tensor<3x4x6xf32> {
%0 = "stablehlo.einsum"(%arg0, %arg1) {einsum_config = "ijk,ikm->ijm", someattr}: (tensor<3x4x5xf32>, tensor<3x5x6xf32>) -> tensor<3x4x6xf32>
func.return %0 : tensor<3x4x6xf32>
@@ -1141,7 +1141,7 @@
// -----

// CHECK: #map = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @float_pow
+// CHECK: func @float_pow
func.func @float_pow(%lhs: tensor<2x2xf32>,
%rhs: tensor<2x2xf32>) -> tensor<2x2xf32> {
// CHECK: linalg.generic
@@ -1174,10 +1174,10 @@ func.func @complex_pow(%lhs: tensor<2x2xcomplex<f32>>,
// -----

// CHECK: #map = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @integer_pow
+// CHECK: func @integer_pow
func.func @integer_pow(%lhs: tensor<2x2xi32>,
%rhs: tensor<2x2xi32>) -> tensor<2x2xi32> {
- // CHECK: linalg.generic
+ // CHECK: linalg.generic
// CHECK: ^{{[a-z0-9_]*}}
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: i32
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: i32
@@ -1445,4 +1445,4 @@ func.func @reduce_precision(%arg0: tensor<1x2x3x4xf32>) -> tensor<1x2x3x4xf32> {
%0 = "stablehlo.reduce_precision"(%arg0) {exponent_bits=3:i32, mantissa_bits=3:i32} : (tensor<1x2x3x4xf32>) -> tensor<1x2x3x4xf32>
return %0 : tensor<1x2x3x4xf32>
-}
\ No newline at end of file
+}
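
For reference, a minimal sketch of the failure mode this option catches. The
test names below are hypothetical; the pattern mirrors the dot_vecmat fix in
the diff above, where %[[FILL]] was only bound in an earlier function's
checks.

// With --enable-var-scope=false, the checks under @second silently reuse the
// [[FILL]] binding left over from @first, so they can pass against wrong IR.
// With variable scoping enabled, FileCheck reports [[FILL]] as undefined at
// the second use and forces the test to rebind it under its own CHECK-LABEL.

// CHECK-LABEL: func @first
// CHECK: %[[FILL:.*]] = linalg.fill
// CHECK: linalg.matvec {{.*}} outs(%[[FILL]]

// CHECK-LABEL: func @second
// CHECK: linalg.vecmat {{.*}} outs(%[[FILL]]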