[StableHLO][NFC] Enable FileCheck variable scope in linalg lowering tests

This option invalidates all local variable bindings whenever a `CHECK-LABEL` directive matches.

Enabling this option revealed a couple of issues in the dot product tests,
where stale variable bindings from previous functions were used.

Issue: iree-org#12678
kuhar committed Apr 12, 2023
1 parent 862f414 commit 219f4bc
Showing 3 changed files with 38 additions and 32 deletions.
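
Background (an illustration, not part of the commit): FileCheck's
`--enable-var-scope` option treats every variable whose name does not start
with `$` as local and discards all local bindings whenever a `CHECK-LABEL`
matches. A minimal sketch of the failure mode this catches, using the
hypothetical functions @first and @second:

// With scoping disabled, [[INIT]] in @second silently reuses the binding
// made in @first; with scoping enabled, FileCheck reports an undefined
// variable instead of a bogus match.
// CHECK-LABEL: func @first
// CHECK: %[[INIT:.*]] = tensor.empty()
// CHECK-LABEL: func @second
// CHECK: outs(%[[INIT]]

This is also why the tests below downgrade `CHECK-LABEL: func @...` to plain
`CHECK: func @...` wherever `#[[OPERAND_MAP]]` and `#[[RESULT_MAP]]` are bound
before the function: a `CHECK-LABEL` match would invalidate those bindings
before the checks in the function body could use them.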
@@ -1,5 +1,5 @@
// RUN: iree-opt %s --iree-stablehlo-to-linalg --split-input-file \
-// RUN: --canonicalize | FileCheck --enable-var-scope=false %s
+// RUN: --canonicalize | FileCheck %s

// CHECK-LABEL: func @concatenate(
// CHECK-SAME: %[[VAL_0:[a-zA-Z0-9_]*]]
@@ -227,7 +227,7 @@ func.func @einsum_dynamic_size_broadcast_dot(%arg0: tensor<?x?x4xf32>, %arg1: te

// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, 0)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
-// CHECK-LABEL: func @broadcast_in_dim
+// CHECK: func @broadcast_in_dim
func.func @broadcast_in_dim(%operand: tensor<5x7x1xf32>) -> tensor<7x10x6x4x5xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
{broadcast_dimensions = dense<[4,0,2]> : tensor<3xi64>}
@@ -251,7 +251,7 @@ func.func @broadcast_in_dim(%operand: tensor<5x7x1xf32>) -> tensor<7x10x6x4x5xf3

// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, 0)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3, d4)>
-// CHECK-LABEL: func @broadcast_in_dim_ui32
+// CHECK: func @broadcast_in_dim_ui32
func.func @broadcast_in_dim_ui32(%operand: tensor<5x7x1xui32>) -> tensor<7x10x6x4x5xui32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
{broadcast_dimensions = dense<[4,0,2]> : tensor<3xi64>}
@@ -278,7 +278,7 @@ func.func @broadcast_in_dim_ui32(%operand: tensor<5x7x1xui32>) -> tensor<7x10x6x

// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1) -> (d0)>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @broadcast_in_dim_with_one_to_one
+// CHECK: func @broadcast_in_dim_with_one_to_one
func.func @broadcast_in_dim_with_one_to_one(
%operand: tensor<1xf32>) -> tensor<1x5xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
@@ -301,7 +301,7 @@ func.func @broadcast_in_dim_with_one_to_one(

// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d2, d0, d1)>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-// CHECK-LABEL: func @broadcast_in_dim_with_transpose
+// CHECK: func @broadcast_in_dim_with_transpose
func.func @broadcast_in_dim_with_transpose(
%operand: tensor<2x3x4xf32>) -> tensor<3x4x2x5xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
@@ -326,7 +326,7 @@ func.func @broadcast_in_dim_with_transpose(

// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2) -> ()>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @broadcast_in_dim_scalar
+// CHECK: func @broadcast_in_dim_scalar
func.func @broadcast_in_dim_scalar(%operand: tensor<f32>) -> tensor<7x10x6xf32> {
%0 = "stablehlo.broadcast_in_dim"(%operand)
{broadcast_dimensions = dense<[]> : tensor<0xi64>}
@@ -347,7 +347,7 @@ func.func @broadcast_in_dim_scalar(%operand: tensor<f32>) -> tensor<7x10x6xf32>

// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1, d2) -> ()>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @broadcast_scalar
+// CHECK: func @broadcast_scalar
func.func @broadcast_scalar(%arg: tensor<f32>) -> tensor<4x2x1xf32> {
%0 = "stablehlo.broadcast"(%arg) {broadcast_sizes = dense<[4, 2, 1]> : tensor<3xi64>} : (tensor<f32>) -> tensor<4x2x1xf32>
func.return %0: tensor<4x2x1xf32>
@@ -368,7 +368,7 @@ func.func @broadcast_scalar(%arg: tensor<f32>) -> tensor<4x2x1xf32> {

// CHECK-DAG: #[[OPERAND_MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>
// CHECK-DAG: #[[RESULT_MAP:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d3, d4, d5)>
-// CHECK-LABEL: func @broadcast
+// CHECK: func @broadcast
func.func @broadcast(%arg: tensor<4x?x16xf32>) -> tensor<4x2x1x4x?x16xf32> {
%0 = "stablehlo.broadcast"(%arg) {broadcast_sizes = dense<[4, 2, 1]> : tensor<3xi64>} : (tensor<4x?x16xf32>) -> tensor<4x2x1x4x?x16xf32>
func.return %0: tensor<4x2x1x4x?x16xf32>
@@ -390,7 +390,7 @@ func.func @broadcast(%arg: tensor<4x?x16xf32>) -> tensor<4x2x1x4x?x16xf32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_f32
+// CHECK: func @iota_f32
func.func @iota_f32() -> tensor<7x10xf32> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64, someattr} : () -> (tensor<7x10xf32>)
func.return %result : tensor<7x10xf32>
@@ -417,7 +417,7 @@ func.func @iota_f32() -> tensor<7x10xf32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_i32
+// CHECK: func @iota_i32
func.func @iota_i32() -> tensor<7x10xi32> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64} : () -> (tensor<7x10xi32>)
func.return %result : tensor<7x10xi32>
@@ -433,7 +433,7 @@ func.func @iota_i32() -> tensor<7x10xi32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_ui32
+// CHECK: func @iota_ui32
func.func @iota_ui32() -> tensor<7x10xui32> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64} : () -> (tensor<7x10xui32>)
func.return %result : tensor<7x10xui32>
@@ -450,7 +450,7 @@ func.func @iota_ui32() -> tensor<7x10xui32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @iota_complexf32
+// CHECK: func @iota_complexf32
func.func @iota_complexf32() -> tensor<7x10xcomplex<f32>> {
%result = "stablehlo.iota"() {iota_dimension = 1 : i64} : () -> (tensor<7x10xcomplex<f32>>)
func.return %result : tensor<7x10xcomplex<f32>>
@@ -469,7 +469,7 @@ func.func @iota_complexf32() -> tensor<7x10xcomplex<f32>> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @dynamic_iota_f32
+// CHECK: func @dynamic_iota_f32
// CHECK-SAME: %[[SHAPE:.*]]: tensor<?xi32>
func.func @dynamic_iota_f32(%shape: tensor<?xi32>) -> tensor<?x?x8xf32> {
%result = "stablehlo.dynamic_iota"(%shape) {iota_dimension = 1 : i64} : (tensor<?xi32>) -> (tensor<?x?x8xf32>)
@@ -491,7 +491,7 @@ func.func @dynamic_iota_f32(%shape: tensor<?xi32>) -> tensor<?x?x8xf32> {
// -----

// CHECK: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-LABEL: func @dyanmic_iota_ui32
+// CHECK: func @dyanmic_iota_ui32
// CHECK-SAME: %[[SHAPE:.*]]: tensor<?xi32>
func.func @dyanmic_iota_ui32(%shape: tensor<?xi32>) -> tensor<?x?x8xui32> {
%result = "stablehlo.dynamic_iota"(%shape) {iota_dimension = 1 : i64} : (tensor<?xi32>) -> (tensor<?x?x8xui32>)
@@ -704,7 +704,7 @@ func.func @set_dimension_size(

// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d0, d3, d2)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-// CHECK-LABEL: func @transpose
+// CHECK: func @transpose
func.func @transpose(%arg0: tensor<2x3x9x5xi32>) -> tensor<3x2x5x9xi32> {
%0 = "stablehlo.transpose"(%arg0) {permutation = dense<[1, 0, 3, 2]> : tensor<4xi64>}
: (tensor<2x3x9x5xi32>) -> tensor<3x2x5x9xi32>
@@ -719,7 +719,7 @@ func.func @transpose(%arg0: tensor<2x3x9x5xi32>) -> tensor<3x2x5x9xi32> {

// CHECK-DAG: #[[OPERAND_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d1, d0, d3, d2)>
// CHECK-DAG: #[[RESULT_MAP:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
-// CHECK-LABEL: func @transpose_dynamic
+// CHECK: func @transpose_dynamic
func.func @transpose_dynamic(%arg0: tensor<?x?x9x?xi32>) -> tensor<?x?x?x9xi32> {
%0 = "stablehlo.transpose"(%arg0) {permutation = dense<[1, 0, 3, 2]> : tensor<4xi64>, someattr}
: (tensor<?x?x9x?xi32>) -> tensor<?x?x?x9xi32>
@@ -750,6 +750,8 @@ func.func @transpose_dynamic(%arg0: tensor<?x?x9x?xi32>) -> tensor<?x?x?x9xi32>
// CHECK-PRIMITIVE-SAME: permutation = [1, 0, 3, 2]
// CHECK-PRIMITIVE-SAME: {someattr}

+// -----
+
func.func @transpose_unsigned(%arg0: tensor<2x2xui32>) -> tensor<2x2xui32> {
%0 = "stablehlo.transpose"(%arg0) {
permutation = dense<[1, 0]> : tensor<2xi64>,
@@ -1,5 +1,8 @@
// RUN: iree-opt %s --iree-stablehlo-to-linalg --split-input-file \
-// RUN: --canonicalize | FileCheck --enable-var-scope=false %s
+// RUN: --canonicalize | FileCheck %s

+// Note: We need the canonicalization pass to deduplicate constants. This test
+// does not rely on it to simplify arithmetic, etc.
+
func.func @dot_general(%arg0: tensor<?x?x?xf32>,
%arg1: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
@@ -105,8 +108,9 @@ func.func @dot_general_multiple_batch_dimensions(%arg0: tensor<3x4x2x4xi32>,
// CHECK: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d4, d2)>
// CHECK: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d3, d4)>
// CHECK: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1, d2, d3)>
-// CHECK-LABEL: func @dot_general_multiple_batch_dimensions(
-// CHECK: linalg.generic
+// CHECK: func @dot_general_multiple_batch_dimensions
+// CHECK-SAME: (%[[ARG0:.+]]: tensor<3x4x2x4xi32>, %[[ARG1:.+]]: tensor<3x4x3x2xi32>)
+// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction"]}
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]] : tensor<3x4x2x4xi32>, tensor<3x4x3x2xi32>)
@@ -121,8 +125,8 @@ func.func @dot_matmul(%arg0: tensor<2x3xf32>,
: (tensor<2x3xf32>, tensor<3x?xf32>) -> tensor<2x?xf32>
func.return %0 : tensor<2x?xf32>
}
-// CHECK-LABEL: func @dot_matmul(
-// CHECK-SAME: %[[ARG0:.*]]: tensor<2x3xf32>, %[[ARG1:.*]]: tensor<3x?xf32>)
+// CHECK-LABEL: func @dot_matmul
+// CHECK-SAME: (%[[ARG0:.*]]: tensor<2x3xf32>, %[[ARG1:.*]]: tensor<3x?xf32>)
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[D1:.*]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[INIT:.*]] = tensor.empty(%[[D1]])
@@ -236,7 +240,7 @@ func.func @dot_vecmat(%arg0: tensor<3xf32>,
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[D1:.*]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[INIT:.*]] = tensor.empty(%[[D1]])
-// CHECK: linalg.fill ins(%{{.*}}{{.*}}outs(%[[INIT]]
+// CHECK: %[[FILL:.*]] = linalg.fill ins(%{{.*}}{{.*}}outs(%[[INIT]]
// CHECK: linalg.vecmat
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]] : tensor<3xf32>, tensor<3x?xf32>)
// CHECK-SAME: outs(%[[FILL]] : tensor<?xf32>)
@@ -1,9 +1,9 @@
// RUN: iree-opt %s --iree-stablehlo-to-linalg --split-input-file \
-// RUN: --canonicalize | FileCheck --enable-var-scope=false %s
+// RUN: --canonicalize | FileCheck %s

// RUN: iree-opt %s --iree-stablehlo-to-linalg="enable-primitive-ops=true" \
// RUN: --split-input-file --canonicalize | \
-// RUN: FileCheck %s --enable-var-scope=false --check-prefix=CHECK-PRIMITIVE
+// RUN: FileCheck %s --check-prefix=CHECK-PRIMITIVE

// CHECK: #map = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @float_add
@@ -875,7 +875,7 @@ func.func @select(%pred: tensor<2x2xi1>, %lhs: tensor<2x2xf32>,

// CHECK-DAG: #[[SCALAR_MAP:.*]] = affine_map<(d0, d1) -> ()>
// CHECK-DAG: #[[ID_MAP:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @select_scalar_pred_dyn
+// CHECK: func @select_scalar_pred_dyn
// CHECK-SAME: (%[[PRED:.*]]: tensor<i1>, %[[LHS:.*]]: tensor<2x?xf32>, %[[RHS:.*]]: tensor<2x?xf32>)
func.func @select_scalar_pred_dyn(%pred : tensor<i1>, %lhs: tensor<2x?xf32>, %rhs: tensor<2x?xf32>) -> tensor<2x?xf32> {
%0 = "stablehlo.select"(%pred, %lhs, %rhs) {someattr} : (tensor<i1>, tensor<2x?xf32>, tensor<2x?xf32>) -> (tensor<2x?xf32>)
@@ -954,7 +954,7 @@ func.func @bitcast_convert_dynamic(%input: tensor<?x?xi32>) -> tensor<?x?xf32> {

// CHECK: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0)>
// CHECK: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @bitcast_convert_expand
+// CHECK: func @bitcast_convert_expand
func.func @bitcast_convert_expand(%input: tensor<6xi32>) -> tensor<6x4xi8> {
%result = "stablehlo.bitcast_convert"(%input) : (tensor<6xi32>) -> tensor<6x4xi8>
func.return %result : tensor<6x4xi8>
@@ -979,7 +979,7 @@ func.func @bitcast_convert_expand(%input: tensor<6xi32>) -> tensor<6x4xi8> {

// CHECK: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
-// CHECK-LABEL: func @bitcast_convert_contract
+// CHECK: func @bitcast_convert_contract
func.func @bitcast_convert_contract(%input: tensor<7x4xi8>) -> tensor<7xi32> {
%result = "stablehlo.bitcast_convert"(%input) : (tensor<7x4xi8>) -> tensor<7xi32>
func.return %result : tensor<7xi32>
@@ -1118,7 +1118,7 @@ func.func @shift_right_logical(%lhs: tensor<2x2xi32>,
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
-// CHECK-LABEL: func @einsum_basic
+// CHECK: func @einsum_basic
func.func @einsum_basic(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x5x6xf32>) -> tensor<3x4x6xf32> {
%0 = "stablehlo.einsum"(%arg0, %arg1) {einsum_config = "ijk,ikm->ijm", someattr}: (tensor<3x4x5xf32>, tensor<3x5x6xf32>) -> tensor<3x4x6xf32>
func.return %0 : tensor<3x4x6xf32>
@@ -1141,7 +1141,7 @@ func.func @einsum_basic(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x5x6xf32>) -> t
// -----

// CHECK: #map = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @float_pow
+// CHECK: func @float_pow
func.func @float_pow(%lhs: tensor<2x2xf32>,
%rhs: tensor<2x2xf32>) -> tensor<2x2xf32> {
// CHECK: linalg.generic
@@ -1174,10 +1174,10 @@ func.func @complex_pow(%lhs: tensor<2x2xcomplex<f32>>,
// -----

// CHECK: #map = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-LABEL: func @integer_pow
+// CHECK: func @integer_pow
func.func @integer_pow(%lhs: tensor<2x2xi32>,
%rhs: tensor<2x2xi32>) -> tensor<2x2xi32> {
-// CHECK: linalg.generic
+// CHECK: linalg.generic
// CHECK: ^{{[a-z0-9_]*}}
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: i32
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: i32
@@ -1445,4 +1445,4 @@ func.func @reduce_precision(%arg0: tensor<1x2x3x4xf32>)
-> tensor<1x2x3x4xf32> {
%0 = "stablehlo.reduce_precision"(%arg0) {exponent_bits=3:i32, mantissa_bits=3:i32} : (tensor<1x2x3x4xf32>) -> tensor<1x2x3x4xf32>
return %0 : tensor<1x2x3x4xf32>
-}
+}
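
Side note on the `dot_vecmat` hunk above (an explanatory sketch, not part of
the diff): the old check never bound `FILL` inside that function, so
`outs(%[[FILL]] : tensor<?xf32>)` matched against a leftover binding from an
earlier test case. Capturing the fill result locally makes the check
self-contained:

// Before: %[[FILL]] was undefined here and reused a stale binding.
// CHECK: linalg.fill ins(%{{.*}}{{.*}}outs(%[[INIT]]
// After: bind the fill result where it is produced, then use it.
// CHECK: %[[FILL:.*]] = linalg.fill ins(%{{.*}}{{.*}}outs(%[[INIT]]
// CHECK: linalg.vecmat
// CHECK-SAME: outs(%[[FILL]] : tensor<?xf32>)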
