30 changes: 15 additions & 15 deletions mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir
@@ -11,7 +11,7 @@ func.func @fuse_by_collapsing(%arg0 : tensor<2x12x5x336x9xi32>,
%arg1 : tensor<2x3x4xi32>, %arg2 : tensor<5x6x7x8xi32>) -> tensor<2x3x4x5x6x7x8x9xi32> {
%expand = tensor.expand_shape %arg0 [[0], [1, 2], [3], [4, 5, 6], [7]]
: tensor<2x12x5x336x9xi32> into tensor<2x3x4x5x6x7x8x9xi32>
%init = linalg.init_tensor [2, 3, 4, 5, 6, 7, 8, 9] : tensor<2x3x4x5x6x7x8x9xi32>
%init = tensor.empty() : tensor<2x3x4x5x6x7x8x9xi32>
%generic = linalg.generic {
indexing_maps = [#map0, #map1, #map2, #map3],
iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]}
@@ -31,7 +31,7 @@ func.func @fuse_by_collapsing(%arg0 : tensor<2x12x5x336x9xi32>,
// CHECK-SAME: %[[ARG0:.+]]: tensor<2x12x5x336x9xi32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<2x3x4xi32>
// CHECK-SAME: %[[ARG2:.+]]: tensor<5x6x7x8xi32>
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [2, 3, 4, 5, 6, 7, 8, 9]
// CHECK-DAG: %[[INIT:.+]] = tensor.empty()
// CHECK-DAG: %[[ARG1_RESHAPE:.+]] = tensor.collapse_shape %[[ARG1]] {{\[}}[0], [1, 2]{{\]}}
// CHECK-DAG: %[[ARG2_RESHAPE:.+]] = tensor.collapse_shape %[[ARG2]] {{\[}}[0], [1, 2, 3]{{\]}}
// CHECK-DAG: %[[INIT_RESHAPE:.+]] = tensor.collapse_shape %[[INIT]] {{\[}}[0], [1, 2], [3], [4, 5, 6], [7]{{\]}}
@@ -62,7 +62,7 @@ func.func @fuse_by_collapsing_indexing_op(%arg0 : tensor<2x12x5x336x9xi32>,
%arg1 : tensor<2x3x4xi32>, %arg2 : tensor<5x6x7x8xi32>) -> tensor<2x3x4x5x6x7x8x9xi32> {
%expand = tensor.expand_shape %arg0 [[0], [1, 2], [3], [4, 5, 6], [7]]
: tensor<2x12x5x336x9xi32> into tensor<2x3x4x5x6x7x8x9xi32>
%init = linalg.init_tensor [2, 3, 4, 5, 6, 7, 8, 9] : tensor<2x3x4x5x6x7x8x9xi32>
%init = tensor.empty() : tensor<2x3x4x5x6x7x8x9xi32>
%generic = linalg.generic {
indexing_maps = [#map0, #map1, #map2, #map3],
iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]}
@@ -124,7 +124,7 @@ func.func @fuse_by_collapsing_change_reshape_order(%arg0 : tensor<9x56x2x60x6xi3
%arg1 : tensor<7x8x2xi32>, %arg2 : tensor<6x3x4x5xi32>) -> tensor<2x3x4x5x6x7x8x9xi32> {
%expand = tensor.expand_shape %arg0 [[0], [1, 2], [3], [4, 5, 6], [7]]
: tensor<9x56x2x60x6xi32> into tensor<9x7x8x2x3x4x5x6xi32>
%init = linalg.init_tensor [2, 3, 4, 5, 6, 7, 8, 9] : tensor<2x3x4x5x6x7x8x9xi32>
%init = tensor.empty() : tensor<2x3x4x5x6x7x8x9xi32>
%generic = linalg.generic {
indexing_maps = [#map0, #map1, #map2, #map3],
iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]}
@@ -145,7 +145,7 @@ func.func @fuse_by_collapsing_change_reshape_order(%arg0 : tensor<9x56x2x60x6xi3
// CHECK-SAME: %[[ARG0:.+]]: tensor<9x56x2x60x6xi32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<7x8x2xi32>
// CHECK-SAME: %[[ARG2:.+]]: tensor<6x3x4x5xi32>
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [2, 3, 4, 5, 6, 7, 8, 9]
// CHECK-DAG: %[[INIT:.+]] = tensor.empty()
// CHECK-DAG: %[[ARG1_RESHAPE:.+]] = tensor.collapse_shape %[[ARG1]] {{\[}}[0, 1], [2]{{\]}}
// CHECK-DAG: %[[ARG2_RESHAPE:.+]] = tensor.collapse_shape %[[ARG2]] {{\[}}[0], [1, 2, 3]{{\]}}
// CHECK-DAG: %[[INIT_RESHAPE:.+]] = tensor.collapse_shape %[[INIT]] {{\[}}[0], [1, 2, 3], [4], [5, 6], [7]{{\]}}
@@ -176,7 +176,7 @@ func.func @fuse_by_collapsing_dynamic(%arg0 : tensor<?x?x?x?x?xi32>,
%d4 = tensor.dim %arg2, %c0 : tensor<?x?x?x?xi32>
%d6 = tensor.dim %arg1, %c1 : tensor<?x?x?xi32>
%d7 = tensor.dim %arg0, %c0 : tensor<?x?x?x?x?xi32>
%init = linalg.init_tensor [%d0, 3, %d2, 5, %d4, 7, %d6, %d7] : tensor<?x3x?x5x?x7x?x?xi32>
%init = tensor.empty(%d0, %d2, %d4, %d6, %d7) : tensor<?x3x?x5x?x7x?x?xi32>
%generic = linalg.generic {
indexing_maps = [#map0, #map1, #map2, #map3],
iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]}
@@ -254,7 +254,7 @@ func.func @fuse_reductions(%arg0 : tensor<2x?x5xf32>, %arg1 : tensor<2x5xf32>) -
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
func.func @no_fuse_unpreserved_folding(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2x3xf32>) -> tensor<2x3x4x5xf32> {
%0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] : tensor<2x12x5xf32> into tensor<2x3x4x5xf32>
%init = linalg.init_tensor [2, 3, 4, 5] : tensor<2x3x4x5xf32>
%init = tensor.empty() : tensor<2x3x4x5xf32>
%1 = linalg.generic {
indexing_maps = [#map0, #map1, #map0],
iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
@@ -281,7 +281,7 @@ func.func @no_fuse_unpreserved_folding(%arg0 : tensor<2x12x5xf32>, %arg1 : tenso
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d1, d3)>
func.func @no_fuse_unpreserved_folding_transpose(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2xf32>) -> tensor<2x4x3x5xf32> {
%0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] : tensor<2x12x5xf32> into tensor<2x3x4x5xf32>
%init = linalg.init_tensor [2, 4, 3, 5] : tensor<2x4x3x5xf32>
%init = tensor.empty() : tensor<2x4x3x5xf32>
%1 = linalg.generic {
indexing_maps = [#map0, #map1, #map2],
iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
@@ -308,7 +308,7 @@ func.func @no_fuse_unpreserved_folding_transpose(%arg0 : tensor<2x12x5xf32>, %ar
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d3)>
func.func @no_fuse_mismatched_iterator_types(%arg0 : tensor<2x12x5xf32>, %arg1 : tensor<2x3xf32>) -> tensor<2x5xf32> {
%0 = tensor.expand_shape %arg0 [[0], [1, 2], [3]] : tensor<2x12x5xf32> into tensor<2x3x4x5xf32>
%init = linalg.init_tensor [2, 5] : tensor<2x5xf32>
%init = tensor.empty() : tensor<2x5xf32>
%1 = linalg.generic {
indexing_maps = [#map0, #map1, #map2],
iterator_types = ["parallel", "reduction", "parallel", "parallel"]}
@@ -337,7 +337,7 @@ func.func @no_fuse_mismatched_iterator_types(%arg0 : tensor<2x12x5xf32>, %arg1 :
func.func @control_fusion(%arg0 : tensor<6xf32>, %arg1 : tensor<20xf32>) -> tensor<2x3x4x5xf32> {
%0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<6xf32> into tensor<2x3xf32>
%1 = tensor.expand_shape %arg1 [[0, 1]] : tensor<20xf32> into tensor<4x5xf32>
%init = linalg.init_tensor [2, 3, 4, 5] : tensor<2x3x4x5xf32>
%init = tensor.empty() : tensor<2x3x4x5xf32>
%2 = linalg.generic {
indexing_maps = [#map0, #map1, #map2],
iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
@@ -370,7 +370,7 @@ func.func @control_fusion(%arg0 : tensor<6xf32>, %arg1 : tensor<20xf32>) -> tens
// CONTROL-SAME: %[[ARG0:.+]]: tensor<6xf32>
// CONTROL-SAME: %[[ARG1:.+]]: tensor<20xf32>
// CONTROL: %[[EXPAND:.+]] = tensor.expand_shape %[[ARG0]]
// CONTROL: %[[INIT:.+]] = linalg.init_tensor [2, 3, 4, 5]
// CONTROL: %[[INIT:.+]] = tensor.empty()
// CONTROL: %[[INIT_RESHAPE:.+]] = tensor.collapse_shape %[[INIT]] {{\[}}[0], [1], [2, 3]{{\]}}
// CONTROL: %[[GENERIC:.+]] = linalg.generic
// CONTROL-SAME: ins(%[[EXPAND]], %[[ARG1]] :
@@ -383,7 +383,7 @@ func.func @control_fusion(%arg0 : tensor<6xf32>, %arg1 : tensor<20xf32>) -> tens
#map = affine_map<(d0) -> (d0)>
func.func @zero_D_test(%arg0: tensor<f32>) -> tensor<1xf32> {
%0 = tensor.expand_shape %arg0 [] : tensor<f32> into tensor<1xf32>
%init = linalg.init_tensor [1] : tensor<1xf32>
%init = tensor.empty() : tensor<1xf32>
%1 = linalg.generic {
indexing_maps = [#map, #map],
iterator_types = ["parallel"]}
@@ -444,7 +444,7 @@ func.func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>) -> tensor<?x8x?x4x
%0 = tensor.expand_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?xi32> into tensor<?x4x?x8xi32>
%d0 = tensor.dim %0, %c0 : tensor<?x4x?x8xi32>
%d1 = tensor.dim %0, %c2 : tensor<?x4x?x8xi32>
%init = linalg.init_tensor [%d1, 8, %d0, 4] : tensor<?x8x?x4xi32>
%init = tensor.empty(%d1, %d0) : tensor<?x8x?x4xi32>
%1 = linalg.generic {
indexing_maps = [#map0, #map1],
iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
@@ -468,7 +468,7 @@ func.func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>) -> tensor<?x8x?x4x
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xi32>)
// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
// CHECK-DAG: %[[C8:.+]] = arith.constant 8 : index
// CHECK: %[[INIT:.+]] = linalg.init_tensor
// CHECK: %[[INIT:.+]] = tensor.empty
// CHECK: %[[COLLAPSE_INIT:.+]] = tensor.collapse_shape %[[INIT]] {{\[}}[0, 1], [2, 3]{{\]}}
// CHECK: %[[GENERIC:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
@@ -500,7 +500,7 @@ func.func @no_fold_non_consecutive_reduction_dims(%arg0 : tensor<?x?xi32>) -> te
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%0 = tensor.expand_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?xi32> into tensor<?x4x?x8xi32>
%init = linalg.init_tensor [] : tensor<i32>
%init = tensor.empty() : tensor<i32>
%1 = linalg.generic {
indexing_maps = [#map0, #map1],
iterator_types = ["reduction", "reduction", "reduction", "reduction"]}
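The hunks above all apply the same mechanical rewrite: linalg.init_tensor spelled out every size, static and dynamic, in its bracketed list, whereas tensor.empty takes only the dynamic sizes as index operands and reads the static ones from the result type. A minimal sketch of both cases, assuming nothing beyond the tensor and arith dialects (the function and value names below are illustrative, not taken from the patch):

func.func @empty_examples(%arg0 : tensor<?x8xf32>) -> (tensor<2x3x4xf32>, tensor<?x8xf32>) {
  // Static shape: no operands, the result type carries every size.
  %static = tensor.empty() : tensor<2x3x4xf32>
  // Dynamic shape: one index operand per '?' in the result type, in order.
  %c0 = arith.constant 0 : index
  %d0 = tensor.dim %arg0, %c0 : tensor<?x8xf32>
  %dynamic = tensor.empty(%d0) : tensor<?x8xf32>
  return %static, %dynamic : tensor<2x3x4xf32>, tensor<?x8xf32>
}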
78 changes: 39 additions & 39 deletions mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/fusion-elementwise-options.mlir
@@ -18,7 +18,7 @@ func.func @test_fusion_limit(
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %arg0, %c1 : tensor<?x?xf32>
%init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%0 = linalg.generic #binary2Dpointwise
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%init : tensor<?x?xf32>) {
8 changes: 4 additions & 4 deletions mlir/test/Dialect/Linalg/fusion-push-reshape.mlir
@@ -34,7 +34,7 @@ func.func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112

// CHECK-LABEL: func @reshape_multiple
// CHECK-SAME: (%[[A:.*]]: tensor<12544x16xf32>, %[[B:.*]]: tensor<12544x16xf32>, %[[C:.*]]: tensor<16xf32>)
// CHECK: %[[I:.*]] = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32>
// CHECK: %[[I:.*]] = tensor.empty() : tensor<112x112x16xf32>
// CHECK: %[[RI:.*]] = tensor.collapse_shape %[[I]] {{\[}}[0, 1], [2]] : tensor<112x112x16xf32> into tensor<12544x16xf32>
// CHECK: %[[R:.*]] = linalg.generic {indexing_maps = [#[[$MAP2]], #[[$MAP2]], #[[$MAP3]], #[[$MAP2]]],
// CHECK-SAME: iterator_types = ["parallel", "parallel"]}
@@ -47,7 +47,7 @@ func.func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
: tensor<12544x16xf32> into tensor<112x112x16xf32>
%1 = tensor.expand_shape %B [[0, 1], [2]]
: tensor<12544x16xf32> into tensor<112x112x16xf32>
%2 = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32>
%2 = tensor.empty() : tensor<112x112x16xf32>
%3 = linalg.generic {indexing_maps = [
affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
@@ -75,7 +75,7 @@ func.func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
func.func @reshape_negative(%A: tensor<12544x16xf32>, %B: tensor<112xf32>) -> tensor<112x112x16xf32> {
%20 = tensor.expand_shape %A [[0, 1], [2]]
: tensor<12544x16xf32> into tensor<112x112x16xf32>
%21 = linalg.init_tensor [112, 112, 16] : tensor<112x112x16xf32>
%21 = tensor.empty() : tensor<112x112x16xf32>
%22 = linalg.generic {indexing_maps = [
affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d1)>,
affine_map<(d0, d1, d2) -> (d0, d1, d2)>],
@@ -98,7 +98,7 @@ func.func @type_correctness(%arg0 : tensor<6x5xi32>, %arg1 : tensor<5xf32>,
%cst_8 = arith.constant 1.1920929E-7 : f32
%25 = tensor.expand_shape %arg0 [[0, 1], [2]]
: tensor<6x5xi32> into tensor<2x3x5xi32>
%26 = linalg.init_tensor [2, 3, 5] : tensor<2x3x5xf32>
%26 = tensor.empty() : tensor<2x3x5xf32>
%28 = linalg.generic {
indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
affine_map<(d0, d1, d2) -> (d2)>,
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/generalize-pad-tensor.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: func @generalize_pad_tensor_static_shape(
// CHECK-SAME: %[[IN:.*]]: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
// CHECK: %[[C0:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[INIT:.*]] = linalg.init_tensor [1, 32, 32, 1] : tensor<1x32x32x1xf32>
// CHECK: %[[INIT:.*]] = tensor.empty() : tensor<1x32x32x1xf32>
// CHECK: %[[FILL:.*]] = linalg.fill ins(%[[C0]] : f32) outs(%[[INIT]] : tensor<1x32x32x1xf32>) -> tensor<1x32x32x1xf32>
// CHECK: %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]][0, 2, 2, 0] [1, 28, 28, 1] [1, 1, 1, 1] : tensor<1x28x28x1xf32> into tensor<1x32x32x1xf32>
// CHECK: return %[[PADDED]] : tensor<1x32x32x1xf32>
@@ -28,7 +28,7 @@ func.func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> t
// CHECK: %[[OUT_DIM2:.*]] = arith.addi %[[OFFSET]], %[[C2]] : index
// CHECK: %[[DIM3:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<4x?x2x?xf32>
// CHECK: %[[OUT_DIM3:.*]] = arith.addi %[[DIM3]], %[[OFFSET]] : index
// CHECK: %[[INIT:.*]] = linalg.init_tensor [4, %[[DIM1]], %[[OUT_DIM2]], %[[OUT_DIM3]]] : tensor<4x?x?x?xf32>
// CHECK: %[[INIT:.*]] = tensor.empty(%[[DIM1]], %[[OUT_DIM2]], %[[OUT_DIM3]]) : tensor<4x?x?x?xf32>
// CHECK: %[[FILL:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[INIT]] : tensor<4x?x?x?xf32>) -> tensor<4x?x?x?xf32>
// CHECK: %[[DIM1_1:.*]] = tensor.dim %[[IN]], %[[C1]] : tensor<4x?x2x?xf32>
// CHECK: %[[DIM3_1:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<4x?x2x?xf32>
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/inline-scalar-operands.mlir
@@ -6,7 +6,7 @@

// CHECK: func @inline_zerod(%[[ARG:.*]]: tensor<4xf32>, %[[SCALAR:.*]]: tensor<f32>)
func.func @inline_zerod(%arg0: tensor<4xf32>, %scalar: tensor<f32>) -> tensor<4xf32> {
%0 = linalg.init_tensor [4] : tensor<4xf32>
%0 = tensor.empty() : tensor<4xf32>
// CHECK: linalg.generic {indexing_maps = [#[[MAP]], #[[MAP]]],
// CHECK-SAME: iterator_types = ["parallel"]} ins(%[[ARG]] : tensor<4xf32>)
%1 = linalg.generic {indexing_maps = [#map2, #map3, #map2],
@@ -31,7 +31,7 @@ func.func @inline_zerod(%arg0: tensor<4xf32>, %scalar: tensor<f32>) -> tensor<4x
// CHECK: func @inline_oned(%[[ARG:.*]]: tensor<4xf32>, %[[SCALAR:.*]]: tensor<1xf32>)
func.func @inline_oned(%arg0: tensor<4xf32>, %scalar: tensor<1xf32>) -> tensor<4xf32> {
// CHECK: %[[ZERO:.*]] = arith.constant 0 : index
%0 = linalg.init_tensor [4] : tensor<4xf32>
%0 = tensor.empty() : tensor<4xf32>
// CHECK: linalg.generic {indexing_maps = [#[[MAP]], #[[MAP]]],
// CHECK-SAME: iterator_types = ["parallel"]} ins(%[[ARG]] : tensor<4xf32>)
%1 = linalg.generic {indexing_maps = [#map2, #map3, #map2],
29 changes: 1 addition & 28 deletions mlir/test/Dialect/Linalg/invalid.mlir
@@ -323,36 +323,9 @@ func.func @matching_inits(%m: memref<?x?xf32>, %t: tensor<?x?xf32>) {

// -----

func.func @init_tensor_err(%arg0 : index, %arg1 : index)
{
// expected-error @+1 {{specified type 'tensor<4x?x?x5xf32>' does not match the inferred type 'tensor<4x5x?x?xf32>'}}
%1 = linalg.init_tensor [4, 5, %arg0, %arg1] : tensor<4x?x?x5xf32>
return
}

// -----

func.func @init_tensor_err(%arg0 : index)
{
// expected-error @+1 {{expected 4 sizes values}}
%1 = linalg.init_tensor [4, 5, %arg0] : tensor<4x?x?x5xf32>
return
}

// -----

func.func @init_tensor_err(%arg0 : index)
{
// expected-error @+1 {{expected 2 dynamic sizes values}}
%1 = "linalg.init_tensor"(%arg0) {static_sizes = [4, -1, -1, 5]} : (index) -> tensor<4x?x?x5xf32>
return
}

// -----

func.func @illegal_fill_tensor_no_return(%arg0 : index, %arg1 : index, %arg2 : f32)
{
%0 = linalg.init_tensor [%arg0, %arg1] : tensor<?x?xf32>
%0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf32>
// expected-error @+1 {{expected the number of results (0) to be equal to the number of output tensors (1)}}
linalg.fill ins(%arg2 : f32) outs(%0 : tensor<?x?xf32>)
}
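The three init_tensor error tests above are dropped rather than ported because the op itself is being removed; only the fill test survives, now building its operand with tensor.empty. The shape contract those tests checked carries over to tensor.empty — the number of index operands has to match the number of '?' dimensions in the result type — as in this sketch (names are illustrative, and the exact verifier wording is not quoted from the source):

func.func @empty_operand_contract(%d : index) {
  // One '?' in the result type, so exactly one index operand is expected.
  %0 = tensor.empty(%d) : tensor<4x?x5xf32>
  // Dropping %d, or passing a second operand, would be rejected by the
  // tensor.empty verifier, analogous to the deleted init_tensor cases above.
  return
}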
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/lower-pad-tensor.mlir
@@ -52,7 +52,7 @@ func.func @pad_tensor_detailed(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1

// CHECK: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32>
// CHECK: %[[CTE:.+]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[TMP:.+]] = linalg.init_tensor [1, 32, 32, 1] : tensor<1x32x32x1xf32>
// CHECK: %[[TMP:.+]] = tensor.empty() : tensor<1x32x32x1xf32>
// CHECK: %[[R1c:.+]] = linalg.fill
// CHECK: %[[R2c:.+]] = linalg.generic
// CHECK-SAME: indexing_maps = [#[[$MAP4]], #[[$MAP5]]]
62 changes: 31 additions & 31 deletions mlir/test/Dialect/Linalg/named-ops.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: func @depthwise_conv_1d_nwc_wcm
func.func @depthwise_conv_1d_nwc_wcm(%input: tensor<1x12x8xf32>, %filter: tensor<3x8x8xf32>) -> tensor<1x10x8x8xf32> {
%zero = arith.constant 0.000000e+00 : f32
%init = linalg.init_tensor [1, 10, 8, 8] : tensor<1x10x8x8xf32>
%init = tensor.empty() : tensor<1x10x8x8xf32>
%fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x10x8x8xf32>) -> tensor<1x10x8x8xf32>
// CHECK: depthwise_conv_1d_nwc_wcm
%0 = linalg.depthwise_conv_1d_nwc_wcm {dilations = dense<1> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
@@ -17,7 +17,7 @@ func.func @depthwise_conv_1d_nwc_wcm(%input: tensor<1x12x8xf32>, %filter: tensor
// CHECK-LABEL: func @depthwise_conv_1d_nwc_wc
func.func @depthwise_conv_1d_nwc_wc(%input: tensor<1x12x8xf32>, %filter: tensor<3x8xf32>) -> tensor<1x10x8xf32> {
%zero = arith.constant 0.000000e+00 : f32
%init = linalg.init_tensor [1, 10, 8] : tensor<1x10x8xf32>
%init = tensor.empty() : tensor<1x10x8xf32>
%fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x10x8xf32>) -> tensor<1x10x8xf32>
// CHECK: depthwise_conv_1d_nwc_wc
%0 = linalg.depthwise_conv_1d_nwc_wc {dilations = dense<1> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
@@ -31,7 +31,7 @@ func.func @depthwise_conv_1d_nwc_wc(%input: tensor<1x12x8xf32>, %filter: tensor<
// CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwcm_tensor
func.func @depthwise_conv_2d_nhwc_hwcm_tensor(%input: tensor<2x4x5x2xf32>, %filter: tensor<2x2x2x3xf32>) -> tensor<2x3x4x2x3xf32> {
%zero = arith.constant 0.000000e+00 : f32
%init = linalg.init_tensor [2, 3, 4, 2, 3] : tensor<2x3x4x2x3xf32>
%init = tensor.empty() : tensor<2x3x4x2x3xf32>
%fill = linalg.fill ins(%zero : f32) outs(%init : tensor<2x3x4x2x3xf32>) -> tensor<2x3x4x2x3xf32>
// CHECK: %{{.+}} = linalg.depthwise_conv_2d_nhwc_hwcm
// CHECK-SAME: {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -59,7 +59,7 @@ func.func @depthwise_conv_2d_nhwc_hwcm_memref(%input: memref<2x4x5x2xf32>, %filt

// CHECK-LABEL: func @depthwise_conv_1d_nw_tensor
func.func @depthwise_conv_1d_nw_tensor(%input: tensor<1x113x96xf32>, %filter: tensor<3x96xf32>) -> tensor<1x56x96xf32> {
%init = linalg.init_tensor [1, 56, 96] : tensor<1x56x96xf32>
%init = tensor.empty() : tensor<1x56x96xf32>
// CHECK: %{{.+}} = linalg.depthwise_conv_1d_nw
// CHECK-SAME: {dilations = dense<1> : vector<1xi64>, strides = dense<2> : vector<1xi64>}
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x113x96xf32>, tensor<3x96xf32>)
@@ -72,7 +72,7 @@

// CHECK-LABEL: func @depthwise_conv_2d_nhwc_hwc_tensor
func.func @depthwise_conv_2d_nhwc_hwc_tensor(%input: tensor<1x113x113x96xf32>, %filter: tensor<3x3x96xf32>) -> tensor<1x56x56x96xf32> {
%init = linalg.init_tensor [1, 56, 56, 96] : tensor<1x56x56x96xf32>
%init = tensor.empty() : tensor<1x56x56x96xf32>
// CHECK: %{{.+}} = linalg.depthwise_conv_2d_nhwc_hwc
// CHECK-SAME: {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x113x113x96xf32>, tensor<3x3x96xf32>)
@@ -97,7 +97,7 @@ func.func @depthwise_conv_2d_nhwc_hwc_memref(%input: memref<1x113x113x96xf32>, %

// CHECK-LABEL: func @depthwise_conv_2d_nchw_chw_tensor
func.func @depthwise_conv_2d_nchw_chw_tensor(%input: tensor<1x96x113x113xf32>, %filter: tensor<96x3x3xf32>) -> tensor<1x96x56x56xf32> {
%init = linalg.init_tensor [1, 96, 56, 56] : tensor<1x96x56x56xf32>
%init = tensor.empty() : tensor<1x96x56x56xf32>
// CHECK: %{{.+}} = linalg.depthwise_conv_2d_nchw_chw
// CHECK-SAME: {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>}
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x96x113x113xf32>, tensor<96x3x3xf32>)
@@ -122,7 +122,7 @@ func.func @depthwise_conv_2d_nchw_chw_memref(%input: memref<1x96x113x113xf32>, %

func.func @depthwise_conv_2d_nhwc_hwcm_tensor_dilated(%input: tensor<2x8x9x2xf32>, %filter: tensor<2x2x2x3xf32>) -> tensor<2x6x7x2x3xf32> {
%zero = arith.constant 0.000000e+00 : f32
%init = linalg.init_tensor [2, 6, 7, 2, 3] : tensor<2x6x7x2x3xf32>
%init = tensor.empty() : tensor<2x6x7x2x3xf32>
%fill = linalg.fill ins(%zero : f32) outs(%init : tensor<2x6x7x2x3xf32>) -> tensor<2x6x7x2x3xf32>
// CHECK: %{{.+}} = linalg.depthwise_conv_2d_nhwc_hwcm
// CHECK-SAME: {dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -186,7 +186,7 @@ func.func @depthwise_conv_2d_input_nhwc_filter_wrong_stride_size(%input: memref<
// CHECK-LABEL: func @depthwise_conv_3d_ndhwc_dhwcm
func.func @depthwise_conv_3d_ndhwc_dhwcm(%input: tensor<2x6x13x12x6xf32>, %filter: tensor<2x1x3x6x6xf32>) -> tensor<2x3x13x4x6x6xf32> {
%zero = arith.constant 0.000000e+00 : f32
%init = linalg.init_tensor [2, 3, 13, 4, 6, 6] : tensor<2x3x13x4x6x6xf32>
%init = tensor.empty() : tensor<2x3x13x4x6x6xf32>
%fill = linalg.fill ins(%zero : f32) outs(%init : tensor<2x3x13x4x6x6xf32>) -> tensor<2x3x13x4x6x6xf32>
// CHECK: depthwise_conv_3d_ndhwc_dhwcm
%0 = linalg.depthwise_conv_3d_ndhwc_dhwcm {dilations = dense<1> : tensor<3xi64>, strides = dense<[2, 1, 3]> : tensor<3xi64>}
@@ -200,7 +200,7 @@ func.func @depthwise_conv_3d_ndhwc_dhwcm(%input: tensor<2x6x13x12x6xf32>, %filte
// CHECK-LABEL: func @depthwise_conv_3d_ndhwc_dhwc
func.func @depthwise_conv_3d_ndhwc_dhwc(%input: tensor<2x6x13x12x6xf32>, %filter: tensor<2x1x3x6xf32>) -> tensor<2x3x13x4x6xf32> {
%zero = arith.constant 0.000000e+00 : f32
%init = linalg.init_tensor [2, 3, 13, 4, 6] : tensor<2x3x13x4x6xf32>
%init = tensor.empty() : tensor<2x3x13x4x6xf32>
%fill = linalg.fill ins(%zero : f32) outs(%init : tensor<2x3x13x4x6xf32>) -> tensor<2x3x13x4x6xf32>
// CHECK: depthwise_conv_3d_ndhwc_dhwc
%0 = linalg.depthwise_conv_3d_ndhwc_dhwc {dilations = dense<1> : tensor<3xi64>, strides = dense<[2, 1, 3]> : tensor<3xi64>}
@@ -410,8 +410,8 @@ func.func @conv_3d_ndhwc_dhwcf(%input: memref<?x?x?x?x?xf32>, %filter: memref<?x
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xf32>, tensor<3x3xf32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
func.func @pooling_nhwc_sum_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
%init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xf32>
%fake = tensor.empty() : tensor<3x3xf32>
%init = tensor.empty() : tensor<1x2x2x1xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
%res = linalg.pooling_nhwc_sum {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
Expand Down Expand Up @@ -444,8 +444,8 @@ func.func @pooling_nhwc_sum(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>,
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x1x4x4xf32>, tensor<3x3xf32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x1x2x2xf32>) -> tensor<1x1x2x2xf32>
func.func @pooling_nchw_sum_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
%init = linalg.init_tensor [1, 1, 2, 2] : tensor<1x1x2x2xf32>
%fake = tensor.empty() : tensor<3x3xf32>
%init = tensor.empty() : tensor<1x1x2x2xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x1x2x2xf32>) -> tensor<1x1x2x2xf32>
%res = linalg.pooling_nchw_sum {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -478,8 +478,8 @@ func.func @pooling_nchw_sum(%input: memref<1x1x4x4xf32>, %fake: memref<3x3xf32>,
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xf32>, tensor<3x3xf32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
func.func @pooling_nhwc_max_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
%init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xf32>
%fake = tensor.empty() : tensor<3x3xf32>
%init = tensor.empty() : tensor<1x2x2x1xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
%res = linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -497,8 +497,8 @@ func.func @pooling_nhwc_max_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x
// CHECK-SAME: outs(%{{.+}} : tensor<1x1x2x2xf32>) -> tensor<1x1x2x2xf32>

func.func @pooling_nchw_max_tensor(%input: tensor<1x1x4x4xf32>) -> tensor<1x1x2x2xf32> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
%init = linalg.init_tensor [1, 1, 2, 2] : tensor<1x1x2x2xf32>
%fake = tensor.empty() : tensor<3x3xf32>
%init = tensor.empty() : tensor<1x1x2x2xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x1x2x2xf32>) -> tensor<1x1x2x2xf32>
%res = linalg.pooling_nchw_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -531,8 +531,8 @@ func.func @pooling_nhwc_max(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>,
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xi8>, tensor<3x3xi8>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x1xi8>) -> tensor<1x2x2x1xi8>
func.func @pooling_nhwc_i8_max_tensor(%input: tensor<1x4x4x1xi8>) -> tensor<1x2x2x1xi8> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xi8>
%init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xi8>
%fake = tensor.empty() : tensor<3x3xi8>
%init = tensor.empty() : tensor<1x2x2x1xi8>
%cst = arith.constant 0 : i8
%fill = linalg.fill ins(%cst : i8) outs(%init : tensor<1x2x2x1xi8>) -> tensor<1x2x2x1xi8>
%res = linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -565,8 +565,8 @@ func.func @pooling_nhwc_i8_max(%input: memref<1x4x4x1xi8>, %fake: memref<3x3xi8>
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xi16>, tensor<3x3xi16>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x1xi16>) -> tensor<1x2x2x1xi16>
func.func @pooling_nhwc_i16_max_tensor(%input: tensor<1x4x4x1xi16>) -> tensor<1x2x2x1xi16> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xi16>
%init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xi16>
%fake = tensor.empty() : tensor<3x3xi16>
%init = tensor.empty() : tensor<1x2x2x1xi16>
%cst = arith.constant 0 : i16
%fill = linalg.fill ins(%cst : i16) outs(%init : tensor<1x2x2x1xi16>) -> tensor<1x2x2x1xi16>
%res = linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -599,8 +599,8 @@ func.func @pooling_nhwc_i16_max(%input: memref<1x4x4x1xi16>, %fake: memref<3x3xi
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xi32>, tensor<3x3xi32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x1xi32>) -> tensor<1x2x2x1xi32>
func.func @pooling_nhwc_i32_max_tensor(%input: tensor<1x4x4x1xi32>) -> tensor<1x2x2x1xi32> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xi32>
%init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xi32>
%fake = tensor.empty() : tensor<3x3xi32>
%init = tensor.empty() : tensor<1x2x2x1xi32>
%cst = arith.constant 0 : i32
%fill = linalg.fill ins(%cst : i32) outs(%init : tensor<1x2x2x1xi32>) -> tensor<1x2x2x1xi32>
%res = linalg.pooling_nhwc_max {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -634,8 +634,8 @@ func.func @pooling_nhwc_i32_max(%input: memref<1x4x4x1xi32>, %fake: memref<3x3xi
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x1xf32>, tensor<3x3xf32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
func.func @pooling_nhwc_min_tensor(%input: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
%fake = linalg.init_tensor [3, 3] : tensor<3x3xf32>
%init = linalg.init_tensor [1, 2, 2, 1] : tensor<1x2x2x1xf32>
%fake = tensor.empty() : tensor<3x3xf32>
%init = tensor.empty() : tensor<1x2x2x1xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x2x2x1xf32>) -> tensor<1x2x2x1xf32>
%res = linalg.pooling_nhwc_min {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
@@ -668,8 +668,8 @@ func.func @pooling_nhwc_min(%input: memref<1x4x4x1xf32>, %fake: memref<3x3xf32>,
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x4x1xf32>, tensor<3x3x3xf32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
func.func @pooling_ndhwc_sum_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
%fake = linalg.init_tensor [3, 3, 3] : tensor<3x3x3xf32>
%init = linalg.init_tensor [1, 2, 2, 2, 1] : tensor<1x2x2x2x1xf32>
%fake = tensor.empty() : tensor<3x3x3xf32>
%init = tensor.empty() : tensor<1x2x2x2x1xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
%res = linalg.pooling_ndhwc_sum {dilations = dense<1> : tensor<3xi64>, strides = dense<1> : tensor<3xi64>}
@@ -702,8 +702,8 @@ func.func @pooling_ndhwc_sum(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3x
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x4x1xf32>, tensor<3x3x3xf32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
func.func @pooling_ndhwc_max_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
%fake = linalg.init_tensor [3, 3, 3] : tensor<3x3x3xf32>
%init = linalg.init_tensor [1, 2, 2, 2, 1] : tensor<1x2x2x2x1xf32>
%fake = tensor.empty() : tensor<3x3x3xf32>
%init = tensor.empty() : tensor<1x2x2x2x1xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
%res = linalg.pooling_ndhwc_max {dilations = dense<1> : tensor<3xi64>, strides = dense<1> : tensor<3xi64>}
@@ -736,8 +736,8 @@ func.func @pooling_ndhwc_max(%input: memref<1x4x4x4x1xf32>, %fake: memref<3x3x3x
// CHECK-SAME: ins(%{{.+}}, %{{.+}} : tensor<1x4x4x4x1xf32>, tensor<3x3x3xf32>)
// CHECK-SAME: outs(%{{.+}} : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
func.func @pooling_ndhwc_min_tensor(%input: tensor<1x4x4x4x1xf32>) -> tensor<1x2x2x2x1xf32> {
%fake = linalg.init_tensor [3, 3, 3] : tensor<3x3x3xf32>
%init = linalg.init_tensor [1, 2, 2, 2, 1] : tensor<1x2x2x2x1xf32>
%fake = tensor.empty() : tensor<3x3x3xf32>
%init = tensor.empty() : tensor<1x2x2x2x1xf32>
%cst = arith.constant 0.000000e+00 : f32
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x2x2x2x1xf32>) -> tensor<1x2x2x2x1xf32>
%res = linalg.pooling_ndhwc_min {dilations = dense<1> : tensor<3xi64>, strides = dense<1> : tensor<3xi64>}
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -eliminate-alloc-tensors -one-shot-bufferize="bufferize-function-boundaries test-analysis-only allow-return-allocs" -split-input-file | FileCheck %s

//===----------------------------------------------------------------------===//
// InitTensorOp elimination
// AllocTensorOp elimination
//===----------------------------------------------------------------------===//

// CHECK-LABEL: func @buffer_forwarding_conflict
8 changes: 4 additions & 4 deletions mlir/test/Dialect/Linalg/pad_fusion.mlir
@@ -6,7 +6,7 @@ func.func @dynamic_pad_fusion(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : in
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %arg0, %c1 : tensor<?x?xf32>
%init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%0 = linalg.generic {
indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
@@ -37,7 +37,7 @@ func.func @dynamic_pad_fusion(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : in
// CHECK-DAG: %[[TARGET_D0:.+]] = affine.apply #[[MAP]]()[%[[ARG1]], %[[ARG3]], %[[SOURCE_D0]]]
// CHECK-DAG: %[[SOURCE_D1:.+]] = tensor.dim %[[SOURCE]], %[[C1]]
// CHECK-DAG: %[[TARGET_D1:.+]] = affine.apply #[[MAP]]()[%[[ARG2]], %[[ARG4]], %[[SOURCE_D1]]]
// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[TARGET_D0]], %[[TARGET_D1]]]
// CHECK: %[[INIT:.+]] = tensor.empty(%[[TARGET_D0]], %[[TARGET_D1]])
// CHECK: %[[FILL:.+]] = linalg.fill ins(%[[ARG5]]{{.*}}outs(%[[INIT]]
// CHECK-DAG: %[[SIZE_D0:.+]] = tensor.dim %[[SOURCE]], %[[C0]]
// CHECK-DAG: %[[SIZE_D1:.+]] = tensor.dim %[[SOURCE]], %[[C1]]
@@ -55,7 +55,7 @@ func.func @mixed_pad_fusion(%arg0 : tensor<?x42xf32>, %arg1 : index, %arg2 : ind
%arg3 : f32) -> tensor<49x?xf32> {
%c0 = arith.constant 0 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x42xf32>
%init = linalg.init_tensor [42, %d0] : tensor<42x?xf32>
%init = tensor.empty(%d0) : tensor<42x?xf32>
%0 = linalg.generic {
indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d1, d0)>],
iterator_types = ["parallel", "parallel"]}
Expand All @@ -81,7 +81,7 @@ func.func @mixed_pad_fusion(%arg0 : tensor<?x42xf32>, %arg1 : index, %arg2 : ind
// CHECK-DAG: %[[SOURCE:.+]] = linalg.generic
// CHECK-DAG: %[[SOURCE_D1:.+]] = tensor.dim %[[SOURCE]], %[[C1]]
// CHECK-DAG: %[[TARGET_D1:.+]] = affine.apply #[[MAP]]()[%[[ARG1]], %[[ARG2]], %[[SOURCE_D1]]]
// CHECK: %[[INIT:.+]] = linalg.init_tensor [49, %[[TARGET_D1]]]
// CHECK: %[[INIT:.+]] = tensor.empty(%[[TARGET_D1]])
// CHECK: %[[FILL:.+]] = linalg.fill ins(%[[ARG3]]{{.*}}outs(%[[INIT]]
// CHECK-DAG: %[[SIZE_D1:.+]] = tensor.dim %[[SOURCE]], %[[C1]]
// CHECK: %[[SLICE:.+]] = tensor.extract_slice %[[FILL]]
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/reshape_control_fusion.mlir
@@ -6,7 +6,7 @@ func.func @control_producer_reshape_fusion(%arg0 : tensor<?x?x?xf32>, %arg1 : te
%0 = tensor.collapse_shape %arg0 [[0, 1], [2]] : tensor<?x?x?xf32> into tensor<?x?xf32>
%d0 = tensor.dim %0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %0, %c1 : tensor<?x?xf32>
%init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%1 = linalg.generic {
indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
@@ -40,7 +40,7 @@ func.func @control_consumer_reshape_fusion(%arg0 : tensor<1x?x?xf32>, %arg1 : te
%cst = arith.constant 0.0 : f32
%d0 = tensor.dim %arg0, %c1 : tensor<1x?x?xf32>
%d1 = tensor.dim %arg1, %c2 : tensor<1x?x?xf32>
%init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%fill = linalg.generic {
indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
12 changes: 6 additions & 6 deletions mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -142,7 +142,7 @@ func.func @reshape_as_consumer_permutation
func.func @generic_op_reshape_consumer_static(%arg0: tensor<264x4xf32>)
-> tensor<8x33x4xf32> {
%cst = arith.constant dense<2.000000e+00> : tensor<264x4xf32>
%0 = linalg.init_tensor [264, 4] : tensor<264x4xf32>
%0 = tensor.empty() : tensor<264x4xf32>
%1 = linalg.generic {
indexing_maps = [#map0, #map0, #map0],
iterator_types = ["parallel", "parallel"]}
@@ -162,7 +162,7 @@ func.func @generic_op_reshape_consumer_static(%arg0: tensor<264x4xf32>)
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<264x4xf32>
// CHECK-DAG: %[[CST:.+]] = arith.constant
// CHECK-SAME: : tensor<8x33x4xf32>
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [264, 4]
// CHECK-DAG: %[[INIT:.+]] = tensor.empty()
// CHECK: %[[T0:.+]] = tensor.expand_shape %[[ARG0]]
// CHECK-SAME: [0, 1], [2]
// CHECK-SAME: tensor<264x4xf32> into tensor<8x33x4xf32>
@@ -281,7 +281,7 @@ func.func @indexed_producer_reshape_consumer_fusion(%arg0 : tensor<?x?xi32>,
func.func @reshape_as_consumer_permutation
(%a : tensor<210x6x4xi32>, %b : tensor<210x4xi32>)
-> tensor<2x3x4x5x6x7xi32> {
%shape = linalg.init_tensor [6, 4, 210] : tensor<6x4x210xi32>
%shape = tensor.empty() : tensor<6x4x210xi32>
%c = linalg.generic {
indexing_maps = [affine_map<(d0, d1, d2) -> (d1, d0, d2)>,
affine_map<(d0, d1, d2) -> (d1, d2)>,
@@ -318,7 +318,7 @@ func.func @reshape_as_consumer_permutation
// CHECK: func @reshape_as_consumer_permutation
// CHECK-SAME: %[[ARG0:.+]]: tensor<210x6x4xi32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<210x4xi32>
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [6, 4, 210]
// CHECK-DAG: %[[INIT:.+]] = tensor.empty()
// CHECK-DAG: %[[T1:.+]] = tensor.expand_shape %[[ARG0]]
// CHECK-SAME: [0, 1, 2], [3, 4], [5]
// CHECK-DAG: %[[T2:.+]] = tensor.expand_shape %[[ARG1]]
@@ -455,7 +455,7 @@ func.func @no_fuse_dynamic_dims(%arg0: tensor<?x?xf32>) -> tensor<?xf32> {
%c0 = arith.constant 0 : index
%0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<?x?xf32> into tensor<?xf32>
%1 = tensor.dim %0, %c0 : tensor<?xf32>
%2 = linalg.init_tensor [%1] : tensor<?xf32>
%2 = tensor.empty(%1) : tensor<?xf32>
%3 = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
@@ -477,7 +477,7 @@ func.func @no_fuse_dynamic_dims(%arg0: tensor<?x?xf32>) -> tensor<?xf32> {

func.func @no_fuse_mismatched_dynamism(%arg0: tensor<2x1xi64>, %arg1: tensor<?xi64>) -> tensor<2xi64> {
%0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<2x1xi64> into tensor<2xi64>
%1 = linalg.init_tensor [2] : tensor<2xi64>
%1 = tensor.empty() : tensor<2xi64>
%2 = linalg.generic
{indexing_maps = [affine_map<(d0) -> (d0)>,
affine_map<(d0) -> (d0)>,
26 changes: 13 additions & 13 deletions mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
@@ -1,42 +1,42 @@
// RUN: mlir-opt -resolve-shaped-type-result-dims -split-input-file %s | FileCheck %s

func.func @init_tensor_static_dim() -> (index, index) {
func.func @empty_tensor_static_dim() -> (index, index) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c6 = arith.constant 6 : index
%0 = linalg.init_tensor [4, 5, %c6] : tensor<4x5x?xf32>
%0 = tensor.empty(%c6) : tensor<4x5x?xf32>
%1 = tensor.dim %0, %c2 : tensor<4x5x?xf32>
%2 = tensor.dim %0, %c0 : tensor<4x5x?xf32>
return %1, %2 : index, index
}
// CHECK: func @init_tensor_static_dim
// CHECK: func @empty_tensor_static_dim
// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
// CHECK-DAG: %[[C6:.+]] = arith.constant 6 : index
// CHECK: return %[[C6]], %[[C4]]

// -----

func.func @init_tensor_dynamic_dim(%arg0 : index) -> (index) {
func.func @empty_tensor_dynamic_dim(%arg0 : index) -> (index) {
%c2 = arith.constant 2 : index
%0 = linalg.init_tensor [4, 5, %arg0] : tensor<4x5x?xf32>
%0 = tensor.empty(%arg0) : tensor<4x5x?xf32>
%1 = tensor.dim %0, %c2 : tensor<4x5x?xf32>
return %1 : index
}
// CHECK: func @init_tensor_dynamic_dim
// CHECK: func @empty_tensor_dynamic_dim
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: index
// CHECK: return %[[ARG0]]

// -----

func.func @init_tensor_dynamic_dim2(%arg0 : index, %arg1 : index) -> (index, index) {
func.func @empty_tensor_dynamic_dim2(%arg0 : index, %arg1 : index) -> (index, index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%0 = linalg.init_tensor [%arg0, %arg1] : tensor<?x?xf32>
%0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf32>
%1 = tensor.dim %0, %c0 : tensor<?x?xf32>
%2 = tensor.dim %0, %c1 : tensor<?x?xf32>
return %1, %2 : index, index
}
// CHECK: func @init_tensor_dynamic_dim2
// CHECK: func @empty_tensor_dynamic_dim2
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK: return %[[ARG0]], %[[ARG1]]
@@ -87,7 +87,7 @@ func.func @remove_dim_result_uses_outs
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = linalg.init_tensor [%d0, %arg1] : tensor<?x?xf32>
%0 = tensor.empty(%d0, %arg1) : tensor<?x?xf32>
%1 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0)>,
affine_map<(d0, d1) -> (d0, d1)>],
@@ -149,7 +149,7 @@ func.func @keep_result_dim_uses_sequence2
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?xf32>
%0 = linalg.init_tensor [%d0, %arg1] : tensor<?x?xf32>
%0 = tensor.empty(%d0, %arg1) : tensor<?x?xf32>
%1 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0)>,
affine_map<(d0, d1) -> (d0, d1)>],
@@ -173,7 +173,7 @@

#map = affine_map<(d0) -> (d0)>

func.func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
func.func @empty_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
%arg_1: tensor<?xf32>) -> (index, index) {
%0, %1 = linalg.generic {
indexing_maps = [#map, #map, #map],
@@ -190,7 +190,7 @@ func.func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
%num_elem_1 = tensor.dim %1, %c0 : tensor<?xf32>
return %num_elem_0, %num_elem_1 : index, index
}
// CHECK: func @init_tensor_dim_of_linalg_result(
// CHECK: func @empty_tensor_dim_of_linalg_result(
// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?xf32>
// CHECK-SAME: %[[ARG_1:[a-zA-Z0-9_]+]]: tensor<?xf32>)
// CHECK: %[[R0:.+]] = tensor.dim %[[ARG_0]]
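The renamed tests above still exercise the same folding under -resolve-shaped-type-result-dims: a tensor.dim of a tensor.empty resolves statically known extents to constants and dynamic extents to the corresponding size operand. A condensed restatement of what @empty_tensor_static_dim and @empty_tensor_dynamic_dim check (the names here are illustrative and not part of the test file):

func.func @dim_folding_sketch(%n : index) -> (index, index) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %t = tensor.empty(%n) : tensor<4x?xf32>
  %d0 = tensor.dim %t, %c0 : tensor<4x?xf32>  // folds to the constant 4
  %d1 = tensor.dim %t, %c1 : tensor<4x?xf32>  // folds to %n
  return %d0, %d1 : index, index
}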
21 changes: 3 additions & 18 deletions mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -202,9 +202,9 @@ func.func @generic_with_multiple_tensor_outputs(
%arg0: tensor<?xi32>, %arg1: tensor<?xi32>, %arg2: i32)
-> (tensor<i32>, tensor<i32>) {
%c0 = arith.constant 0 : index
%0 = linalg.init_tensor [] : tensor<i32>
%0 = tensor.empty() : tensor<i32>
%1 = linalg.fill ins(%arg2 : i32) outs(%0 : tensor<i32>) -> tensor<i32>
%2 = linalg.init_tensor [] : tensor<i32>
%2 = tensor.empty() : tensor<i32>
%3 = linalg.fill ins(%arg2 : i32) outs(%2 : tensor<i32>) -> tensor<i32>
%4:2 = linalg.generic {
indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> ()>, affine_map<(d0) -> ()>],
@@ -324,23 +324,8 @@ func.func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?x?xf32>, %c3: memref

// -----

#attr = {"foo"}
func.func @init_tensor(%arg0 : index, %arg1 : index)
{
%0 = linalg.init_tensor [3, 42] : tensor<3x42xf32>
%1 = linalg.init_tensor [4, %arg0, %arg1, 5] : tensor<4x?x?x5xf32>
%2 = linalg.init_tensor [2, 2] : tensor<2x2xf32, #attr>
return
}
// CHECK-LABEL: func @init_tensor
// CHECK: linalg.init_tensor [3, 42] : tensor<3x42xf32>
// CHECK: linalg.init_tensor [4, %{{.*}}, %{{.*}}, 5] : tensor<4x?x?x5xf32>
// CHECK: linalg.init_tensor [2, 2] : tensor<2x2xf32, {foo}>

// -----

func.func @fill_tensor(%arg0 : index, %arg1 : index, %arg2 : f32) -> tensor<?x?xf32> {
%0 = linalg.init_tensor [%arg0, %arg1] : tensor<?x?xf32>
%0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf32>
%1 = linalg.fill ins(%arg2 : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32>
return %1 : tensor<?x?xf32>
}
12 changes: 6 additions & 6 deletions mlir/test/Dialect/Linalg/split_reduction.mlir
@@ -16,7 +16,7 @@ func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: ten
// CHECK-DAG: %[[ID:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG: %[[I1:.*]] = tensor.expand_shape %{{.*}}[0], [1, 2]] : tensor<16x256xf32> into tensor<16x4x64xf32>
// CHECK-DAG: %[[I2:.*]] = tensor.expand_shape %{{.*}}[0, 1], [2]] : tensor<256x32xf32> into tensor<4x64x32xf32>
// CHECK-DAG: %[[INI:.*]] = linalg.init_tensor [16, 32, 4] : tensor<16x32x4xf32>
// CHECK-DAG: %[[INI:.*]] = tensor.empty() : tensor<16x32x4xf32>
// CHECK: %[[F:.*]] = linalg.fill ins(%[[ID]] : f32) outs(%[[INI]] : tensor<16x32x4xf32>) -> tensor<16x32x4xf32>
// CHECK: %[[G:.*]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]]
// CHECK-SAME: , iterator_types = ["parallel", "parallel", "parallel", "reduction"]}
@@ -41,7 +41,7 @@ func.func @matmul_split(%A : tensor<16x256xf32>, %B: tensor<256x32xf32>, %C: ten
// INNERPARALLELCHECK-DAG: %[[ID:.*]] = arith.constant 0.000000e+00 : f32
// INNERPARALLELCHECK-DAG: %[[I1:.*]] = tensor.expand_shape %{{.*}}[0], [1, 2]] : tensor<16x256xf32> into tensor<16x64x4xf32>
// INNERPARALLELCHECK-DAG: %[[I2:.*]] = tensor.expand_shape %{{.*}}[0, 1], [2]] : tensor<256x32xf32> into tensor<64x4x32xf32>
// INNERPARALLELCHECK-DAG: %[[INI:.*]] = linalg.init_tensor [16, 32, 4] : tensor<16x32x4xf32>
// INNERPARALLELCHECK-DAG: %[[INI:.*]] = tensor.empty() : tensor<16x32x4xf32>
// INNERPARALLELCHECK: %[[F:.*]] = linalg.fill ins(%[[ID]] : f32) outs(%[[INI]] : tensor<16x32x4xf32>) -> tensor<16x32x4xf32>
// INNERPARALLELCHECK: %[[G:.*]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]]
// INNERPARALLELCHECK-SAME: , iterator_types = ["parallel", "parallel", "reduction", "parallel"]}
@@ -83,7 +83,7 @@ func.func @generic_split_1d(%arg0: tensor<32xf32>, %arg1: tensor<f32>, %out: ten
//CHECK-LABEL: @generic_split_1d
// CHECK: %[[ID:.*]] = arith.constant 1.000000e+00 : f32
// CHECK: %[[I1:.*]] = tensor.expand_shape %{{.*}}[0, 1]] : tensor<32xf32> into tensor<4x8xf32>
// CHECK: %[[INI:.*]] = linalg.init_tensor [4] : tensor<4xf32>
// CHECK: %[[INI:.*]] = tensor.empty() : tensor<4xf32>
// CHECK: %[[F:.*]] = linalg.fill ins(%[[ID]] : f32) outs(%[[INI]] : tensor<4xf32>) -> tensor<4xf32>
// CHECK: %[[G:.*]] = linalg.generic
// CHECK: {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]],
@@ -107,7 +107,7 @@ func.func @generic_split_1d(%arg0: tensor<32xf32>, %arg1: tensor<f32>, %out: ten
//INNERPARALLELCHECK-LABEL: @generic_split_1d
// INNERPARALLELCHECK: %[[ID:.*]] = arith.constant 1.000000e+00 : f32
// INNERPARALLELCHECK: %[[I1:.*]] = tensor.expand_shape %{{.*}}[0, 1]] : tensor<32xf32> into tensor<8x4xf32>
// INNERPARALLELCHECK: %[[INI:.*]] = linalg.init_tensor [4] : tensor<4xf32>
// INNERPARALLELCHECK: %[[INI:.*]] = tensor.empty() : tensor<4xf32>
// INNERPARALLELCHECK: %[[F:.*]] = linalg.fill ins(%[[ID]] : f32) outs(%[[INI]] : tensor<4xf32>) -> tensor<4xf32>
// INNERPARALLELCHECK: %[[G:.*]] = linalg.generic
// INNERPARALLELCHECK: {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]],
@@ -153,7 +153,7 @@ func.func @generic_split_3d(%input: tensor<32x2xf32>, %input_2: tensor<5x32xf32>
// CHECK: %[[ID:.*]] = arith.constant -3.40282347E+38 : f32
// CHECK-DAG: %[[I1:.*]] = tensor.expand_shape %{{.*}}[0, 1], [2]] : tensor<32x2xf32> into tensor<4x8x2xf32>
// CHECK-DAG: %[[I2:.*]] = tensor.expand_shape %{{.*}}[0], [1, 2]] : tensor<5x32xf32> into tensor<5x4x8xf32>
// CHECK: %[[INI:.*]] = linalg.init_tensor [5, 2, 4] : tensor<5x2x4xf32>
// CHECK: %[[INI:.*]] = tensor.empty() : tensor<5x2x4xf32>
// CHECK: %[[F:.*]] = linalg.fill ins(%[[ID]] : f32) outs(%[[INI]] : tensor<5x2x4xf32>) -> tensor<5x2x4xf32>
// CHECK: %[[G:.*]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]], iterator_types = ["parallel", "reduction", "parallel", "parallel"]}
// CHECK-SAME: ins(%[[I1]], %[[I2]] : tensor<4x8x2xf32>, tensor<5x4x8xf32>) outs(%[[F]] : tensor<5x2x4xf32>) {
@@ -177,7 +177,7 @@ func.func @generic_split_3d(%input: tensor<32x2xf32>, %input_2: tensor<5x32xf32>
// INNERPARALLELCHECK: %[[ID:.*]] = arith.constant -3.40282347E+38 : f32
// INNERPARALLELCHECK-DAG: %[[I1:.*]] = tensor.expand_shape %{{.*}}[0, 1], [2]] : tensor<32x2xf32> into tensor<8x4x2xf32>
// INNERPARALLELCHECK-DAG: %[[I2:.*]] = tensor.expand_shape %{{.*}}[0], [1, 2]] : tensor<5x32xf32> into tensor<5x8x4xf32>
// INNERPARALLELCHECK: %[[INI:.*]] = linalg.init_tensor [5, 2, 4] : tensor<5x2x4xf32>
// INNERPARALLELCHECK: %[[INI:.*]] = tensor.empty() : tensor<5x2x4xf32>
// INNERPARALLELCHECK: %[[F:.*]] = linalg.fill ins(%[[ID]] : f32) outs(%[[INI]] : tensor<5x2x4xf32>) -> tensor<5x2x4xf32>
// INNERPARALLELCHECK: %[[G:.*]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]], iterator_types = ["parallel", "reduction", "parallel", "parallel"]}
// INNERPARALLELCHECK-SAME: ins(%[[I1]], %[[I2]] : tensor<8x4x2xf32>, tensor<5x8x4xf32>) outs(%[[F]] : tensor<5x2x4xf32>) {
8 changes: 4 additions & 4 deletions mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
@@ -65,7 +65,7 @@ func.func @conv_tensors_static(%input: tensor<1x225x225x3xf32>, %filter: tensor<
%c0 = arith.constant 0 : index
%cst = arith.constant 0.0 : f32

%init = linalg.init_tensor [1, 112, 112, 32] : tensor<1x112x112x32xf32>
%init = tensor.empty() : tensor<1x112x112x32xf32>
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32>

%conv = linalg.conv_2d_nhwc_hwcf
@@ -109,7 +109,7 @@ func.func @conv_tensors_static(%input: tensor<1x225x225x3xf32>, %filter: tensor<
// CHECK: func @conv_tensors_static
// CHECK-SAME: (%[[INPUT:.+]]: tensor<1x225x225x3xf32>, %[[FILTER:.+]]: tensor<3x3x3x32xf32>, %[[ELEM:.+]]: tensor<1x112x112x32xf32>)

// CHECK: %[[INIT:.+]] = linalg.init_tensor [1, 112, 112, 32] : tensor<1x112x112x32xf32>
// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x112x112x32xf32>
// CHECK-NEXT: %[[FILL:.+]] = linalg.fill ins(%cst : f32) outs(%[[INIT]] : tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32>

// CHECK-NEXT: scf.for %[[IV0:.+]] = %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG0:.+]] = %[[FILL]])
@@ -147,7 +147,7 @@ func.func @conv_tensors_dynamic(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?
%ow = tensor.dim %elementwise, %c2 : tensor<?x?x?x?xf32>
%oc = tensor.dim %elementwise, %c3 : tensor<?x?x?x?xf32>

%init = linalg.init_tensor [%n, %oh, %ow, %oc] : tensor<?x?x?x?xf32>
%init = tensor.empty(%n, %oh, %ow, %oc) : tensor<?x?x?x?xf32>
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>

%conv = linalg.conv_2d_nhwc_hwcf
@@ -216,7 +216,7 @@ func.func @conv_tensors_dynamic(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?
// CHECK-DAG: %[[ELEM_OW:.+]] = tensor.dim %[[ELEM]], %[[C2]] : tensor<?x?x?x?xf32>
// CHECK-DAG: %[[ELEM_OC:.+]] = tensor.dim %[[ELEM]], %[[C3]] : tensor<?x?x?x?xf32>

// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[ELEM_N]], %[[ELEM_OH]], %[[ELEM_OW]], %[[ELEM_OC]]] : tensor<?x?x?x?xf32>
// CHECK: %[[INIT:.+]] = tensor.empty(%[[ELEM_N]], %[[ELEM_OH]], %[[ELEM_OW]], %[[ELEM_OC]]) : tensor<?x?x?x?xf32>
// CHECK: %[[FILL:.+]] = linalg.fill ins(%cst : f32) outs(%[[INIT]] : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>

// CHECK-DAG: %[[FILTER_H:.+]] = tensor.dim %[[FILTER]], %[[C0]] : tensor<?x?x?x?xf32>
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/tile-and-peel-tensors.mlir
@@ -47,7 +47,7 @@
// CHECK-PEEL-12: }
func.func @matmul_static_tensor(%arg0: tensor<1500x1600xf32>, %arg1: tensor<1600x1700xf32>)
-> tensor<1500x1700xf32> {
%out = linalg.init_tensor [1500, 1700] : tensor<1500x1700xf32>
%out = tensor.empty() : tensor<1500x1700xf32>
%r = linalg.matmul {__internal_linalg_transform__ = "tile"}
ins(%arg0, %arg1: tensor<1500x1600xf32>, tensor<1600x1700xf32>)
outs(%out: tensor<1500x1700xf32>) -> tensor<1500x1700xf32>
@@ -102,7 +102,7 @@ func.func @matmul_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>)
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
%out = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%out = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%r = linalg.matmul {__internal_linalg_transform__ = "tile"}
ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x?xf32>)
outs(%out: tensor<?x?xf32>) -> tensor<?x?xf32>
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/tile-fuse-and-distribute.mlir
@@ -14,7 +14,7 @@ func.func @fill_matmul_tensors(
// CHECK-DAG: %[[NBLOCKSY:.*]] = gpu.grid_dim y
// CHECK-DAG: %[[BIDX:.*]] = gpu.block_id x
// CHECK-DAG: %[[NBLOCKSX:.*]] = gpu.grid_dim x
// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor
// CHECK-DAG: %[[INIT:.+]] = tensor.empty
// CHECK: %[[MUL:.+]] = affine.apply #[[MULMAP]]()[%[[BIDY]], %[[C8]]]
// CHECK: %[[LBY:.+]] = affine.apply #[[ADDMAP]]()[%[[MUL]], %[[C0]]]
// CHECK: %[[STEPY:.+]] = affine.apply #[[MULMAP]]()[%[[NBLOCKSY]], %[[C8]]]
@@ -43,7 +43,7 @@ func.func @fill_matmul_tensors(
%cst = arith.constant 0.0 : f32
%0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%1 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
%2 = linalg.init_tensor [%0, %1] : tensor<?x?xf32>
%2 = tensor.empty(%0, %1) : tensor<?x?xf32>
%3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<?x?xf32>) -> tensor<?x?xf32>
%4 = linalg.matmul {__internal_linalg_transform__ = "tensors_fuse_distribute1"}
ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x?xf32>)
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/tile-scalarize-dynamic-dims.mlir
@@ -19,7 +19,7 @@ func.func @matmul_partly_dynamic_tensor(%arg0: tensor<?x?xf32>, %arg1: tensor<?x
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%out = linalg.init_tensor [%d0, 2000] : tensor<?x2000xf32>
%out = tensor.empty(%d0) : tensor<?x2000xf32>
%r = linalg.matmul {__internal_linalg_transform__ = "tile"}
ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x2000xf32>)
outs(%out: tensor<?x2000xf32>) -> tensor<?x2000xf32>
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -37,7 +37,7 @@ func.func @generic_op_tensors(
%0 = tensor.dim %arg0, %c0 : tensor<?x?x?xf32>
%1 = tensor.dim %arg0, %c1 : tensor<?x?x?xf32>
%2 = tensor.dim %arg0, %c2 : tensor<?x?x?xf32>
%3 = linalg.init_tensor [%0, %1, %2] : tensor<?x?x?xf32>
%3 = tensor.empty(%0, %1, %2) : tensor<?x?x?xf32>
%4 = linalg.generic
{indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
affine_map<(d0, d1, d2) -> (d0, d2, d1)>,
@@ -55,7 +55,7 @@ func.func @generic_op_tensors(
// CHECK-LABEL: func @generic_op_tensors
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
// CHECK: %[[INIT:.+]] = linalg.init_tensor
// CHECK: %[[INIT:.+]] = tensor.empty
// CHECK: %[[TD0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC0:.+]] = %[[INIT]]) -> (tensor<?x?x?xf32>) {
// CHECK: %[[TD1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC1:.+]] = %[[TC0]]) -> (tensor<?x?x?xf32>) {
// CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/transform-op-decompose.mlir
@@ -40,8 +40,8 @@ func.func @conv_2d_nchw_fchw(%input: tensor<?x?x1x?xf32>, %filter: tensor<?x?x1x
// CHECK-SAME: %[[ARG0:.+]]: tensor<1x1x113x96xf32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<1x3x96xf32>
func.func @depthwise_conv_2d_nhwc_hwc(%input: tensor<1x1x113x96xf32>, %filter: tensor<1x3x96xf32>) -> tensor<1x1x56x96xf32> {
// CHECK: %[[RES:.+]] = linalg.init_tensor
%init = linalg.init_tensor [1, 1, 56, 96] : tensor<1x1x56x96xf32>
// CHECK: %[[RES:.+]] = tensor.empty
%init = tensor.empty() : tensor<1x1x56x96xf32>
// CHECK: %[[SLICE0:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK: %[[SLICE1:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK: %[[SLICERES:.+]] = tensor.extract_slice %[[RES]]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -66,12 +66,12 @@ module {
// CHECK-SAME: %[[IN:[0-9a-z]+]]: tensor<64xf32>
// CHECK-SAME: %[[OUT:[0-9a-z]+]]: tensor<64xf32>
func.func @fuse_untileable_op(%arg0: index, %arg1: tensor<64xf32>, %arg2: tensor<64xf32>) -> tensor<64xf32> {
%0 = linalg.init_tensor [%arg0] : tensor<?xf32>
%0 = tensor.empty(%arg0) : tensor<?xf32>
%1 = affine.apply #map0()[%arg0]

// CHECK: scf.foreach_thread {{.*}} {
%2 = scf.foreach_thread (%arg3) in (%1) shared_outs(%o = %arg2) -> (tensor<64xf32>) {
// CHECK: %[[INIT_TENSOR:.*]] = linalg.init_tensor
// CHECK: %[[INIT_TENSOR:.*]] = tensor.empty
%3 = affine.apply #map1(%arg3)[%arg0]
%4 = affine.min #map2(%arg3)[%arg0]
%5 = tensor.extract_slice %o[%3] [%4] [1] : tensor<64xf32> to tensor<?xf32>
Expand All @@ -91,10 +91,10 @@ module {
^bb0(%arg0: !pdl.operation):
transform.sequence %arg0 failures(propagate) {
^bb1(%arg1: !pdl.operation):
%0 = transform.structured.match ops{["linalg.init_tensor"]} in %arg1
%0 = transform.structured.match ops{["tensor.empty"]} in %arg1
%1 = transform.structured.match ops{["scf.foreach_thread"]} in %arg1

// linalg.init_tensor is not tileable. The op is cloned and fused.
// tensor.empty is not tileable. The op is cloned and fused.
transform.structured.fuse_into_containing_op %0 into %1
}
}
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/transform-op-fuse.mlir
Expand Up @@ -59,9 +59,9 @@ transform.with_pdl_patterns {
// CHECK-SAME: (%[[INPUT:.+]]: tensor<12x7x25xf32>)
func.func @interchange_reduction(%input: tensor<12x7x25xf32>) -> tensor<12x25xf32> {
%five = arith.constant 5.0 : f32
%init = linalg.init_tensor [12, 25] : tensor<12x25xf32>
%init = tensor.empty() : tensor<12x25xf32>

// CHECK: %[[INIT:.+]] = linalg.init_tensor [12, 25]
// CHECK: %[[INIT:.+]] = tensor.empty()
// CHECK-DAG: %[[C5:.+]] = arith.constant 5 : index
// CHECK-DAG: %[[C7:.+]] = arith.constant 7 : index
// CHECK: scf.for %[[IV0:.+]] = %{{.+}} to %{{.+}} step %[[C5]] iter_args(%[[FOR_ARG0:.+]] = %[[INIT]])
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/transform-op-match.mlir
Expand Up @@ -49,7 +49,7 @@ transform.with_pdl_patterns {
#map1 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
func.func @match_complex_attribute(%arg0: tensor<12x128x32xf32>)
-> tensor<128x12x32xf32> {
%0 = linalg.init_tensor [128, 12, 32] : tensor<128x12x32xf32>
%0 = tensor.empty() : tensor<128x12x32xf32>
// expected-remark @below {{matched complex attr}}
%1 = linalg.generic {indexing_maps = [#map0, #map1],
iterator_types = ["parallel", "parallel", "parallel"]}
Expand Down
42 changes: 21 additions & 21 deletions mlir/test/Dialect/Linalg/vectorization.mlir
Expand Up @@ -182,7 +182,7 @@ transform.with_pdl_patterns {
func.func @generic_interchanged_transpose(%arg0: tensor<12x128x32xf32>) -> tensor<128x12x32xf32> {
// CHECK: %[[IN:.+]] = vector.transfer_read
// CHECK: vector.transfer_write %[[IN]], {{.+}} permutation_map = #[[MAP]]
%0 = linalg.init_tensor [128, 12, 32] : tensor<128x12x32xf32>
%0 = tensor.empty() : tensor<128x12x32xf32>
%1 = linalg.generic {indexing_maps = [#map0, #map1],
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%arg0 : tensor<12x128x32xf32>)
Expand Down Expand Up @@ -786,7 +786,7 @@ transform.with_pdl_patterns {
// CHECK-NOT: tensor.pad
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[INIT:.*]] = linalg.init_tensor [2, 3, 4] : tensor<2x3x4xf32>
// CHECK-DAG: %[[INIT:.*]] = tensor.empty() : tensor<2x3x4xf32>
// CHECK-DAG: %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x3x4xf32>
// CHECK: %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]]{{.*}} : vector<2x3x4xf32>, tensor<2x3x4xf32>
// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %[[PAD]] {in_bounds = [true, false, true]} : tensor<2x?x2xf32>, vector<2x3x2xf32>
Expand Down Expand Up @@ -818,7 +818,7 @@ transform.with_pdl_patterns {
// CHECK-NOT: tensor.pad
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK: %[[INIT:.*]] = linalg.init_tensor [2, 6, 4] : tensor<2x6x4xf32>
// CHECK: %[[INIT:.*]] = tensor.empty() : tensor<2x6x4xf32>
// CHECK: %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x6x4xf32>
// CHECK: %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<2x6x4xf32>, tensor<2x6x4xf32>
// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : tensor<2x5x2xf32>, vector<2x5x2xf32>
Expand Down Expand Up @@ -858,7 +858,7 @@ transform.with_pdl_patterns {
// CHECK: %[[DIM3:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
// CHECK: %[[V4:.*]] = arith.addi %[[DIM3]], %[[C3]] : index
// CHECK: %[[V5:.*]] = arith.addi %[[V4]], %[[C2]] : index
// CHECK: %[[INIT:.*]] = linalg.init_tensor [6, %[[V1]], %[[V2]], %[[V5]]] : tensor<6x?x?x?xf32>
// CHECK: %[[INIT:.*]] = tensor.empty(%[[V1]], %[[V2]], %[[V5]]) : tensor<6x?x?x?xf32>
// CHECK: %[[FILL:.*]] = linalg.fill ins(%{{.*}} : f32) outs(%[[INIT]] : tensor<6x?x?x?xf32>) -> tensor<6x?x?x?xf32>
// CHECK: %[[SRCDIM:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32>
// CHECK: %[[RESULT:.*]] = tensor.insert_slice %[[SRC]] into %[[FILL]][2, %[[LOW]], 3, 3] [1, 2, 2, %[[SRCDIM]]] [1, 1, 1, 1] : tensor<1x2x2x?xf32> into tensor<6x?x?x?xf32>
Expand Down Expand Up @@ -1192,11 +1192,11 @@ transform.with_pdl_patterns {
// CHECK-LABEL: func @red_max_2d(
func.func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
// CHECK: %[[CMINF:.+]] = arith.constant dense<-3.402820e+38> : vector<4xf32>
// CHECK: linalg.init_tensor [4] : tensor<4xf32>
// CHECK: tensor.empty() : tensor<4xf32>
// CHECK: vector.multi_reduction <maxf>, {{.*}}, %[[CMINF]] [1] : vector<4x4xf32> to vector<4xf32>
// CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
%ident = arith.constant -3.40282e+38 : f32
%init = linalg.init_tensor [4] : tensor<4xf32>
%init = tensor.empty() : tensor<4xf32>
%fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0)>],
Expand Down Expand Up @@ -1225,12 +1225,12 @@ transform.with_pdl_patterns {
// CHECK-LABEL: func @red_min_2d(
func.func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
// CHECK: %[[CMAXF:.+]] = arith.constant dense<3.402820e+38> : vector<4xf32>
// CHECK: linalg.init_tensor [4] : tensor<4xf32>
// CHECK: tensor.empty() : tensor<4xf32>
// CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
// CHECK: vector.multi_reduction <minf>, {{.*}}, %[[CMAXF]] [1] : vector<4x4xf32> to vector<4xf32>
// CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
%maxf32 = arith.constant 3.40282e+38 : f32
%init = linalg.init_tensor [4] : tensor<4xf32>
%init = tensor.empty() : tensor<4xf32>
%fill = linalg.fill ins(%maxf32 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0)>],
Expand Down Expand Up @@ -1258,12 +1258,12 @@ transform.with_pdl_patterns {

// CHECK-LABEL: func @red_mul_2d(
func.func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
// CHECK: linalg.init_tensor [4] : tensor<4xf32>
// CHECK: tensor.empty() : tensor<4xf32>
// CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
// CHECK: vector.multi_reduction <mul>, {{.*}}, {{.*}} [1] : vector<4x4xf32> to vector<4xf32>
// CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
%ident = arith.constant 1.0 : f32
%init = linalg.init_tensor [4] : tensor<4xf32>
%init = tensor.empty() : tensor<4xf32>
%fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0)>],
Expand Down Expand Up @@ -1291,12 +1291,12 @@ transform.with_pdl_patterns {

// CHECK-LABEL: func @red_or_2d(
func.func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
// CHECK: linalg.init_tensor [4] : tensor<4xi1>
// CHECK: tensor.empty() : tensor<4xi1>
// CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
// CHECK: vector.multi_reduction <or>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
// CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
%ident = arith.constant false
%init = linalg.init_tensor [4] : tensor<4xi1>
%init = tensor.empty() : tensor<4xi1>
%fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0)>],
Expand Down Expand Up @@ -1324,12 +1324,12 @@ transform.with_pdl_patterns {

// CHECK-LABEL: func @red_and_2d(
func.func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
// CHECK: linalg.init_tensor [4] : tensor<4xi1>
// CHECK: tensor.empty() : tensor<4xi1>
// CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
// CHECK: vector.multi_reduction <and>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
// CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
%ident = arith.constant true
%init = linalg.init_tensor [4] : tensor<4xi1>
%init = tensor.empty() : tensor<4xi1>
%fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0)>],
Expand Down Expand Up @@ -1357,12 +1357,12 @@ transform.with_pdl_patterns {

// CHECK-LABEL: func @red_xor_2d(
func.func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
// CHECK: linalg.init_tensor [4] : tensor<4xi1>
// CHECK: tensor.empty() : tensor<4xi1>
// CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
// CHECK: vector.multi_reduction <xor>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
// CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
%ident = arith.constant false
%init = linalg.init_tensor [4] : tensor<4xi1>
%init = tensor.empty() : tensor<4xi1>
%fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0)>],
Expand Down Expand Up @@ -1397,7 +1397,7 @@ func.func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) ->
// CHECK: subf {{.*}} : vector<4x4xf32>
// CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<4x4xf32>, tensor<4x4xf32>
%c0 = arith.constant 0.0 : f32
%init = linalg.init_tensor [4, 4] : tensor<4x4xf32>
%init = tensor.empty() : tensor<4x4xf32>
%fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4x4xf32>) -> tensor<4x4xf32>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0, 0)>,
Expand Down Expand Up @@ -1436,7 +1436,7 @@ func.func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>
// CHECK: vector.multi_reduction <add>, {{.*}}, {{.*}} : vector<4x4xf32> to vector<4xf32>
// CHECK: vector.transfer_write {{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<4xf32>
%c0 = arith.constant 0.0 : f32
%init = linalg.init_tensor [4] : tensor<4xf32>
%init = tensor.empty() : tensor<4xf32>
%fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
%red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0, 0)>,
Expand Down Expand Up @@ -1478,8 +1478,8 @@ func.func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
%f0 = arith.constant 0.000000e+00 : f32

// CHECK: %[[init:.*]] = linalg.init_tensor [] : tensor<f32>
%0 = linalg.init_tensor [] : tensor<f32>
// CHECK: %[[init:.*]] = tensor.empty() : tensor<f32>
%0 = tensor.empty() : tensor<f32>

%1 = linalg.fill ins(%f0 : f32) outs(%0 : tensor<f32>) -> tensor<f32>
// CHECK: %[[r:.*]] = vector.transfer_read %[[A]][%[[C0]]]
Expand Down Expand Up @@ -1524,7 +1524,7 @@ transform.with_pdl_patterns {
// CHECK-LABEL: func @not_projected_permutation
func.func @not_projected_permutation(%arg0: tensor<8x8xf32>) -> tensor<6x6x3x3xf32> {
%c0 = arith.constant 0.0 : f32
%init = linalg.init_tensor [6, 6, 3, 3] : tensor<6x6x3x3xf32>
%init = tensor.empty() : tensor<6x6x3x3xf32>
%fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<6x6x3x3xf32>) -> tensor<6x6x3x3xf32>
// CHECK: linalg.generic
%result = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0 + d2, d1 + d3)>,
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/sparse_1d.mlir
Expand Up @@ -49,7 +49,7 @@ func.func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>)
// CHECK: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_INITTENSOR:.*]] = linalg.init_tensor [32] : tensor<32xf32>
// CHECK: %[[VAL_INITTENSOR:.*]] = tensor.empty() : tensor<32xf32>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>> to memref<?xf32>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_INITTENSOR]] : memref<32xf32>
// CHECK: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_7]] : memref<32xf32>)
Expand All @@ -62,7 +62,7 @@ func.func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>)
// CHECK: return %[[VAL_11]] : tensor<32xf32>
// CHECK: }
func.func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> {
%u = linalg.init_tensor [32] : tensor<32xf32>
%u = tensor.empty() : tensor<32xf32>
%0 = linalg.generic #trait1
ins(%arga: tensor<32xf32, #DV>)
outs(%u: tensor<32xf32>) {
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
Expand Up @@ -27,7 +27,7 @@
// CHECK: }
func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
%cst = arith.constant 0.000000e+00 : f64
%0 = linalg.init_tensor [1024, 1024] : tensor<1024x1024xf64>
%0 = tensor.empty() : tensor<1024x1024xf64>
%1 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> ()>,
affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
Expand All @@ -46,7 +46,7 @@ func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
// CHECK: }
func.func @fold_yield_direct_zero() -> tensor<32xf64> {
%cst = arith.constant 0.000000e+00 : f64
%0 = linalg.init_tensor [32] : tensor<32xf64>
%0 = tensor.empty() : tensor<32xf64>
%1 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
outs(%0 : tensor<32xf64>) {
Expand Down
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir
Expand Up @@ -32,7 +32,7 @@
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK-DAG: %[[VAL_10a:.*]] = linalg.init_tensor [8] : tensor<8xi64>
// CHECK-DAG: %[[VAL_10a:.*]] = tensor.empty() : tensor<8xi64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_10a]] : memref<8xi64>
// CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i64) outs(%[[VAL_10]] : memref<8xi64>)
// CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
Expand All @@ -50,7 +50,7 @@
// CHECK: return %[[VAL_20]] : tensor<8xi64>
// CHECK: }
func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
%init = linalg.init_tensor [8] : tensor<8xi64>
%init = tensor.empty() : tensor<8xi64>
%r = linalg.generic #trait_1d
ins(%arga: tensor<8xi64, #SparseVector>)
outs(%init: tensor<8xi64>) {
Expand All @@ -73,7 +73,7 @@ func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi64>
// CHECK-DAG: %[[VAL_9a:.*]] = linalg.init_tensor [8] : tensor<8xi64>
// CHECK-DAG: %[[VAL_9a:.*]] = tensor.empty() : tensor<8xi64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_9a]] : memref<8xi64>
// CHECK-DAG: linalg.fill ins(%[[VAL_3]] : i64) outs(%[[VAL_9]] : memref<8xi64>)
// CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
Expand Down Expand Up @@ -112,7 +112,7 @@ func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8
// CHECK: return %[[VAL_35]] : tensor<8xi64>
// CHECK: }
func.func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
%init = linalg.init_tensor [8] : tensor<8xi64>
%init = tensor.empty() : tensor<8xi64>
%r = linalg.generic #trait_1d
ins(%arga: tensor<8xi64, #SparseVector>)
outs(%init: tensor<8xi64>) {
Expand Down
105 changes: 105 additions & 0 deletions mlir/test/Dialect/Tensor/canonicalize.mlir
Expand Up @@ -1523,3 +1523,108 @@ func.func @dont_fold_mismatched_parameters(%input: tensor<1x2x2x4xf32>) -> tenso
%1 = tensor.insert_slice %0 into %input[%c0, 1, %c0, 0] [1, 1, 2, 4] [1, 1, 1, 1] : tensor<1x2x4xf32> into tensor<1x2x2x4xf32>
return %1: tensor<1x2x2x4xf32>
}

// -----

func.func @empty_canonicalize() -> (tensor<4x5x?xf32>) {
%c6 = arith.constant 6 : index
%0 = tensor.empty(%c6) : tensor<4x5x?xf32>
return %0 : tensor<4x5x?xf32>
}
// CHECK: func @empty_canonicalize
// CHECK: %[[T0:.+]] = tensor.empty() : tensor<4x5x6xf32>
// CHECK: %[[T1:.+]] = tensor.cast %[[T0]] : tensor<4x5x6xf32> to tensor<4x5x?xf32>
// CHECK: return %[[T1]]

// -----

func.func @empty_reshape_expansion(%arg0 : index) -> tensor<2x3x5x4x?x7xf32> {
%0 = tensor.empty(%arg0) : tensor<6x5x?xf32>
%1 = tensor.expand_shape %0 [[0, 1], [2], [3, 4, 5]]
: tensor<6x5x?xf32> into tensor<2x3x5x4x?x7xf32>
return %1 : tensor<2x3x5x4x?x7xf32>
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 floordiv 28)>
// CHECK: func @empty_reshape_expansion
// CHECK-SAME: %[[ARG0:.+]]: index
// CHECK-NEXT: %[[D:.+]] = affine.apply #[[MAP]]()[%[[ARG0]]]
// CHECK-NEXT: %[[INIT:.+]] = tensor.empty(%[[D]])
// CHECK-NEXT: return %[[INIT]]

// -----

func.func @empty_reshape_collapse(%arg0 : index) -> tensor<6x5x?xf32> {
%0 = tensor.empty(%arg0) : tensor<2x3x5x4x?x7xf32>
%1 = tensor.collapse_shape %0 [[0, 1], [2], [3, 4, 5]]
: tensor<2x3x5x4x?x7xf32> into tensor<6x5x?xf32>
return %1 : tensor<6x5x?xf32>
}
// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> (s0 * 28)>
// CHECK: func @empty_reshape_collapse
// CHECK-SAME: %[[ARG0:.+]]: index
// CHECK-NEXT: %[[D:.+]] = affine.apply #[[MAP]]()[%[[ARG0]]]
// CHECK-NEXT: %[[INIT:.+]] = tensor.empty(%[[D]])
// CHECK-NEXT: return %[[INIT]]

// -----

func.func @fold_empty_tensor_with_slice
(%arg0 : index, %arg1 : index) -> tensor<5x?x20xf32>
{
%0 = tensor.empty(%arg0) : tensor<?x10x40xf32>
%1 = tensor.extract_slice %0[0, 0, 0] [5, %arg1, 20] [1, 1, 1]
: tensor<?x10x40xf32> to tensor<5x?x20xf32>
return %1 : tensor<5x?x20xf32>
}
// CHECK: func @fold_empty_tensor_with_slice
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK: %[[T0:.+]] = tensor.empty(%[[ARG1]])
// CHECK: return %[[T0]]

// -----

func.func @fold_empty_tensor_with_cast(%arg0 : index) -> tensor<1x12xf32> {
%0 = tensor.empty(%arg0) : tensor<?x12xf32>
%1 = tensor.cast %0 : tensor<?x12xf32> to tensor<1x12xf32>
return %1 : tensor<1x12xf32>
}
// CHECK: func @fold_empty_tensor_with_cast(%[[ARG0:.+]]: index)
// CHECK: %[[T0:.+]] = tensor.empty() : tensor<1x12xf32>
// CHECK: return %[[T0]] : tensor<1x12xf32>

// -----

func.func private @some_use(%i : index, %j : index)

// CHECK-LABEL: func @empty_tensor_canonicalize
// CHECK-SAME: %[[I:.*]]: index
func.func @empty_tensor_canonicalize(%i : index) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index

// CHECK-NOT: tensor.empty
%0 = tensor.empty(%i) : tensor<?x42xf32>

// CHECK-NOT: tensor.dim
%1 = tensor.dim %0, %c0: tensor<?x42xf32>
%2 = tensor.dim %0, %c1: tensor<?x42xf32>

// CHECK: %[[c42:.*]] = arith.constant 42 : index
// CHECK: call @some_use(%[[I]], %[[c42]])
call @some_use(%1, %2) : (index, index) -> ()

return
}

// -----

// CHECK-LABEL: func @rank_reducing_empty_tensor_extract
func.func @rank_reducing_empty_tensor_extract(%sz : index, %idx : index) -> tensor<2xf32> {
// CHECK: tensor.empty() : tensor<2xf32>
%a = tensor.empty(%sz) : tensor<?x2xf32>

// CHECK-NOT: extract
%r = tensor.extract_slice %a[%idx, 0] [1, 2] [1, 1] : tensor<?x2xf32> to tensor<2xf32>
return %r: tensor<2xf32>
}
14 changes: 7 additions & 7 deletions mlir/test/Dialect/Tensor/extract-slice-from-collapse-shape.mlir
Expand Up @@ -14,7 +14,7 @@ func.func @extract_slice_static(%input: tensor<3x5x7x11xf32>) -> tensor<20x11xf3
// CHECK-DAG: %[[c3:.+]] = arith.constant 3 : index
// CHECK-DAG: %[[c5:.+]] = arith.constant 5 : index
// CHECK-DAG: %[[c7:.+]] = arith.constant 7 : index
// CHECK-DAG: %[[init:.+]] = linalg.init_tensor [20, 11] :
// CHECK-DAG: %[[init:.+]] = tensor.empty() : tensor<20x11xf32>
// CHECK-DAG: %[[tile:.+]] = scf.for %[[iv:.+]] = %[[c0]] to %[[c20]] step %[[c1]] iter_args(%[[iterArg:.+]] = %[[init]])
// CHECK: %[[multiIndex:.+]]:3 = affine.delinearize_index %[[iv]] into (%[[c3]], %[[c5]], %[[c7]]
// CHECK: %[[slice:.+]] = tensor.extract_slice %[[arg0]][%[[multiIndex]]#0, %[[multiIndex]]#1, %[[multiIndex]]#2, 0] [1, 1, 1, 11] [1, 1, 1, 1] :
Expand All @@ -28,7 +28,7 @@ func.func @extract_slice_static(%input: tensor<3x5x7x11xf32>) -> tensor<20x11xf3
// FOREACH-DAG: %[[c3:.+]] = arith.constant 3 : index
// FOREACH-DAG: %[[c5:.+]] = arith.constant 5 : index
// FOREACH-DAG: %[[c7:.+]] = arith.constant 7 : index
// FOREACH-DAG: %[[init:.+]] = linalg.init_tensor [20, 11] :
// FOREACH-DAG: %[[init:.+]] = tensor.empty() : tensor<20x11xf32>
// FOREACH: %[[tile:.+]] = scf.foreach_thread (%[[iv:.+]]) in (%[[c20]]) shared_outs(%[[dest:.+]] = %[[init]])
// FOREACH: %[[multiIndex:.+]]:3 = affine.delinearize_index %[[iv]] into (%[[c3]], %[[c5]], %[[c7]]
// FOREACH: %[[slice:.+]] = tensor.extract_slice %[[arg0]][%[[multiIndex]]#0, %[[multiIndex]]#1, %[[multiIndex]]#2, 0] [1, 1, 1, 11] [1, 1, 1, 1] :
Expand All @@ -54,7 +54,7 @@ func.func @extract_slice_static_strided(%input: tensor<3x5x7x11xf32>) -> tensor<
// CHECK-DAG: %[[c3:.+]] = arith.constant 3 : index
// CHECK-DAG: %[[c5:.+]] = arith.constant 5 : index
// CHECK-DAG: %[[c7:.+]] = arith.constant 7 : index
// CHECK: %[[init:.+]] = linalg.init_tensor [10, 5] :
// CHECK: %[[init:.+]] = tensor.empty() : tensor<10x5xf32>
// CHECK: %[[tile:.+]] = scf.for %[[iv:.+]] = %[[c0]] to %[[c10]] step %[[c1]] iter_args(%[[iterArg:.+]] = %[[init]])
// CHECK: %[[inputIv:.+]] = affine.apply #[[$map0]](%[[iv]])
// CHECK: %[[multiIndex:.+]]:3 = affine.delinearize_index %[[inputIv]] into (%[[c3]], %[[c5]], %[[c7]]
Expand All @@ -80,7 +80,7 @@ func.func @extract_slice_dynamic(%input: tensor<3x?x?x11xf32>, %offt: index, %si
// CHECK-DAG: %[[c1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[c2:.+]] = arith.constant 2 : index
// CHECK-DAG: %[[c3:.+]] = arith.constant 3 : index
// CHECK: %[[init:.+]] = linalg.init_tensor [%[[sz]], 5] : tensor<?x5xf32>
// CHECK: %[[init:.+]] = tensor.empty(%[[sz]]) : tensor<?x5xf32>
// CHECK-DAG: %[[d1:.+]] = tensor.dim %arg0, %[[c1]] : tensor<3x?x?x11xf32>
// CHECK-DAG: %[[d2:.+]] = tensor.dim %arg0, %[[c2]] : tensor<3x?x?x11xf32>
// CHECK: %[[tile:.+]] = scf.for %[[iv:.+]] = %[[c0]] to %[[sz]] step %[[c1]] iter_args(%[[iterArg:.+]] = %[[init]])
Expand Down Expand Up @@ -109,7 +109,7 @@ func.func @extract_slice_dynamic_multidim(%input: tensor<3x?x?x11x?xf32>, %offt0
// CHECK-DAG: %[[c3:.+]] = arith.constant 3 : index
// CHECK-DAG: %[[c4:.+]] = arith.constant 4 : index
// CHECK-DAG: %[[c11:.+]] = arith.constant 11 : index
// CHECK: %[[init:.+]] = linalg.init_tensor [%[[sz1]], %[[sz2]]] : tensor<?x?xf32>
// CHECK: %[[init:.+]] = tensor.empty(%[[sz1]], %[[sz2]]) : tensor<?x?xf32>
// CHECK-DAG: %[[d1:.+]] = tensor.dim %[[arg0]], %[[c1]] :
// CHECK-DAG: %[[d2:.+]] = tensor.dim %[[arg0]], %[[c2]] :
// CHECK-DAG: %[[d4:.+]] = tensor.dim %[[arg0]], %[[c4]] :
Expand All @@ -133,7 +133,7 @@ func.func @extract_slice_dynamic_multidim(%input: tensor<3x?x?x11x?xf32>, %offt0
// FOREACH-DAG: %[[c3:.+]] = arith.constant 3 : index
// FOREACH-DAG: %[[c4:.+]] = arith.constant 4 : index
// FOREACH-DAG: %[[c11:.+]] = arith.constant 11 : index
// FOREACH: %[[init:.+]] = linalg.init_tensor [%[[sz1]], %[[sz2]]] : tensor<?x?xf32>
// FOREACH: %[[init:.+]] = tensor.empty(%[[sz1]], %[[sz2]]) : tensor<?x?xf32>
// FOREACH-DAG: %[[d1:.+]] = tensor.dim %[[arg0]], %[[c1]] :
// FOREACH-DAG: %[[d2:.+]] = tensor.dim %[[arg0]], %[[c2]] :
// FOREACH-DAG: %[[d4:.+]] = tensor.dim %[[arg0]], %[[c4]] :
Expand Down Expand Up @@ -170,7 +170,7 @@ func.func @no_sliced_linearized_dims(%input: tensor<30x11x100xf32>, %offt: index
%collapsed = tensor.collapse_shape %input [[0, 1], [2]] : tensor<30x11x100xf32> into tensor<330x100xf32>
%slice = tensor.extract_slice %collapsed [0, %offt] [330, %size] [1, 1] : tensor<330x100xf32> to tensor<330x?xf32>
// CHECK-NOT: scf.for
// CHECK: %[[init:.+]] = linalg.init_tensor [330, %[[arg2]]]
// CHECK: %[[init:.+]] = tensor.empty(%[[arg2]])
// CHECK: %[[e:.+]] = tensor.extract_slice %[[arg0]][0, 0, %[[arg1]]] [30, 11, %[[arg2]]] [1, 1, 1]
// CHECK: %[[c:.+]] = tensor.collapse_shape %[[e]] {{\[}}[0, 1], [2]]
// CHECK: %[[res:.+]] = tensor.insert_slice %[[c]] into %[[init]]
Expand Down
8 changes: 8 additions & 0 deletions mlir/test/Dialect/Tensor/invalid.mlir
Expand Up @@ -514,3 +514,11 @@ func.func @scatter_wrong_result_type(
(tensor<f32>, tensor<4x5x6xf32>, tensor<1x2x3xindex>) -> tensor<1x2x1xf32>
return
}

// -----

func.func @empty_wrong_number_of_operands(%sz : index) {
// expected-error@+1 {{incorrect number of dynamic sizes, has 1, expected 2}}
%out = tensor.empty(%sz) : tensor<2x?x?x5xf32>
return
}
8 changes: 8 additions & 0 deletions mlir/test/Dialect/Tensor/ops.mlir
Expand Up @@ -13,6 +13,14 @@ func.func @cast(%arg0: tensor<*xf32>, %arg1 : tensor<4x4xf32>, %arg2: tensor<?x?
return
}

// CHECK-LABEL: func @empty(
// CHECK-SAME: %[[sz:.*]]: index
func.func @empty(%sz: index) -> tensor<5x?x6xf32> {
// CHECK: tensor.empty(%[[sz]]) : tensor<5x?x6xf32>
%0 = tensor.empty(%sz) : tensor<5x?x6xf32>
return %0 : tensor<5x?x6xf32>
}

// CHECK-LABEL: func @extract(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?x?x?xf32>,
// CHECK-SAME: %[[INDEX:.*]]: index) {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s -test-linalg-transform-patterns=test-linalg-to-vector-patterns \
// RUN: -linalg-init-tensor-to-alloc-tensor -linalg-bufferize -arith-bufferize \
// RUN: -empty-tensor-to-alloc-tensor -linalg-bufferize -arith-bufferize \
// RUN: -bufferization-bufferize -tensor-bufferize -func-bufferize \
// RUN: -finalizing-bufferize -buffer-deallocation \
// RUN: -convert-linalg-to-loops -convert-scf-to-cf -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
from mlir.dialects import func
from mlir.dialects import linalg
from mlir.dialects import sparse_tensor
from mlir.dialects import tensor
from mlir.dialects.linalg.opdsl import lang

from . import mlir_pytaco_utils as utils
Expand Down Expand Up @@ -899,9 +900,9 @@ def emit_tensor_init(self) -> ir.RankedTensorType:
if self.dst_format is None or self.dst_format.rank() == 0:
# Initialize the dense tensor.
ir_type = _mlir_type_from_taco_type(self.dst_dtype)
tensor = linalg.InitTensorOp(self.dst_dims, ir_type).result
empty = tensor.EmptyOp(self.dst_dims, ir_type).result
zero = arith.ConstantOp(ir_type, 0.0)
return linalg.fill(zero, outs=[tensor])
return linalg.fill(zero, outs=[empty])

# Initialize the sparse tensor.
mlir_type = _mlir_tensor_type(self.dst_dtype, self.dst_dims,
Expand Down Expand Up @@ -1194,12 +1195,12 @@ def from_array(array: np.ndarray) -> "Tensor":
"""
if array.dtype != np.float32 and array.dtype != np.float64:
raise ValueError(f"Expected floating point value type: {array.dtype}.")
tensor = Tensor(
t = Tensor(
array.shape,
dtype=_nptype_to_taco_type(array.dtype.type),
is_dense=True)
tensor._dense_storage = np.copy(array)
return tensor
t._dense_storage = np.copy(array)
return t

@staticmethod
def from_coo(
Expand Down Expand Up @@ -1234,9 +1235,9 @@ def from_coo(
# The size of each dimension is one more than such a maximum coordinate
# value.
shape = [c + 1 for c in max_coordinate]
tensor = Tensor(shape, fmt, dtype=dtype)
tensor._coords = coordinates
tensor._values = values
t = Tensor(shape, fmt, dtype=dtype)
t._coords = coordinates
t._values = values

return t

Expand All @@ -1261,10 +1262,10 @@ def from_file(
sparse_tensor, shape = utils.create_sparse_tensor(filename,
fmt.format_pack.formats,
_dtype_to_mlir_str(dtype))
tensor = Tensor(shape.tolist(), fmt, dtype=dtype)
tensor._set_packed_sparse_tensor(sparse_tensor)
t = Tensor(shape.tolist(), fmt, dtype=dtype)
t._set_packed_sparse_tensor(sparse_tensor)

return tensor
return t

def to_file(self, filename: str) -> None:
"""Output the tensor value to a file.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ func.func @gemm_fill_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) ->
%cst = arith.constant 0.0 : f32
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
%init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<?x?xf32>) -> tensor<?x?xf32>
%gemm = linalg.matmul {__internal_linalg_transform__ = "fusion"}
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
Expand All @@ -16,7 +16,7 @@ func.func @gemm_fill_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>) ->
// CHECK: func.func @gemm_fill_fusion(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>)
// CHECK: %[[INIT:.+]] = linalg.init_tensor
// CHECK: %[[INIT:.+]] = tensor.empty
// CHECK: scf.for %[[IV0:[a-zA-Z0-9]+]] =
// CHECK-SAME: iter_args(%[[ITERARG0:.+]] = %[[INIT]])
// CHECK: scf.for %[[IV1:[a-zA-Z0-9]+]] =
Expand All @@ -41,7 +41,7 @@ func.func @gemm_generic_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
%cst = arith.constant 0.0 : f32
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
%init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%fill = linalg.fill ins(%cst : f32) outs(%init : tensor<?x?xf32>) -> tensor<?x?xf32>
%gemm = linalg.matmul
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
Expand All @@ -61,7 +61,7 @@ func.func @gemm_generic_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>,
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<?xf32>)
// CHECK: %[[INIT:.+]] = linalg.init_tensor
// CHECK: %[[INIT:.+]] = tensor.empty
// CHECK: scf.for %[[IV0:[a-zA-Z0-9]+]] =
// CHECK-SAME: iter_args(%[[ITERARG0:.+]] = %[[INIT]])
// CHECK: scf.for %[[IV1:[a-zA-Z0-9]+]] =
Expand Down Expand Up @@ -90,12 +90,12 @@ func.func @gemm_gemm_fusion(%lhs0 : tensor<?x?xf32>, %rhs0 : tensor<?x?xf32>, %r
%cst = arith.constant 0.0 : f32
%d0 = tensor.dim %lhs0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %rhs0, %c1 : tensor<?x?xf32>
%init0 = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init0 = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%fill0 = linalg.fill ins(%cst : f32) outs(%init0 : tensor<?x?xf32>) -> tensor<?x?xf32>
%gemm0 = linalg.matmul
ins(%lhs0, %rhs0 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%fill0 : tensor<?x?xf32>) -> tensor<?x?xf32>
%d2 = tensor.dim %rhs1, %c1 : tensor<?x?xf32>
%init1 = linalg.init_tensor [%d0, %d2] : tensor<?x?xf32>
%init1 = tensor.empty(%d0, %d2) : tensor<?x?xf32>
%fill1 = linalg.fill ins(%cst : f32) outs(%init1 : tensor<?x?xf32>) -> tensor<?x?xf32>
%gemm1 = linalg.matmul {__internal_linalg_transform__ = "gemm_fusion"}
ins(%gemm0, %rhs1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%fill1 : tensor<?x?xf32>) -> tensor<?x?xf32>
Expand All @@ -109,9 +109,9 @@ func.func @gemm_gemm_fusion(%lhs0 : tensor<?x?xf32>, %rhs0 : tensor<?x?xf32>, %r
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[D0:.+]] = tensor.dim %[[LHS0]], %[[C0]]
// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[RHS0]], %[[C1]]
// CHECK-DAG: %[[INIT0:.+]] = linalg.init_tensor [%[[D0]], %[[D1]]]
// CHECK-DAG: %[[INIT0:.+]] = tensor.empty(%[[D0]], %[[D1]])
// CHECK-DAG: %[[D2:.+]] = tensor.dim %[[RHS1]], %[[C1]]
// CHECK: %[[INIT1:.+]] = linalg.init_tensor [%[[D0]], %[[D2]]]
// CHECK: %[[INIT1:.+]] = tensor.empty(%[[D0]], %[[D2]])
// CHECK: scf.for %[[IV:[a-zA-Z0-9]+]] =
// CHECK-SAME: iter_args(%[[ITERARG:.+]] = %[[INIT1]])
// CHECK-DAG: %[[LHS0_TILE:.+]] = tensor.extract_slice %[[LHS0]][%[[IV]], 0]
Expand Down Expand Up @@ -140,12 +140,12 @@ func.func @gemm_transpose_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32
%cst = arith.constant 0.0 : f32
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
%init0 = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%init0 = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%fill = linalg.fill ins(%cst : f32) outs(%init0 : tensor<?x?xf32>) -> tensor<?x?xf32>
%gemm = linalg.matmul
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%fill : tensor<?x?xf32>) -> tensor<?x?xf32>
%init1 = linalg.init_tensor [%d1, %d0] : tensor<?x?xf32>
%init1 = tensor.empty(%d1, %d0) : tensor<?x?xf32>
%transpose = linalg.generic {
__internal_linalg_transform__ = "fusion",
indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d1, d0)>],
Expand All @@ -163,8 +163,8 @@ func.func @gemm_transpose_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[D0:.+]] = tensor.dim %[[ARG0]], %[[C0]]
// CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK-DAG: %[[INIT0:.+]] = linalg.init_tensor [%[[D0]], %[[D1]]]
// CHECK-DAG: %[[INIT1:.+]] = linalg.init_tensor [%[[D1]], %[[D0]]]
// CHECK-DAG: %[[INIT0:.+]] = tensor.empty(%[[D0]], %[[D1]])
// CHECK-DAG: %[[INIT1:.+]] = tensor.empty(%[[D1]], %[[D0]])
// CHECK: scf.for %[[IV0:[a-zA-Z0-9]+]] =
// CHECK-SAME: iter_args(%[[ITERARG0:.+]] = %[[INIT1]])
// CHECK: scf.for %[[IV1:[a-zA-Z0-9]+]] =
Expand Down Expand Up @@ -192,7 +192,7 @@ func.func @interchange_matmul_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?
%d0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
%d1 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
%cst = arith.constant 0.0 : f32
%0 = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
%0 = tensor.empty(%d0, %d1) : tensor<?x?xf32>
%1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<?x?xf32>) -> tensor<?x?xf32>
%2 = linalg.matmul
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
Expand All @@ -211,7 +211,7 @@ func.func @interchange_matmul_fusion(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?
// CHECK: func.func @interchange_matmul_fusion(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>)
// CHECK: %[[INIT:.+]] = linalg.init_tensor
// CHECK: %[[INIT:.+]] = tensor.empty
// CHECK: scf.for %[[IV0:[a-zA-Z0-9]+]] =
// CHECK-SAME: iter_args(%[[ITERARG0:.+]] = %[[INIT]])
// CHECK: scf.for %[[IV1:[a-zA-Z0-9]+]] =
Expand Down Expand Up @@ -243,7 +243,7 @@ func.func @matmul_plus_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>,
outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
%3 = tensor.dim %2, %c0 : tensor<?x?xf32>
%4 = tensor.dim %2, %c1 : tensor<?x?xf32>
%5 = linalg.init_tensor [%3, %4] : tensor<?x?xf32>
%5 = tensor.empty(%3, %4) : tensor<?x?xf32>
%6 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d0, d1)>,
Expand Down Expand Up @@ -302,7 +302,7 @@ func.func @matmul_plus_transpose_matmul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x
outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
%3 = tensor.dim %2, %c0 : tensor<?x?xf32>
%4 = tensor.dim %2, %c1 : tensor<?x?xf32>
%5 = linalg.init_tensor [%3, %4] : tensor<?x?xf32>
%5 = tensor.empty(%3, %4) : tensor<?x?xf32>
%6 = linalg.generic
{indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
affine_map<(d0, d1) -> (d1, d0)>,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,8 +87,8 @@ func.func @simple_matmul_memref(%arg0 : memref<?x?xf32>, %arg1 : memref<?x?xf32>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
func.func @multi_result(%arg0 : tensor<128x200x300xf32>) -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
%init0 = linalg.init_tensor [128, 300, 200] : tensor<128x300x200xf32>
%init1 = linalg.init_tensor [300, 128, 200] : tensor<300x128x200xf32>
%init0 = tensor.empty() : tensor<128x300x200xf32>
%init1 = tensor.empty() : tensor<300x128x200xf32>
%0:2 = linalg.generic {
indexing_maps = [#map0, #map1, #map2],
iterator_types = ["parallel", "parallel", "parallel"]}
Expand All @@ -108,8 +108,8 @@ func.func @multi_result(%arg0 : tensor<128x200x300xf32>) -> (tensor<128x300x200x
// CHECK-DAG: %[[C20:.+]] = arith.constant 20 : index
// CHECK-DAG: %[[C128:.+]] = arith.constant 128 : index
// CHECK-DAG: %[[C300:.+]] = arith.constant 300 : index
// CHECK-DAG: %[[INIT0:.+]] = linalg.init_tensor [128, 300, 200]
// CHECK-DAG: %[[INIT1:.+]] = linalg.init_tensor [300, 128, 200]
// CHECK-DAG: %[[INIT0:.+]] = tensor.empty()
// CHECK-DAG: %[[INIT1:.+]] = tensor.empty()
// CHECK: %[[OUTER:[a-zA-Z0-9]+]]:2 = scf.for %[[IV0:[a-zA-Z0-9]+]] = %[[C0]] to %[[C128]] step %[[C10]]
// CHECK-SAME: iter_args(%[[ARG1:[a-zA-Z0-9]+]] = %[[INIT0]], %[[ARG2:[a-zA-Z0-9]+]] = %[[INIT1]])
// CHECK: %[[TS_Y:.+]] = affine.min #[[MAP0]](%[[IV0]])[%[[C10]], %[[C128]]]
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
Expand Up @@ -136,8 +136,8 @@ struct RewriteExtractSliceFromCollapseShapeBase
// Create the destination tensor using the above values.
Type elementType = op.getSourceType().getElementType();
SmallVector<OpFoldResult> outputShape = getAsOpFoldResult(reifiedShapes[0]);
Value dest = rewriter.create<linalg::InitTensorOp>(
op->getLoc(), outputShape, elementType);
Value dest = rewriter.create<tensor::EmptyOp>(op->getLoc(), outputShape,
elementType);

// Calculate the parameters for the tile loop nest.
FailureOr<tensor::ExtractSliceFromCollapseHelper> params =
Expand Down
5 changes: 3 additions & 2 deletions mlir/test/python/dialects/linalg/opdsl/emit_matmul.py
Expand Up @@ -4,6 +4,7 @@
from mlir.dialects import builtin
from mlir.dialects import func
from mlir.dialects import linalg
from mlir.dialects import tensor

from mlir.dialects.linalg.opdsl.lang import *

Expand Down Expand Up @@ -50,7 +51,7 @@ def matmul_poly(
# CHECK-LABEL: func @test_matmul_mono
# CHECK-SAME: %[[A:.+]]: tensor<4x16xf32>
# CHECK-SAME: %[[B:.+]]: tensor<16x8xf32>
# CHECK: %[[INITC:.+]] = linalg.init_tensor [4, 8] : tensor<4x8xf32>
# CHECK: %[[INITC:.+]] = tensor.empty() : tensor<4x8xf32>
# CHECK: linalg.generic
# CHECK-SAME: indexing_maps = [#[[$MUL_MAP_A]], #[[$MUL_MAP_B]], #[[$MUL_MAP_C]]]
# CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"]
Expand All @@ -59,7 +60,7 @@ def matmul_poly(
@func.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8), f32))
def test_matmul_mono(lhs, rhs):
init_result = linalg.InitTensorOp([4, 8], f32)
init_result = tensor.EmptyOp([4, 8], f32)
return matmul_mono(lhs, rhs, outs=[init_result.result])

# CHECK-LABEL: @test_i8i8i32_matmul
Expand Down
52 changes: 6 additions & 46 deletions mlir/test/python/dialects/linalg/ops.py
@@ -1,6 +1,6 @@
# RUN: %PYTHON %s | FileCheck %s

from mlir.dialects import arith, builtin, func, linalg
from mlir.dialects import arith, builtin, func, linalg, tensor
from mlir.dialects.linalg.opdsl.lang import *
from mlir.ir import *

Expand All @@ -11,46 +11,6 @@ def run(f):
return f


# CHECK-LABEL: TEST: testInitTensor
@run
def testInitTensor():
with Context() as ctx, Location.unknown():
module = Module.create()
f32 = F32Type.get()
with InsertionPoint(module.body):
# CHECK-LABEL: func @static_sizes
# CHECK: %0 = linalg.init_tensor [3, 4] : tensor<3x4xf32>
@func.FuncOp.from_py_func()
def static_sizes():
return linalg.InitTensorOp([3, 4], f32)

# CHECK-LABEL: func @dynamic_sizes
# CHECK: %0 = linalg.init_tensor [%arg0, %arg1] : tensor<?x?xf32>
@func.FuncOp.from_py_func(IndexType.get(), IndexType.get())
def dynamic_sizes(d0, d1):
return linalg.InitTensorOp([d0, d1], f32)

# CHECK-LABEL: func @zero_d
# CHECK: %0 = linalg.init_tensor [] : tensor<f32>
@func.FuncOp.from_py_func()
def zero_d():
return linalg.InitTensorOp([], f32)

print(module)


# CHECK-LABEL: TEST: testInitTensorStaticSizesAttribute
@run
def testInitTensorStaticSizesAttribute():
with Context() as ctx, Location.unknown():
module = Module.create()
f32 = F32Type.get()
with InsertionPoint(module.body):
op = linalg.InitTensorOp([3, 4], f32)
# CHECK: [3, 4]
print(op.attributes["static_sizes"])


# CHECK-LABEL: TEST: testFill
@run
def testFill():
Expand Down Expand Up @@ -92,7 +52,7 @@ def testNamedStructuredOpCustomForm():
@func.FuncOp.from_py_func(
RankedTensorType.get((4, 8), f32), RankedTensorType.get((4, 8), f32))
def named_form(lhs, rhs):
init_result = linalg.InitTensorOp([4, 8], f32)
init_result = tensor.EmptyOp([4, 8], f32)
# Check for the named form with custom format
# CHECK: linalg.elemwise_unary
# CHECK-SAME: cast = #linalg.type_fn<cast_signed>
Expand Down Expand Up @@ -127,7 +87,7 @@ def testNamedStructuredOpGenericForm():
RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8),
f32))
def named_form(lhs, rhs):
init_result = linalg.InitTensorOp([4, 8], f32)
init_result = tensor.EmptyOp([4, 8], f32)
# CHECK: "linalg.matmul"(%{{.*}})
# CHECK-NEXT: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: f32):
# CHECK-NEXT: arith.mulf{{.*}} (f32, f32) -> f32
Expand All @@ -153,7 +113,7 @@ def testNamedStructuredAsGenericOp():
RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8),
f32))
def generic_form(lhs, rhs):
init_result = linalg.InitTensorOp([4, 8], f32)
init_result = tensor.EmptyOp([4, 8], f32)
# CHECK: linalg.generic
return linalg.matmul(
lhs, rhs, outs=[init_result.result], emit_generic=True)
Expand All @@ -178,8 +138,8 @@ def pass_an_op_directly(arg0, arg1):
lhs = linalg.fill(one, outs=[arg0])
# CHECK: %[[RHS:.*]] = linalg.fill
rhs = linalg.fill(one, outs=[arg1])
# CHECK: %[[INIT:.*]] = linalg.init_tensor
init = linalg.InitTensorOp([4, 8], f32)
# CHECK: %[[INIT:.*]] = tensor.empty
init = tensor.EmptyOp([4, 8], f32)
# CHECK: linalg.matmul
# CHECK: ins(%[[LHS]], %[[RHS]]
# CHECK: outs(%[[INIT]]
Expand Down
34 changes: 34 additions & 0 deletions mlir/test/python/dialects/tensor.py
Expand Up @@ -37,3 +37,37 @@ def tensor_static_dim(t):
return [d0.result, d1.result]

print(module)


# CHECK-LABEL: TEST: testEmptyOp
@run
def testEmptyOp():
with Context() as ctx, Location.unknown():
module = Module.create()
f32 = F32Type.get()
with InsertionPoint(module.body):
# CHECK-LABEL: func @static_sizes
# CHECK: %0 = tensor.empty() : tensor<3x4xf32>
@func.FuncOp.from_py_func()
def static_sizes():
return tensor.EmptyOp([3, 4], f32)

# CHECK-LABEL: func @dynamic_sizes
# CHECK: %0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf32>
@func.FuncOp.from_py_func(IndexType.get(), IndexType.get())
def dynamic_sizes(d0, d1):
return tensor.EmptyOp([d0, d1], f32)

# CHECK-LABEL: func @mixed_static_dynamic_sizes
# CHECK: %0 = tensor.empty(%arg0) : tensor<?x4xf32>
@func.FuncOp.from_py_func(IndexType.get())
def mixed_static_dynamic_sizes(d0):
return tensor.EmptyOp([d0, 4], f32)

# CHECK-LABEL: func @zero_d
# CHECK: %0 = tensor.empty() : tensor<f32>
@func.FuncOp.from_py_func()
def zero_d():
return tensor.EmptyOp([], f32)

print(module)
1 change: 1 addition & 0 deletions utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
Expand Up @@ -5034,6 +5034,7 @@ cc_library(
hdrs = ["include/mlir/Dialect/Tensor/IR/Tensor.h"],
includes = ["include"],
deps = [
":AffineDialect",
":ArithDialect",
":ArithUtils",
":CastOpInterfaces",
Expand Down