44 changes: 22 additions & 22 deletions mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -21,19 +21,19 @@ func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>) -> tensor<?x1x?x1x?xf32>
} : tensor<?x1x?xf32> -> tensor<?x1x?x1x?xf32>
return %0 : tensor<?x1x?x1x?xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK-DAG: #[[MAP4:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
// CHECK-DAG: #[[MAP5:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>
// CHECK-DAG: #[[MAP6:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d2)>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK-DAG: #[[$MAP4:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
// CHECK-DAG: #[[$MAP5:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3)>
// CHECK-DAG: #[[$MAP6:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d4)>
// CHECK-LABEL: func @drop_one_trip_loops
// CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]], #[[MAP1]]]
// CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]], #[[$MAP1]]]
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP2]], #[[MAP3]]]
// CHECK-SAME: indexing_maps = [#[[$MAP2]], #[[$MAP3]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]
// CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP4]], #[[MAP5]], #[[MAP6]]]
// CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP4]], #[[$MAP5]], #[[$MAP6]]]

// -----

@@ -55,11 +55,11 @@ func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
} : tensor<1x1xf32> -> tensor<1x1xf32>
return %0 : tensor<1x1xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<() -> ()>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> ()>
// CHECK-LABEL: func @drop_all_loops
// CHECK: linalg.tensor_reshape %{{.*}} []
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
// CHECK-SAME: iterator_types = []

// -----
@@ -84,11 +84,11 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>) -> tensor<5xf32> {
} : tensor<1x5xf32> -> tensor<5xf32>
return %0 : tensor<5xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @leading_dim_1_canonicalization
// CHECK: linalg.tensor_reshape %{{.*}} [#[[MAP0]]]
// CHECK: linalg.tensor_reshape %{{.*}} [#[[$MAP0]]]
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP1]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP1]], #[[$MAP1]]]
// CHECK-SAME: iterator_types = ["parallel"]

// -----
@@ -120,13 +120,13 @@ func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) -> tensor<5x5
} : tensor<1x5xf32>, tensor<5x1xf32> -> tensor<5x5xf32>
return %2 : tensor<5x5xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d1)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @broadcast_test
// CHECK-NOT: linalg.tensor_reshape
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel"]
// CHECK-NOT: linalg.tensor_reshape

@@ -153,13 +153,13 @@ func @broadcast_scalar(%arg0 : tensor<1x1xf32>) -> tensor<?x?xf32>
} : tensor<1x1xf32> -> tensor<?x?xf32>
return %0 : tensor<?x?xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> ()>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> ()>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @broadcast_scalar
// CHECK-SAME: %[[ARG0:.*]]: tensor<1x1xf32>
// CHECK: %[[A:.*]] = linalg.tensor_reshape %[[ARG0]] []
// CHECK-SAME: tensor<1x1xf32> into tensor<f32>
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel"]
// CHECK-SAME: %[[A]]
20 changes: 10 additions & 10 deletions mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir
@@ -21,11 +21,11 @@ func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>) -> tensor<?x1x?x1x?xf32>
} : tensor<?x1x?xf32> -> tensor<?x1x?x1x?xf32>
return %0 : tensor<?x1x?x1x?xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d2)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d1, 0, d2)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d2)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, 0, d1, 0, d2)>
// CHECK-LABEL: func @drop_one_trip_loops
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]

// -----
@@ -48,10 +48,10 @@ func @drop_all_loops(%arg0 : tensor<1x1xf32>) -> tensor<1x1xf32>
} : tensor<1x1xf32> -> tensor<1x1xf32>
return %0 : tensor<1x1xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<() -> (0, 0)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> (0, 0)>
// CHECK-LABEL: func @drop_all_loops
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
// CHECK-SAME: iterator_types = []

// -----
@@ -74,10 +74,10 @@ func @drop_all_loops(%arg0 : memref<1x1xf32>, %arg1 : memref<1x1xf32>)
} : memref<1x1xf32>, memref<1x1xf32>
return
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<() -> (0, 0)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<() -> (0, 0)>
// CHECK-LABEL: func @drop_all_loops
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
// CHECK-SAME: iterator_types = []

// -----
@@ -102,9 +102,9 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>) -> tensor<5xf32> {
} : tensor<1x5xf32> -> tensor<5xf32>
return %0 : tensor<5xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0) -> (0, d0)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0) -> (d0)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0) -> (0, d0)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (d0)>
// CHECK-LABEL: func @leading_dim_1_canonicalization
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
// CHECK-SAME: iterator_types = ["parallel"]
64 changes: 32 additions & 32 deletions mlir/test/Dialect/Linalg/fusion-tensor.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s -linalg-fusion-for-tensor-ops -split-input-file | FileCheck %s

// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
#map0 = affine_map<(d0, d1) -> (d0, d1)>

// CHECK-LABEL: @add_mul_fusion
@@ -12,7 +12,7 @@ func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : te
linalg.yield %1 : f32
}: tensor<?x?xf32>, tensor<?x?xf32> -> tensor<?x?xf32>
// CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
// CHECK-SAME: indexing_maps = {{\[}}[[MAP0]], [[MAP0]], [[MAP0]], [[MAP0]]{{\]}}
// CHECK-SAME: indexing_maps = {{\[}}[[$MAP0]], [[$MAP0]], [[$MAP0]], [[$MAP0]]{{\]}}
%2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
// CHECK: ^{{[a-zA-Z0-9_]*}}
// CHECK-SAME: [[ARG0:%[a-zA-Z0-9_]*]]
@@ -31,8 +31,8 @@ func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : te

// -----

// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: [[MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: [[$MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1, d0)>

@@ -45,7 +45,7 @@ func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
linalg.yield %1 : f32
}: tensor<?x?xf32>, tensor<?x?xf32> -> tensor<?x?xf32>
// CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
// CHECK-SAME: indexing_maps = {{\[}}[[MAP0]], [[MAP1]], [[MAP0]], [[MAP0]]{{\]}}
// CHECK-SAME: indexing_maps = {{\[}}[[$MAP0]], [[$MAP1]], [[$MAP0]], [[$MAP0]]{{\]}}
%2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
^bb0(%arg5: f32, %arg6: f32): // no predecessors
%3 = mulf %arg5, %arg6 : f32
@@ -56,8 +56,8 @@ func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,

// -----

// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: [[MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: [[$MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d1, d0)>
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1, d0)>

@@ -70,7 +70,7 @@ func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
linalg.yield %1 : f32
}: tensor<?x?xf32>, tensor<?x?xf32> -> tensor<?x?xf32>
// CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
// CHECK-SAME: indexing_maps = {{\[}}[[MAP1]], [[MAP0]], [[MAP0]], [[MAP0]]{{\]}}
// CHECK-SAME: indexing_maps = {{\[}}[[$MAP1]], [[$MAP0]], [[$MAP0]], [[$MAP0]]{{\]}}
%2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
^bb0(%arg5: f32, %arg6: f32): // no predecessors
%3 = mulf %arg5, %arg6 : f32
@@ -81,8 +81,8 @@ func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,

// -----

// CHECK-DAG: [[MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: [[MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0)>
// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: [[$MAP1:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0)>
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d0)>
#map2 = affine_map<(d0) -> (d0)>
@@ -96,7 +96,7 @@ func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg
linalg.yield %1 : f32
}: tensor<?xf32>, tensor<?xf32> -> tensor<?xf32>
// CHECK: linalg.generic {args_in = 3 : i64, args_out = 1 : i64
// CHECK-SAME: indexing_maps = {{\[}}[[MAP1]], [[MAP1]], [[MAP0]], [[MAP0]]
// CHECK-SAME: indexing_maps = {{\[}}[[$MAP1]], [[$MAP1]], [[$MAP0]], [[$MAP0]]
%2 = linalg.generic {args_in = 2 : i64, args_out = 1 : i64, indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]} %0, %arg2 {
^bb0(%arg5: f32, %arg6: f32): // no predecessors
%3 = mulf %arg5, %arg6 : f32
@@ -107,7 +107,7 @@ func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg

// -----

// CHECK: #[[MAP0:.*]] = affine_map<() -> ()>
// CHECK: #[[$MAP0:.*]] = affine_map<() -> ()>
#map0 = affine_map<() -> ()>

// CHECK-LABEL: @add_mul_scalar_fusion
@@ -132,8 +132,8 @@ func @add_mul_scalar_fusion(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tenso

// -----

// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>

#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
@@ -160,13 +160,13 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
// CHECK: linalg.generic
// CHECK-SAME: args_in = 2
// CHECK-SAME: args_out = 1
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]]
// CHECK-NOT: linalg.generic

// -----

// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>

#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
@@ -192,7 +192,7 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xf32>,
// CHECK: linalg.generic
// CHECK-SAME: args_in = 2
// CHECK-SAME: args_out = 1
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]], #[[$MAP1]]]
// CHECK-NOT: linalg.generic

// -----
@@ -238,7 +238,7 @@ func @generic_op_constant_fusion(%arg0 : tensor<5x?x?xf32>) -> tensor<5x?x?xf32>
}: tensor<5xf32>, tensor<5x?x?xf32> -> tensor<5x?x?xf32>
return %1 : tensor<5x?x?xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK-LABEL: func @generic_op_constant_fusion
// CHECK: %[[CST:.*]] = constant {{.*}} : f32
// CHECK: linalg.generic
@@ -266,7 +266,7 @@ func @generic_op_zero_dim_constant_fusion(%arg0 : tensor<5x?x?xf32>)
}: tensor<f32>, tensor<5x?x?xf32> -> tensor<5x?x?xf32>
return %1 : tensor<5x?x?xf32>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
// CHECK-LABEL: func @generic_op_zero_dim_constant_fusion
// CHECK: %[[CST:.*]] = constant {{.*}} : f32
// CHECK: linalg.generic
@@ -303,13 +303,13 @@ func @generic_op_indexed_generic_op_fusion(%arg0: tensor<?x?xi32>,
}: tensor<?x?xi32> -> tensor<?x?xi32>
return
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @generic_op_indexed_generic_op_fusion
// CHECK-NOT: linalg.generic
// CHECK: linalg.indexed_generic
// CHECK-SAME: args_in = 2
// CHECK-SAME: args_out = 1
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]], #[[MAP0]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]], #[[$MAP0]]]
// CHECK: ^{{[a-zA-Z0-9_]*}}
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: index
@@ -350,12 +350,12 @@ func @indexed_generic_op_generic_op_fusion(%arg0: tensor<?x?xi32>,
} : tensor<?x?xi32>, tensor<?x?xi32> -> tensor<?x?xi32>
return
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @indexed_generic_op_generic_op_fusion
// CHECK: linalg.indexed_generic
// CHECK-SAME: args_in = 2
// CHECK-SAME: args_out = 1
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]], #[[MAP0]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]], #[[$MAP0]]]
// CHECK: ^{{[a-zA-Z0-9_]*}}
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: index
@@ -401,12 +401,12 @@ func @indexed_generic_op_fusion(%arg0: tensor<?x?xi32>) {
}: tensor<?x?xi32> -> tensor<?x?xi32>
return
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-LABEL: func @indexed_generic_op_fusion
// CHECK: linalg.indexed_generic
// CHECK-SAME: args_in = 1
// CHECK-SAME: args_out = 1
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]]]
// CHECK: ^{{[a-zA-Z0-9_]*}}
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: index
@@ -424,8 +424,8 @@ func @indexed_generic_op_fusion(%arg0: tensor<?x?xi32>) {

// -----

// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 4 + d2, d3)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>

#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
@@ -452,13 +452,13 @@ func @indexed_generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
// CHECK: linalg.indexed_generic
// CHECK-SAME: args_in = 1
// CHECK-SAME: args_out = 1
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
// CHECK-NOT: linalg.tensor_reshape

// -----

// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>
// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1 * 20 + d2 * 5 + d3)>

#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
func @indexed_generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
@@ -484,5 +484,5 @@ func @indexed_generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
// CHECK: linalg.indexed_generic
// CHECK-SAME: args_in = 1
// CHECK-SAME: args_out = 1
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
// CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP1]]]
// CHECK-NOT: linalg.tensor_reshape
38 changes: 19 additions & 19 deletions mlir/test/Dialect/Linalg/fusion.mlir
@@ -49,7 +49,7 @@ func @f1(%A: memref<?x?xf32, offset: 0, strides: [?, 1]>,

// -----

// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
%B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
%C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -91,9 +91,9 @@ func @f2(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
}
// CHECK-LABEL: func @f2
// CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
// CHECK-DAG: %[[C_0:.*]] = dim %[[C]], %c0{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK-DAG: %[[C_1:.*]] = dim %[[C]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK-DAG: %[[C_0:.*]] = dim %[[C]], %c0{{[_0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK-DAG: %[[C_1:.*]] = dim %[[C]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], %c1{{[_0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_0]] step %{{.*}} {
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
@@ -143,9 +143,9 @@ func @f3(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
}
// CHECK-LABEL: func @f3
// CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
// CHECK: %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_0]] step %{{.*}} {
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
@@ -199,9 +199,9 @@ func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
}
// CHECK-LABEL: func @f4
// CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
// CHECK: %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_0]] step %{{.*}} {
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[D_1]] step %{{.*}} {
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
@@ -212,7 +212,7 @@ func @f4(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,

// -----

// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 * s1)>
func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
%B: memref<?x?xf32, offset: 0, strides: [?, ?]>,
%C: memref<?x?xf32, offset: 0, strides: [?, ?]>,
@@ -258,9 +258,9 @@ func @f5(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
}
// CHECK-LABEL: func @f5
// CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
// CHECK-DAG: %[[B_1:.*]] = dim %[[B]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK-DAG: %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK-DAG: %[[B_1:.*]] = dim %[[B]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK-DAG: %[[D_0:.*]] = dim %[[D]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK-DAG: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: scf.for %[[I:.*]] = %{{.*}} to %[[D_0]] step %{{.*}} {
// CHECK: scf.for %[[J:.*]] = %{{.*}} to %[[B_1]] step %{{.*}} {
// CHECK: scf.for %[[K:.*]] = %{{.*}} to %[[D_1]] step %{{.*}} {
@@ -409,11 +409,11 @@ func @f7(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
}
// CHECK-LABEL: func @f7
// CHECK: (%[[A:.*]]:{{.*}}, %[[B:.*]]:{{.*}}, %[[C:.*]]:{{.*}}, %[[D:.*]]:{{.*}}, %[[E:.*]]:{{.*}})
// CHECK: %[[A_0:.*]] = dim %[[A]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[A_1:.*]] = dim %[[A]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[strided2D]]>
// CHECK: %[[A_0:.*]] = dim %[[A]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[A_1:.*]] = dim %[[A]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[C_1:.*]] = dim %[[C]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[C_0:.*]] = dim %[[C]], %c0{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: %[[D_1:.*]] = dim %[[D]], %c1{{_[0-9]*}} : memref<?x?xf32, #[[$strided2D]]>
// CHECK: linalg.matmul(%[[A]], %[[C]], %[[E]])
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[A_0]] step %{{.*}} {
// CHECK: scf.for %{{.*}} = %{{.*}} to %[[C_1]] step %{{.*}} {
256 changes: 128 additions & 128 deletions mlir/test/Dialect/Linalg/loops.mlir

Large diffs are not rendered by default.

52 changes: 26 additions & 26 deletions mlir/test/Dialect/Linalg/promote.mlir
@@ -5,8 +5,8 @@
#map2 = affine_map<(d0) -> (d0 + 4)>
#map3 = affine_map<(d0) -> (d0 + 3)>

// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$strided2D_dynamic:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>

func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
%c4 = constant 4 : index
@@ -44,25 +44,25 @@ func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: %[[tmpA:.*]] = alloc() : memref<32xi8>
// CHECK: %[[fullA:.*]] = std.view %[[tmpA]][{{.*}}][{{.*}}] : memref<32xi8> to memref<?x?xf32>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[partialA:.*]] = subview %[[fullA]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[strided2D_dynamic]]>
// CHECK: %[[partialA:.*]] = subview %[[fullA]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D_dynamic]]>
///
// CHECK: %[[tmpB:.*]] = alloc() : memref<48xi8>
// CHECK: %[[fullB:.*]] = std.view %[[tmpB]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xf32>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[partialB:.*]] = subview %[[fullB]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[strided2D_dynamic]]>
// CHECK: %[[partialB:.*]] = subview %[[fullB]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D_dynamic]]>
///
// CHECK: %[[tmpC:.*]] = alloc() : memref<24xi8>
// CHECK: %[[fullC:.*]] = std.view %[[tmpC]][{{.*}}][{{.*}}] : memref<24xi8> to memref<?x?xf32>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[partialC:.*]] = subview %[[fullC]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[strided2D_dynamic]]>
// CHECK: %[[partialC:.*]] = subview %[[fullC]]{{.*}} : memref<?x?xf32> to memref<?x?xf32, #[[$strided2D_dynamic]]>

// CHECK: linalg.copy(%[[vA]], %[[partialA]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vB]], %[[partialB]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vC]], %[[partialC]]) : memref<?x?xf32, #[[strided2D]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vA]], %[[partialA]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vB]], %[[partialB]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vC]], %[[partialC]]) : memref<?x?xf32, #[[$strided2D]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
//
// CHECK: linalg.matmul(%[[partialA]], %[[partialB]], %[[partialC]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D_dynamic]]>
// CHECK: linalg.matmul(%[[partialA]], %[[partialB]], %[[partialC]]) : memref<?x?xf32, #[[$strided2D_dynamic]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>, memref<?x?xf32, #[[$strided2D_dynamic]]>
//
// CHECK: linalg.copy(%[[partialC]], %[[vC]]) : memref<?x?xf32, #[[strided2D_dynamic]]>, memref<?x?xf32, #[[strided2D]]>
// CHECK: linalg.copy(%[[partialC]], %[[vC]]) : memref<?x?xf32, #[[$strided2D_dynamic]]>, memref<?x?xf32, #[[$strided2D]]>
//
// CHECK: dealloc %[[tmpA]] : memref<32xi8>
// CHECK: dealloc %[[tmpB]] : memref<48xi8>
@@ -106,25 +106,25 @@ func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: %[[tmpA_f64:.*]] = alloc() : memref<64xi8>
// CHECK: %[[fullA_f64:.*]] = std.view %[[tmpA_f64]][{{.*}}][{{.*}}] : memref<64xi8> to memref<?x?xf64>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf64>
// CHECK: %[[partialA_f64:.*]] = subview %[[fullA_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[strided2D_dynamic]]>
// CHECK: %[[partialA_f64:.*]] = subview %[[fullA_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D_dynamic]]>
///
// CHECK: %[[tmpB_f64:.*]] = alloc() : memref<96xi8>
// CHECK: %[[fullB_f64:.*]] = std.view %[[tmpB_f64]][{{.*}}][{{.*}}] : memref<96xi8> to memref<?x?xf64>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf64>
// CHECK: %[[partialB_f64:.*]] = subview %[[fullB_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[strided2D_dynamic]]>
// CHECK: %[[partialB_f64:.*]] = subview %[[fullB_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D_dynamic]]>
///
// CHECK: %[[tmpC_f64:.*]] = alloc() : memref<48xi8>
// CHECK: %[[fullC_f64:.*]] = std.view %[[tmpC_f64]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xf64>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xf64>
// CHECK: %[[partialC_f64:.*]] = subview %[[fullC_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[strided2D_dynamic]]>
// CHECK: %[[partialC_f64:.*]] = subview %[[fullC_f64]][%{{.*}}, %{{.*}}] : memref<?x?xf64> to memref<?x?xf64, #[[$strided2D_dynamic]]>

// CHECK: linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref<?x?xf64, #[[strided2D]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref<?x?xf64, #[[strided2D]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[strided2D]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref<?x?xf64, #[[$strided2D]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref<?x?xf64, #[[$strided2D]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[$strided2D]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
//
// CHECK: linalg.matmul(%[[partialA_f64]], %[[partialB_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[strided2D_dynamic]]>, memref<?x?xf64, #[[strided2D_dynamic]]>, memref<?x?xf64, #[[strided2D_dynamic]]>
// CHECK: linalg.matmul(%[[partialA_f64]], %[[partialB_f64]], %[[partialC_f64]]) : memref<?x?xf64, #[[$strided2D_dynamic]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>, memref<?x?xf64, #[[$strided2D_dynamic]]>
//
// CHECK: linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : memref<?x?xf64, #[[strided2D_dynamic]]>, memref<?x?xf64, #[[strided2D]]>
// CHECK: linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : memref<?x?xf64, #[[$strided2D_dynamic]]>, memref<?x?xf64, #[[$strided2D]]>
//
// CHECK: dealloc %[[tmpA_f64]] : memref<64xi8>
// CHECK: dealloc %[[tmpB_f64]] : memref<96xi8>
@@ -168,25 +168,25 @@ func @matmul_i32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: %[[tmpA_i32:.*]] = alloc() : memref<32xi8>
// CHECK: %[[fullA_i32:.*]] = std.view %[[tmpA_i32]][{{.*}}][{{.*}}] : memref<32xi8> to memref<?x?xi32>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xi32>
// CHECK: %[[partialA_i32:.*]] = subview %[[fullA_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[strided2D_dynamic]]>
// CHECK: %[[partialA_i32:.*]] = subview %[[fullA_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[$strided2D_dynamic]]>
///
// CHECK: %[[tmpB_i32:.*]] = alloc() : memref<48xi8>
// CHECK: %[[fullB_i32:.*]] = std.view %[[tmpB_i32]][{{.*}}][{{.*}}] : memref<48xi8> to memref<?x?xi32>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xi32>
// CHECK: %[[partialB_i32:.*]] = subview %[[fullB_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[strided2D_dynamic]]>
// CHECK: %[[partialB_i32:.*]] = subview %[[fullB_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[$strided2D_dynamic]]>
///
// CHECK: %[[tmpC_i32:.*]] = alloc() : memref<24xi8>
// CHECK: %[[fullC_i32:.*]] = std.view %[[tmpC_i32]][{{.*}}][{{.*}}] : memref<24xi8> to memref<?x?xi32>
// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref<?xi8> to memref<?x?xi32>
// CHECK: %[[partialC_i32:.*]] = subview %[[fullC_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[strided2D_dynamic]]>
// CHECK: %[[partialC_i32:.*]] = subview %[[fullC_i32]][%{{.*}}, %{{.*}}] : memref<?x?xi32> to memref<?x?xi32, #[[$strided2D_dynamic]]>

// CHECK: linalg.copy(%[[vA_i32]], %[[partialA_i32]]) : memref<?x?xi32, #[[strided2D]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vB_i32]], %[[partialB_i32]]) : memref<?x?xi32, #[[strided2D]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vC_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[strided2D]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vA_i32]], %[[partialA_i32]]) : memref<?x?xi32, #[[$strided2D]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vB_i32]], %[[partialB_i32]]) : memref<?x?xi32, #[[$strided2D]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
// CHECK: linalg.copy(%[[vC_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[$strided2D]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
//
// CHECK: linalg.matmul(%[[partialA_i32]], %[[partialB_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[strided2D_dynamic]]>, memref<?x?xi32, #[[strided2D_dynamic]]>, memref<?x?xi32, #[[strided2D_dynamic]]>
// CHECK: linalg.matmul(%[[partialA_i32]], %[[partialB_i32]], %[[partialC_i32]]) : memref<?x?xi32, #[[$strided2D_dynamic]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>, memref<?x?xi32, #[[$strided2D_dynamic]]>
//
// CHECK: linalg.copy(%[[partialC_i32]], %[[vC_i32]]) : memref<?x?xi32, #[[strided2D_dynamic]]>, memref<?x?xi32, #[[strided2D]]>
// CHECK: linalg.copy(%[[partialC_i32]], %[[vC_i32]]) : memref<?x?xi32, #[[$strided2D_dynamic]]>, memref<?x?xi32, #[[$strided2D]]>
//
// CHECK: dealloc %[[tmpA_i32]] : memref<32xi8>
// CHECK: dealloc %[[tmpB_i32]] : memref<48xi8>
158 changes: 79 additions & 79 deletions mlir/test/Dialect/Linalg/roundtrip.mlir

Large diffs are not rendered by default.

54 changes: 27 additions & 27 deletions mlir/test/Dialect/Linalg/standard.mlir
@@ -1,14 +1,14 @@
// RUN: mlir-opt %s -convert-linalg-to-std | FileCheck %s

// CHECK-DAG: #[[map0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[map1:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
// CHECK-DAG: #[[map2:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d2 * s2 + d1)>
// CHECK-DAG: #[[map3:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
// CHECK-DAG: #[[map4:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
// CHECK-DAG: #[[map5:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
// CHECK-DAG: #[[map6:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// CHECK-DAG: #[[map7:.*]] = affine_map<()[s0] -> (s0)>
// CHECK-DAG: #[[map8:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
// CHECK-DAG: #[[$map2:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d2 * s2 + d1)>
// CHECK-DAG: #[[$map3:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
// CHECK-DAG: #[[$map4:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
// CHECK-DAG: #[[$map5:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
// CHECK-DAG: #[[$map6:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// CHECK-DAG: #[[$map7:.*]] = affine_map<()[s0] -> (s0)>
// CHECK-DAG: #[[$map8:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>

func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
%arg1: memref<?xf32, offset: ?, strides: [1]>,
@@ -19,32 +19,32 @@ func @dot(%arg0: memref<?xf32, offset: ?, strides: [1]>,
return
}
// CHECK-LABEL: func @dot(
// CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?xf32, #[[map0]]>,
// CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?xf32, #[[map0]]>,
// CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?xf32, #[[$map0]]>,
// CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?xf32, #[[$map0]]>,
// CHECK-SAME: %[[arg2:[a-zA-z0-9]*]]: memref<f32>) {
// CHECK: %[[o0:.*]] = memref_cast %[[arg0]] :
// CHECK-SAME: memref<?xf32, #[[map0]]> to memref<?xf32, #[[map6]]>
// CHECK-SAME: memref<?xf32, #[[$map0]]> to memref<?xf32, #[[$map6]]>
// CHECK: %[[o1:.*]] = memref_cast %[[arg1]] :
// CHECK-SAME: memref<?xf32, #[[map0]]> to memref<?xf32, #[[map6]]>
// CHECK-SAME: memref<?xf32, #[[$map0]]> to memref<?xf32, #[[$map6]]>
// CHECK: %[[o2:.*]] = memref_cast %[[arg2]] :
// CHECK-SAME: memref<f32> to memref<f32, #[[map7]]>
// CHECK-SAME: memref<f32> to memref<f32, #[[$map7]]>
// CHECK: call @linalg_dot_viewsxf32_viewsxf32_viewf32(
// CHECK-SAME: %[[o0]], %[[o1]], %[[o2]]) :
// CHECK-SAME: memref<?xf32, #[[map6]]>, memref<?xf32, #[[map6]]>, memref<f32, #[[map7]]>
// CHECK-SAME: memref<?xf32, #[[$map6]]>, memref<?xf32, #[[$map6]]>, memref<f32, #[[$map7]]>

func @copy(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
linalg.copy(%arg0, %arg1) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>
return
}
// CHECK-LABEL: func @copy(
// CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>,
// CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>) {
// CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>,
// CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>) {
// CHECK: %[[o0:.*]] = memref_cast %[[arg0]] :
// CHECK-SAME: memref<?x?x?xf32, #[[map1]]> to memref<?x?x?xf32, #[[map8]]>
// CHECK-SAME: memref<?x?x?xf32, #[[$map1]]> to memref<?x?x?xf32, #[[$map8]]>
// CHECK: %[[o1:.*]] = memref_cast %[[arg1]] :
// CHECK-SAME: memref<?x?x?xf32, #[[map1]]> to memref<?x?x?xf32, #[[map8]]>
// CHECK-SAME: memref<?x?x?xf32, #[[$map1]]> to memref<?x?x?xf32, #[[$map8]]>
// CHECK: call @linalg_copy_viewsxsxsxf32_viewsxsxsxf32(%[[o0]], %[[o1]]) :
// CHECK-SAME: memref<?x?x?xf32, #[[map8]]>, memref<?x?x?xf32, #[[map8]]>
// CHECK-SAME: memref<?x?x?xf32, #[[$map8]]>, memref<?x?x?xf32, #[[$map8]]>

func @copy_transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %arg1: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
linalg.copy(%arg0, %arg1) {inputPermutation = affine_map<(i, j, k) -> (i, k, j)>,
@@ -53,18 +53,18 @@ func @copy_transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>, %a
return
}
// CHECK-LABEL: func @copy_transpose(
// CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>,
// CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[map1]]>) {
// CHECK-SAME: %[[arg0:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>,
// CHECK-SAME: %[[arg1:[a-zA-z0-9]*]]: memref<?x?x?xf32, #[[$map1]]>) {
// CHECK: %[[t0:.*]] = linalg.transpose %[[arg0]]
// CHECK-SAME: (d0, d1, d2) -> (d0, d2, d1) : memref<?x?x?xf32, #[[map1]]>
// CHECK-SAME: (d0, d1, d2) -> (d0, d2, d1) : memref<?x?x?xf32, #[[$map1]]>
// CHECK: %[[t1:.*]] = linalg.transpose %[[arg1]]
// CHECK-SAME: (d0, d1, d2) -> (d2, d1, d0) : memref<?x?x?xf32, #[[map1]]>
// CHECK-SAME: (d0, d1, d2) -> (d2, d1, d0) : memref<?x?x?xf32, #[[$map1]]>
// CHECK: %[[o0:.*]] = memref_cast %[[t0]] :
// CHECK-SAME: memref<?x?x?xf32, #[[map2]]> to memref<?x?x?xf32, #[[map8]]>
// CHECK-SAME: memref<?x?x?xf32, #[[$map2]]> to memref<?x?x?xf32, #[[$map8]]>
// CHECK: %[[o1:.*]] = memref_cast %[[t1]] :
// CHECK-SAME: memref<?x?x?xf32, #[[map4]]> to memref<?x?x?xf32, #[[map8]]>
// CHECK-SAME: memref<?x?x?xf32, #[[$map4]]> to memref<?x?x?xf32, #[[$map8]]>
// CHECK: call @linalg_copy_viewsxsxsxf32_viewsxsxsxf32(%[[o0]], %[[o1]]) :
// CHECK-SAME: memref<?x?x?xf32, #[[map8]]>, memref<?x?x?xf32, #[[map8]]>
// CHECK-SAME: memref<?x?x?xf32, #[[$map8]]>, memref<?x?x?xf32, #[[$map8]]>

#matmul_accesses = [
affine_map<(m, n, k) -> (m, k)>,
218 changes: 109 additions & 109 deletions mlir/test/Dialect/Linalg/tile.mlir

Large diffs are not rendered by default.

50 changes: 25 additions & 25 deletions mlir/test/Dialect/Linalg/tile_conv.mlir
@@ -1,44 +1,44 @@
// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,0,0,4" | FileCheck %s -check-prefix=TILE-23004

// TILE-23004-DAG: #[[D0x30pS0x10:.*]] = affine_map<(d0) -> (d0 * 30)>
// TILE-23004-DAG: #[[S0x10p90D0x30pS1:.*]] = affine_map<(d0)[s0, s1] -> (s0 * 10 + 90, d0 * -30 + s1)>
// TILE-23004-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
// TILE-23004-DAG: #[[bound_map_4:.*]] = affine_map<(d0)[s0] -> (4, -d0 + s0)>
// TILE-23004-DAG: #[[$D0x30pS0x10:.*]] = affine_map<(d0) -> (d0 * 30)>
// TILE-23004-DAG: #[[$S0x10p90D0x30pS1:.*]] = affine_map<(d0)[s0, s1] -> (s0 * 10 + 90, d0 * -30 + s1)>
// TILE-23004-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
// TILE-23004-DAG: #[[$bound_map_4:.*]] = affine_map<(d0)[s0] -> (4, -d0 + s0)>

func @conv(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg2: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>) {
linalg.conv(%arg0, %arg1, %arg2) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>
return
}
// TILE-23004-LABEL: func @conv(
// TILE-23004: %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[strided4D]]>) {
// TILE-23004: %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>, %{{.*}}: memref<?x?x?x?xf32, #[[$strided4D]]>) {
// TILE-23004-DAG: %[[C0:.*]] = constant 0 : index
// TILE-23004-DAG: %[[C2:.*]] = constant 2 : index
// TILE-23004-DAG: %[[C3:.*]] = constant 3 : index
// TILE-23004-DAG: %[[C4:.*]] = constant 4 : index
// TILE-23004: %[[Q:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[B:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[PaddedInput0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[X0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[Q:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[B:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[PaddedInput0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[X0:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: scf.for %[[ivI:.*]] = %{{.*}} to %[[B]] step %{{.*}} {
// TILE-23004: scf.for %[[ivJ:.*]] = %{{.*}} to %[[X0]] step %{{.*}} {
// TILE-23004: scf.for %[[ivK:.*]] = %{{.*}} to %[[Q]] step %{{.*}} {
// TILE-23004: %[[Z0:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[Z1:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[Z2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[szK:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[Z2]]]
// TILE-23004: %[[K:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[FilterView:.*]] = subview %{{.*}}[0, 0, %[[ivK]], 0] [%[[Z0]], %[[Z1]], %[[szK]], %[[K]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[Z0:.*]] = dim %{{.*}}, %c0 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[Z1:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[Z2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[szK:.*]] = affine.min #[[$bound_map_4]](%[[ivK]])[%[[Z2]]]
// TILE-23004: %[[K:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[FilterView:.*]] = subview %{{.*}}[0, 0, %[[ivK]], 0] [%[[Z0]], %[[Z1]], %[[szK]], %[[K]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[$strided4D]]> to memref<?x?x?x?xf32, #[[$strided4D]]>
//
// TILE-23004: %[[J1:.*]] = affine.apply #[[D0x30pS0x10]](%[[ivJ]])
// TILE-23004: %[[PaddedInput0b:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[I1pStep:.*]] = affine.min #[[S0x10p90D0x30pS1]](%[[ivJ]])[%[[PaddedInput0]], %[[PaddedInput0b]]]
// TILE-23004: %[[SZ2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[J1:.*]] = affine.apply #[[$D0x30pS0x10]](%[[ivJ]])
// TILE-23004: %[[PaddedInput0b:.*]] = dim %{{.*}}, %c1 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[I1pStep:.*]] = affine.min #[[$S0x10p90D0x30pS1]](%[[ivJ]])[%[[PaddedInput0]], %[[PaddedInput0b]]]
// TILE-23004: %[[SZ2:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[dim3:.*]] = dim %{{.*}}, %c3
// TILE-23004: %[[sz3:.*]] = affine.min #[[bound_map_4]](%[[ivK]])[%[[dim3]]]
// TILE-23004: %[[InputView:.*]] = subview %{{.*}}[%[[ivI]], %[[J1]], 0, %[[ivK]]] [%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[sz3:.*]] = affine.min #[[$bound_map_4]](%[[ivK]])[%[[dim3]]]
// TILE-23004: %[[InputView:.*]] = subview %{{.*}}[%[[ivI]], %[[J1]], 0, %[[ivK]]] [%{{.*}}, %{{.*}}, %[[SZ2]], %[[sz3]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[$strided4D]]> to memref<?x?x?x?xf32, #[[$strided4D]]>
//
// TILE-23004: %[[X0:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[X1:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[OutputView:.*]] = subview %{{.*}}[%[[ivI]], %[[ivJ]], 0, 0] [%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[strided4D]]> to memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: %[[X0:.*]] = dim %{{.*}}, %c2 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[X1:.*]] = dim %{{.*}}, %c3 : memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004: %[[OutputView:.*]] = subview %{{.*}}[%[[ivI]], %[[ivJ]], 0, 0] [%{{.*}}, %{{.*}}, %[[X0]], %[[X1]]] [1, 1, 1, 1] : memref<?x?x?x?xf32, #[[$strided4D]]> to memref<?x?x?x?xf32, #[[$strided4D]]>
//
// TILE-23004: linalg.conv(%[[FilterView]], %[[InputView]], %[[OutputView]]) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, #[[strided4D]]>, memref<?x?x?x?xf32, #[[strided4D]]>, memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004: linalg.conv(%[[FilterView]], %[[InputView]], %[[OutputView]]) {dilations = [10, 20], strides = [30, 40]} : memref<?x?x?x?xf32, #[[$strided4D]]>, memref<?x?x?x?xf32, #[[$strided4D]]>, memref<?x?x?x?xf32, #[[$strided4D]]>
22 changes: 11 additions & 11 deletions mlir/test/Dialect/Linalg/tile_conv_padding.mlir
@@ -1,36 +1,36 @@
// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,0,0,4" | FileCheck %s -check-prefix=TILE-23004
// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2" | FileCheck %s -check-prefix=TILE-20000

// TILE-23004-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
// TILE-20000-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
// TILE-20000-DAG: #[[minmap:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>
// TILE-23004-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
// TILE-20000-DAG: #[[$strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
// TILE-20000-DAG: #[[$minmap:.*]] = affine_map<(d0)[s0] -> (2, -d0 + s0)>

func @conv_padding(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg2: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>) {
linalg.conv(%arg0, %arg1, %arg2) {dilations = [10, 20], padding = dense<[[1, 1], [0, 1]]> : tensor<2x2xi64>, strides = [30, 40]} : memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>
return
}
// TILE-23004-LABEL: func @conv_padding(
// TILE-23004-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-23004-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>)
// TILE-23004-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-23004-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>)
// TILE-23004: linalg.conv(%[[ARG0]], %[[ARG1]], %[[ARG2]])

// TILE-20000-LABEL: func @conv_padding(
// TILE-20000-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-20000-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
// TILE-20000-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>)
// TILE-20000-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-20000-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>
// TILE-20000-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[$strided4D]]>)
// TILE-20000-DAG: %[[C0:.*]] = constant 0 : index
// TILE-20000-DAG: %[[C2:.*]] = constant 2 : index
// TILE-20000: %[[B:.*]] = dim %[[ARG1]], %c0
// TILE-20000: scf.for %[[ivI:.*]] = %[[C0]] to %[[B]] step %[[C2]] {
// TILE-20000: %[[DIM10:.*]] = dim %[[ARG1]], %c0
// TILE-20000: %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM10]]]
// TILE-20000: %[[EXTENT:.*]] = affine.min #[[$minmap]](%[[ivI]])[%[[DIM10]]]
// TILE-20000: %[[DIM11:.*]] = dim %[[ARG1]], %c1
// TILE-20000: %[[DIM12:.*]] = dim %[[ARG1]], %c2
// TILE-20000: %[[DIM13:.*]] = dim %[[ARG1]], %c3
// TILE-20000: %[[SUBVIEW1:.*]] = subview %[[ARG1]][%[[ivI]], 0, 0, 0] [%[[EXTENT]], %[[DIM11]], %[[DIM12]], %[[DIM13]]]
// TILE-20000: %[[DIM20:.*]] = dim %[[ARG2]], %c0
// TILE-20000: %[[EXTENT:.*]] = affine.min #[[minmap]](%[[ivI]])[%[[DIM20]]]
// TILE-20000: %[[EXTENT:.*]] = affine.min #[[$minmap]](%[[ivI]])[%[[DIM20]]]
// TILE-20000: %[[DIM21:.*]] = dim %[[ARG2]], %c1
// TILE-20000: %[[DIM22:.*]] = dim %[[ARG2]], %c2
// TILE-20000: %[[DIM23:.*]] = dim %[[ARG2]], %c3
66 changes: 37 additions & 29 deletions mlir/test/Dialect/Linalg/transform-patterns.mlir
@@ -1,15 +1,15 @@
// RUN: mlir-opt %s -test-linalg-transform-patterns=test-patterns | FileCheck %s

// CHECK-DAG: #[[STRIDED_1D:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// CHECK-DAG: #[[$STRIDED_1D:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// Map corresponding to a 2D memory access where the stride along the last dim is known to be 1.
// CHECK-DAG: #[[STRIDED_2D_u_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$STRIDED_2D_u_1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// Map corresponding to a 2D memory access where the stride along all dims are unknown.
// CHECK-DAG: #[[STRIDED_2D:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
// CHECK-DAG: #[[mk:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-DAG: #[[kn:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
// CHECK-DAG: #[[mn:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
// CHECK-DAG: #[[nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
// CHECK-DAG: #[[km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>
// CHECK-DAG: #[[$STRIDED_2D:.*]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
// CHECK-DAG: #[[$mk:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-DAG: #[[$kn:.*]] = affine_map<(d0, d1, d2) -> (d2, d1)>
// CHECK-DAG: #[[$mn:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
// CHECK-DAG: #[[$nm:.*]] = affine_map<(d0, d1, d2) -> (d1, d0)>
// CHECK-DAG: #[[$km:.*]] = affine_map<(d0, d1, d2) -> (d2, d0)>

func @dot(%x: memref<?xf32, offset: ?, strides: [1]>,
%y: memref<?xf32, offset: ?, strides: [1]>,
@@ -48,7 +48,7 @@ func @matvec(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// CHECK-DAG: %[[c6:.*]] = constant 6 : index
// CHECK: scf.parallel {{.*}} step (%[[c5]])
// CHECK: scf.for {{.*}} step %[[c6]]
// CHECK: linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?xf32, #[[STRIDED_1D]]>, memref<?xf32, #[[STRIDED_1D]]>
// CHECK: linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?xf32, #[[$STRIDED_1D]]>, memref<?xf32, #[[$STRIDED_1D]]>

func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
%B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
@@ -85,7 +85,7 @@ func @matmul(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4]] {
// CHECK: linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>

#matmul_trait = {
args_in = 2,
@@ -112,7 +112,7 @@ func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
// CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x16xf32>
// CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<16x32xf32>
// CHECK: vector.transfer_read %{{.*}} : memref<8x32xf32>, vector<8x32xf32>
// CHECK: vector.contract {indexing_maps = [#[[mk]], #[[kn]], #[[mn]]], iterator_types = ["parallel", "parallel", "reduction"]} %{{.*}}, %{{.*}}, %{{.*}} : vector<8x16xf32>, vector<16x32xf32> into vector<8x32xf32>
// CHECK: vector.contract {indexing_maps = [#[[$mk]], #[[$kn]], #[[$mn]]], iterator_types = ["parallel", "parallel", "reduction"]} %{{.*}}, %{{.*}}, %{{.*}} : vector<8x16xf32>, vector<16x32xf32> into vector<8x32xf32>
// CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<8x32xf32>

func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
Expand Down Expand Up @@ -159,12 +159,12 @@ func @permute_generic(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
}
// CHECK-LABEL: func @permute_generic
// CHECK: linalg.generic {args_in = 2 : i64, args_out = 1 : i64,
// CHECK-SAME: indexing_maps = [#[[kn]], #[[nm]], #[[km]]],
// CHECK-SAME: indexing_maps = [#[[$kn]], #[[$nm]], #[[$km]]],
// CHECK-SAME: iterator_types = ["parallel", "reduction", "parallel"],
// CHECK-SAME: library_call = "linalg_matmul"} %{{.*}}, %{{.*}}, %{{.*}}
// CHECK: memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[STRIDED_2D_u_1]]>
// CHECK: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>

#indexed_matmul_trait = {
args_in = 2,
@@ -189,12 +189,12 @@ func @permute_generic_indexed(
}
// CHECK-LABEL: func @permute_generic_indexed
// CHECK: linalg.indexed_generic {args_in = 2 : i64, args_out = 1 : i64,
// CHECK-SAME: indexing_maps = [#[[kn]], #[[nm]], #[[km]]],
// CHECK-SAME: indexing_maps = [#[[$kn]], #[[$nm]], #[[$km]]],
// CHECK-SAME: iterator_types = ["parallel", "reduction", "parallel"],
// CHECK-SAME: library_call = "linalg_matmul_indexed"} %{{.*}}, %{{.*}}, %{{.*}}
// CHECK: memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[STRIDED_2D_u_1]]>
// CHECK: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>,
// CHECK-SAME: memref<?x?xf32, #[[$STRIDED_2D_u_1]]>

func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
%x: memref<?xf32, offset: ?, strides: [1]>,
@@ -211,7 +211,7 @@ func @matvec_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// CHECK-DAG: %[[c6:.*]] = constant 6 : index
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c6]]
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c5]]
// CHECK: linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?xf32, #[[STRIDED_1D]]>, memref<?xf32, #[[STRIDED_1D]]>
// CHECK: linalg.matvec({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?xf32, #[[$STRIDED_1D]]>, memref<?xf32, #[[$STRIDED_1D]]>

func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
%B: memref<?x?xf32, offset: ?, strides: [?, 1]>,
@@ -242,7 +242,7 @@ func @matmul_perm(%A: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c20]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c30]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c40]] {
// CHECK: linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: linalg.matmul({{.*}}, {{.*}}, {{.*}}) : memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>

func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
%arg1: memref<?x?xf32, offset: ?, strides: [?, 1]>,
@@ -274,6 +274,10 @@ func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
return
}
// CHECK-LABEL: func @promote_subview_matmul
// CHECK-DAG: %[[c0:.*]] = constant 0 : index
// CHECK-DAG: %[[c2000:.*]] = constant 2000 : index
// CHECK-DAG: %[[c3000:.*]] = constant 3000 : index
// CHECK-DAG: %[[c4000:.*]] = constant 4000 : index
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
@@ -282,13 +286,13 @@ func @promote_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>,
// CHECK: %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
// CHECK: %[[a0:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
// CHECK: %[[a1:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK: %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[l1:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: %[[l1:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
// CHECK: %[[a2:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK: %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[l2:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: %[[l2:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
// CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK: linalg.copy(%[[s1]], %[[l1]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK: linalg.copy(%[[s2]], %[[l2]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
Expand Down Expand Up @@ -324,6 +328,10 @@ func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?
return
}
// CHECK-LABEL: func @promote_first_subview_matmul
// CHECK-DAG: %[[c0:.*]] = constant 0 : index
// CHECK-DAG: %[[c2000:.*]] = constant 2000 : index
// CHECK-DAG: %[[c3000:.*]] = constant 3000 : index
// CHECK-DAG: %[[c4000:.*]] = constant 4000 : index
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c2000]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c3000]] {
// CHECK: scf.for {{.*}} = %[[c0]] to {{.*}} step %[[c4000]] {
@@ -332,17 +340,17 @@ func @promote_first_subview_matmul(%arg0: memref<?x?xf32, offset: ?, strides: [?
// CHECK: %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
// CHECK: %[[a0:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
// CHECK-NOT: %[[a1:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK-NOT: %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK-NOT: %[[l0:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK-NOT: %[[l0:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
// CHECK-NOT: %[[a2:.*]] = alloc({{%.*}}) : memref<?xi8>
// CHECK-NOT: %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK-NOT: %[[l0:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK-NOT: %[[l0:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
// CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK-NOT: linalg.copy(%[[s1]], %[[l1]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK-NOT: linalg.copy(%[[s2]], %[[l2]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>^
// CHECK: linalg.matmul(%[[v0]], %[[s1]], %[[s2]]) : memref<?x?xf32>, memref<?x?xf32, #[[STRIDED_2D]]>, memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: linalg.matmul(%[[v0]], %[[s1]], %[[s2]]) : memref<?x?xf32>, memref<?x?xf32, #[[$STRIDED_2D]]>, memref<?x?xf32, #[[$STRIDED_2D]]>

func @aligned_promote_fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
%c2000 = constant 2000 : index
@@ -361,7 +369,7 @@ func @aligned_promote_fill(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>) {
// CHECK: %[[s0:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32, #map{{.*}}> to memref<?x?xf32, #map{{.*}}>
// CHECK: %[[a0:.*]] = alloc({{%.*}}) {alignment = 32 : i64} : memref<?xi8>
// CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref<?xi8> to memref<?x?xf32>
// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[STRIDED_2D]]>
// CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref<?x?xf32> to memref<?x?xf32, #[[$STRIDED_2D]]>
// CHECK: linalg.fill(%[[v0]], {{%.*}}) : memref<?x?xf32>, f32
// CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref<?x?xf32, #map{{.*}}>, memref<?x?xf32, #map{{.*}}>
// CHECK: linalg.fill(%[[v0]], %[[cf]]) : memref<?x?xf32>, f32
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Quant/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input=fail
// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s

// -----
// CHECK-LABEL: redundant_scast
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Quant/convert-const.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -quant-convert-const | FileCheck %s --dump-input=fail
// RUN: mlir-opt %s -split-input-file -quant-convert-const | FileCheck %s

// Magic numbers:
// 7.8125e-03 = 1/128 = 2/256 : real range = [-1.0, 0.9921875] (for 8bit, zeroPoint=128)
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Quant/convert-fakequant.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -split-input-file -quant-convert-simulated-quantization | FileCheck %s --dump-input=fail
// RUN: mlir-opt %s -split-input-file -quant-convert-simulated-quantization | FileCheck %s

// -----
// Verifies a quint8 single point.
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Shape/canonicalize.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -split-input-file -allow-unregistered-dialect -canonicalize <%s | FileCheck %s --dump-input=fail
// RUN: mlir-opt -split-input-file -allow-unregistered-dialect -canonicalize <%s | FileCheck %s

// CHECK-LABEL: func @f
func @f(%arg0: tensor<2x3x4xf32>) -> !shape.shape {
@@ -343,7 +343,7 @@ func @f(%arg0 : !shape.shape) -> !shape.shape {
// Folding of any with partially constant operands is not yet implemented.
// CHECK-LABEL: func @f
func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> !shape.shape {
// CHECK-NEXT: shape.any
// CHECK-NEXT: %[[CS:.*]] = shape.any
// CHECK-NEXT: return %[[CS]]
%1 = shape.any %arg0, %arg1
return %1 : !shape.shape
12 changes: 6 additions & 6 deletions mlir/test/Dialect/Vector/vector-contract-transforms.mlir
@@ -616,7 +616,7 @@ func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> {

// CHECK-LABEL: func @broadcast_stretch_at_end
// CHECK-SAME: %[[A:.*0]]: vector<4x1xf32>
// CHECK: %[[C:.*]] = constant dense<0.000000e+00> : vector<4x3xf32>
// CHECK: %[[C0:.*]] = constant dense<0.000000e+00> : vector<4x3xf32>
// CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : vector<4x1xf32>
// CHECK: %[[T1:.*]] = vector.extract %[[T0]][0] : vector<1xf32>
// CHECK: %[[T2:.*]] = splat %[[T1]] : vector<3xf32>
@@ -678,10 +678,10 @@ func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2xf32>
// CHECK-LABEL: func @genbool_1d
// CHECK: %[[TT:.*]] = constant true
// CHECK: %[[C1:.*]] = constant dense<false> : vector<8xi1>
// CHECK: %[[T0.*]] = vector.insert %[[TT]], %[[C1]] [0] : i1 into vector<8xi1>
// CHECK: %[[T1.*]] = vector.insert %[[TT]], %[[T0]] [1] : i1 into vector<8xi1>
// CHECK: %[[T2.*]] = vector.insert %[[TT]], %[[T1]] [2] : i1 into vector<8xi1>
// CHECK: %[[T3.*]] = vector.insert %[[TT]], %[[T2]] [3] : i1 into vector<8xi1>
// CHECK: %[[T0:.*]] = vector.insert %[[TT]], %[[C1]] [0] : i1 into vector<8xi1>
// CHECK: %[[T1:.*]] = vector.insert %[[TT]], %[[T0]] [1] : i1 into vector<8xi1>
// CHECK: %[[T2:.*]] = vector.insert %[[TT]], %[[T1]] [2] : i1 into vector<8xi1>
// CHECK: %[[T3:.*]] = vector.insert %[[TT]], %[[T2]] [3] : i1 into vector<8xi1>
// CHECK: return %[[T3]] : vector<8xi1>

func @genbool_1d() -> vector<8xi1> {
@@ -705,7 +705,7 @@ func @genbool_2d() -> vector<4x4xi1> {
}

// CHECK-LABEL: func @genbool_3d
// CHECK: %[[Tt:.*]] = constant true
// CHECK: %[[TT:.*]] = constant true
// CHECK: %[[C1:.*]] = constant dense<false> : vector<4xi1>
// CHECK: %[[C2:.*]] = constant dense<false> : vector<3x4xi1>
// CHECK: %[[C3:.*]] = constant dense<false> : vector<2x3x4xi1>
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Vector/vector-slices-transforms.mlir
@@ -36,7 +36,7 @@ func @insert_slices(%arg0: vector<2x2xf32>,
// CHECK: %[[X1:.*]] = vector.extract_strided_slice %arg0 {offsets = [0, 2], sizes = [2, 1], strides = [1, 1]}
// CHECK: %[[X2:.*]] = vector.extract_strided_slice %arg0 {offsets = [2, 0], sizes = [1, 2], strides = [1, 1]}
// CHECK: %[[X3:.*]] = vector.extract_strided_slice %arg0 {offsets = [2, 2], sizes = [1, 1], strides = [1, 1]}
// CHECK: %[[X4:.*]] = vector.insert_strided_slice %[[X0]], %[[C0]] {offsets = [0, 0], strides = [1, 1]}
// CHECK: %[[X4:.*]] = vector.insert_strided_slice %[[X0]], %[[C]] {offsets = [0, 0], strides = [1, 1]}
// CHECK: %[[X5:.*]] = vector.insert_strided_slice %[[X1]], %[[X4]] {offsets = [0, 2], strides = [1, 1]}
// CHECK: %[[X6:.*]] = vector.insert_strided_slice %[[X2]], %[[X5]] {offsets = [2, 0], strides = [1, 1]}
// CHECK: %[[X7:.*]] = vector.insert_strided_slice %[[X3]], %[[X6]] {offsets = [2, 2], strides = [1, 1]}
44 changes: 22 additions & 22 deletions mlir/test/IR/core-ops.mlir
@@ -8,17 +8,17 @@

// CHECK: #map1 = affine_map<()[s0] -> (s0 + 1)>

// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// CHECK-DAG: #[[BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
// CHECK-DAG: #[[$BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// CHECK-DAG: #[[$BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>

// CHECK-DAG: #[[BASE_MAP1:map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
// CHECK-DAG: #[[$BASE_MAP1:map[0-9]+]] = affine_map<(d0)[s0] -> (d0 + s0)>
// CHECK-DAG: #[[$SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>

// CHECK-DAG: #[[BASE_MAP2:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 22 + d1)>
// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>
// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>
// CHECK-DAG: #[[$BASE_MAP2:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 22 + d1)>
// CHECK-DAG: #[[$SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
// CHECK-DAG: #[[$SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>
// CHECK-DAG: #[[$SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 8 + s0 + d1 * 2)>

// CHECK-LABEL: func @func_with_ops
// CHECK-SAME: %[[ARG:.*]]: f32
@@ -689,10 +689,10 @@ func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64
// CHECK: %1 = memref_cast %arg1 : memref<?xf32> to memref<4xf32>
%1 = memref_cast %arg1 : memref<?xf32> to memref<4xf32>

// CHECK: {{%.*}} = memref_cast %arg2 : memref<64x16x4xf32, #[[BASE_MAP0]]> to memref<64x16x4xf32, #[[BASE_MAP3]]>
// CHECK: {{%.*}} = memref_cast %arg2 : memref<64x16x4xf32, #[[$BASE_MAP0]]> to memref<64x16x4xf32, #[[$BASE_MAP3]]>
%2 = memref_cast %arg2 : memref<64x16x4xf32, offset: 0, strides: [64, 4, 1]> to memref<64x16x4xf32, offset: ?, strides: [?, ?, ?]>

// CHECK: {{%.*}} = memref_cast {{%.*}} : memref<64x16x4xf32, #[[BASE_MAP3]]> to memref<64x16x4xf32, #[[BASE_MAP0]]>
// CHECK: {{%.*}} = memref_cast {{%.*}} : memref<64x16x4xf32, #[[$BASE_MAP3]]> to memref<64x16x4xf32, #[[$BASE_MAP0]]>
%3 = memref_cast %2 : memref<64x16x4xf32, offset: ?, strides: [?, ?, ?]> to memref<64x16x4xf32, offset: 0, strides: [64, 4, 1]>

// CHECK: memref_cast %{{.*}} : memref<4xf32> to memref<*xf32>
@@ -728,52 +728,52 @@ func @memref_subview(%arg0 : index, %arg1 : index, %arg2 : index) {

%0 = alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
// CHECK: subview %0[%c0, %c0, %c0] [%arg0, %arg1, %arg2] [%c1, %c1, %c1] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
// CHECK-SAME: to memref<?x?x?xf32, #[[BASE_MAP3]]>
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
// CHECK-SAME: to memref<?x?x?xf32, #[[$BASE_MAP3]]>
%1 = subview %0[%c0, %c0, %c0][%arg0, %arg1, %arg2][%c1, %c1, %c1]
: memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>

%2 = alloc()[%arg2] : memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
// CHECK: subview %2[%c1] [%arg0] [%c1] :
// CHECK-SAME: memref<64xf32, #[[BASE_MAP1]]>
// CHECK-SAME: to memref<?xf32, #[[SUBVIEW_MAP1]]>
// CHECK-SAME: memref<64xf32, #[[$BASE_MAP1]]>
// CHECK-SAME: to memref<?xf32, #[[$SUBVIEW_MAP1]]>
%3 = subview %2[%c1][%arg0][%c1]
: memref<64xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to
memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>

%4 = alloc() : memref<64x22xf32, affine_map<(d0, d1) -> (d0 * 22 + d1)>>
// CHECK: subview %4[%c0, %c1] [%arg0, %arg1] [%c1, %c0] :
// CHECK-SAME: memref<64x22xf32, #[[BASE_MAP2]]>
// CHECK-SAME: to memref<?x?xf32, #[[SUBVIEW_MAP2]]>
// CHECK-SAME: memref<64x22xf32, #[[$BASE_MAP2]]>
// CHECK-SAME: to memref<?x?xf32, #[[$SUBVIEW_MAP2]]>
%5 = subview %4[%c0, %c1][%arg0, %arg1][%c1, %c0]
: memref<64x22xf32, offset:0, strides: [22, 1]> to
memref<?x?xf32, offset:?, strides: [?, ?]>

// CHECK: subview %0[0, 2, 0] [4, 4, 4] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
// CHECK-SAME: to memref<4x4x4xf32, #[[SUBVIEW_MAP3]]>
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
// CHECK-SAME: to memref<4x4x4xf32, #[[$SUBVIEW_MAP3]]>
%6 = subview %0[0, 2, 0][4, 4, 4][1, 1, 1]
: memref<8x16x4xf32, offset:0, strides: [64, 4, 1]> to
memref<4x4x4xf32, offset:8, strides: [64, 4, 1]>

%7 = alloc(%arg1, %arg2) : memref<?x?xf32>
// CHECK: subview {{%.*}}[0, 0] [4, 4] [1, 1] :
// CHECK-SAME: memref<?x?xf32>
// CHECK-SAME: to memref<4x4xf32, #[[SUBVIEW_MAP4]]>
// CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP4]]>
%8 = subview %7[0, 0][4, 4][1, 1]
: memref<?x?xf32> to memref<4x4xf32, offset: ?, strides:[?, 1]>

%9 = alloc() : memref<16x4xf32>
// CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [{{%.*}}, {{%.*}}] :
// CHECK-SAME: memref<16x4xf32>
// CHECK-SAME: to memref<4x4xf32, #[[SUBVIEW_MAP2]]
// CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP2]]
%10 = subview %9[%arg1, %arg1][4, 4][%arg2, %arg2]
: memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[?, ?]>

// CHECK: subview {{%.*}}[{{%.*}}, {{%.*}}] [4, 4] [2, 2] :
// CHECK-SAME: memref<16x4xf32>
// CHECK-SAME: to memref<4x4xf32, #[[SUBVIEW_MAP5]]
// CHECK-SAME: to memref<4x4xf32, #[[$SUBVIEW_MAP5]]
%11 = subview %9[%arg1, %arg2][4, 4][2, 2]
: memref<16x4xf32> to memref<4x4xf32, offset: ?, strides:[8, 2]>

4 changes: 2 additions & 2 deletions mlir/test/IR/parser.mlir
@@ -42,7 +42,7 @@
// CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0) : (d0 - 1 == 0)>
#set2 = affine_set<(d0) : (d0 - 1 == 0)>

// CHECK-DAG: [[SET_TRUE:#set[0-9]+]] = affine_set<() : (0 == 0)>
// CHECK-DAG: [[$SET_TRUE:#set[0-9]+]] = affine_set<() : (0 == 0)>

// CHECK-DAG: #set{{[0-9]+}} = affine_set<(d0)[s0] : (d0 - 2 >= 0, -d0 + 4 >= 0)>

@@ -826,7 +826,7 @@ func @type_alias() -> !i32_type_alias {

// CHECK-LABEL: func @no_integer_set_constraints(
func @no_integer_set_constraints() {
// CHECK: affine.if [[SET_TRUE]]() {
// CHECK: affine.if [[$SET_TRUE]]() {
affine.if affine_set<() : ()> () {
}
return
2 changes: 1 addition & 1 deletion mlir/test/IR/test-func-set-type.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -test-func-set-type -split-input-file | FileCheck %s --dump-input=fail
// RUN: mlir-opt %s -test-func-set-type -split-input-file | FileCheck %s

// It's currently not possible to have an attribute with a function type due to
// parser ambiguity. So instead we reference a function declaration to take the
2 changes: 1 addition & 1 deletion mlir/test/Transforms/canonicalize-dce.mlir
@@ -1,4 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input=fail
// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s

// Test case: Simple case of deleting a dead pure op.

70 changes: 35 additions & 35 deletions mlir/test/Transforms/canonicalize.mlir
@@ -694,16 +694,16 @@ func @view(%arg0 : index) -> (f32, f32, f32, f32) {

// -----

// CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)>
// CHECK-DAG: #[[SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)>
// CHECK-DAG: #[[SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)>
// CHECK-DAG: #[[SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
// CHECK-DAG: #[[SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)>
// CHECK-DAG: #[[SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)>
// CHECK-DAG: #[[SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2 * 2)>
// CHECK-DAG: #[[SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)>
// CHECK-DAG: #[[SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 12)>
// CHECK-DAG: #[[$BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>
// CHECK-DAG: #[[$SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 64 + s0 + d1 * 4 + d2)>
// CHECK-DAG: #[[$SUBVIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 79)>
// CHECK-DAG: #[[$SUBVIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 28 + d2 * 11)>
// CHECK-DAG: #[[$SUBVIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)>
// CHECK-DAG: #[[$SUBVIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 128 + s0 + d1 * 28 + d2 * 11)>
// CHECK-DAG: #[[$SUBVIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + d2 * s2 + 79)>
// CHECK-DAG: #[[$SUBVIEW_MAP6:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2 * 2)>
// CHECK-DAG: #[[$SUBVIEW_MAP7:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 4 + s0 + d1)>
// CHECK-DAG: #[[$SUBVIEW_MAP8:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 12)>


// CHECK-LABEL: func @subview
@@ -731,17 +731,17 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Note that the subview uses the base memrefs layout map because it used
// zero offset and unit stride arguments.
// CHECK: subview %[[ALLOC0]][0, 0, 0] [7, 11, 2] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x2xf32, #[[BASE_MAP0]]>
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x2xf32, #[[$BASE_MAP0]]>
%1 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c1, %c1, %c1]
: memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
%v0 = load %1[%c0, %c0, %c0] : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>

// Test: subview with one dynamic operand can also be folded.
// CHECK: subview %[[ALLOC0]][0, %[[ARG0]], 0] [7, 11, 15] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x15xf32, #[[SUBVIEW_MAP0]]>
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x15xf32, #[[$SUBVIEW_MAP0]]>
%2 = subview %0[%c0, %arg0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
: memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
@@ -751,35 +751,35 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
%3 = alloc(%arg0) : memref<?x16x4xf32, offset : 0, strides : [64, 4, 1]>
// Test: subview with constant operands but dynamic base memref is folded as long as the strides and offset of the base memref are static.
// CHECK: subview %[[ALLOC1]][0, 0, 0] [7, 11, 15] [1, 1, 1] :
// CHECK-SAME: memref<?x16x4xf32, #[[BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x15xf32, #[[BASE_MAP0]]>
// CHECK-SAME: memref<?x16x4xf32, #[[$BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x15xf32, #[[$BASE_MAP0]]>
%4 = subview %3[%c0, %c0, %c0] [%c7, %c11, %c15] [%c1, %c1, %c1]
: memref<?x16x4xf32, offset : 0, strides : [64, 4, 1]> to
memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
store %v0, %4[%c0, %c0, %c0] : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>

// Test: subview offset operands are folded correctly w.r.t. base strides.
// CHECK: subview %[[ALLOC0]][1, 2, 7] [7, 11, 2] [1, 1, 1] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
// CHECK-SAME: memref<7x11x2xf32, #[[SUBVIEW_MAP1]]>
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
// CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP1]]>
%5 = subview %0[%c1, %c2, %c7] [%c7, %c11, %c2] [%c1, %c1, %c1]
: memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
store %v0, %5[%c0, %c0, %c0] : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>

// Test: subview stride operands are folded correctly w.r.t. base strides.
// CHECK: subview %[[ALLOC0]][0, 0, 0] [7, 11, 2] [2, 7, 11] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x2xf32, #[[SUBVIEW_MAP2]]>
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]>
// CHECK-SAME: to memref<7x11x2xf32, #[[$SUBVIEW_MAP2]]>
%6 = subview %0[%c0, %c0, %c0] [%c7, %c11, %c2] [%c2, %c7, %c11]
: memref<8x16x4xf32, offset : 0, strides : [64, 4, 1]> to
memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
store %v0, %6[%c0, %c0, %c0] : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>

// Test: subview shape are folded, but offsets and strides are not even if base memref is static
// CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [7, 11, 2] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
// CHECK-SAME: memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
// CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP3]]>
%10 = subview %0[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] :
memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to
memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -788,8 +788,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {

// Test: subview strides are folded, but offsets and shape are not even if base memref is static
// CHECK: subview %[[ALLOC0]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [2, 7, 11] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
// CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP4]]
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
// CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP4]]
%11 = subview %0[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c7, %c11] :
memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to
memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -798,8 +798,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {

// Test: subview offsets are folded, but strides and shape are not even if base memref is static
// CHECK: subview %[[ALLOC0]][1, 2, 7] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] :
// CHECK-SAME: memref<8x16x4xf32, #[[BASE_MAP0]]> to
// CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP5]]
// CHECK-SAME: memref<8x16x4xf32, #[[$BASE_MAP0]]> to
// CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP5]]
%13 = subview %0[%c1, %c2, %c7] [%arg1, %arg1, %arg1] [%arg0, %arg0, %arg0] :
memref<8x16x4xf32, offset:0, strides:[64, 4, 1]> to
memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -811,7 +811,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// Test: subview shape are folded, even if base memref is not static
// CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [7, 11, 2] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
// CHECK-SAME: memref<?x?x?xf32> to
// CHECK-SAME: memref<7x11x2xf32, #[[SUBVIEW_MAP3]]>
// CHECK-SAME: memref<7x11x2xf32, #[[$SUBVIEW_MAP3]]>
%15 = subview %14[%arg0, %arg0, %arg0] [%c7, %c11, %c2] [%arg1, %arg1, %arg1] :
memref<?x?x?xf32> to
memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -820,7 +820,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview strides are folded, in the type only the most minor stride is folded.
// CHECK: subview %[[ALLOC2]][%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] [2, 2, 2] :
// CHECK-SAME: memref<?x?x?xf32> to
// CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP6]]
// CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP6]]
%16 = subview %14[%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] [%c2, %c2, %c2] :
memref<?x?x?xf32> to
memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -829,7 +829,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview offsets are folded but the type offset remains dynamic, when the base memref is not static
// CHECK: subview %[[ALLOC2]][1, 1, 1] [%[[ARG0]], %[[ARG0]], %[[ARG0]]] [%[[ARG1]], %[[ARG1]], %[[ARG1]]] :
// CHECK-SAME: memref<?x?x?xf32> to
// CHECK-SAME: memref<?x?x?xf32, #[[SUBVIEW_MAP3]]
// CHECK-SAME: memref<?x?x?xf32, #[[$SUBVIEW_MAP3]]
%17 = subview %14[%c1, %c1, %c1] [%arg0, %arg0, %arg0] [%arg1, %arg1, %arg1] :
memref<?x?x?xf32> to
memref<?x?x?xf32, offset: ?, strides: [?, ?, ?]>
@@ -842,7 +842,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview strides are maintained when sizes are folded
// CHECK: subview %[[ALLOC3]][%arg1, %arg1] [2, 4] [1, 1] :
// CHECK-SAME: memref<12x4xf32> to
// CHECK-SAME: memref<2x4xf32, #[[SUBVIEW_MAP7]]>
// CHECK-SAME: memref<2x4xf32, #[[$SUBVIEW_MAP7]]>
%19 = subview %18[%arg1, %arg1] [%c2, %c4] [1, 1] :
memref<12x4xf32> to
memref<?x?xf32, offset: ?, strides:[4, 1]>
@@ -851,7 +851,7 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
// TEST: subview strides and sizes are maintained when offsets are folded
// CHECK: subview %[[ALLOC3]][2, 4] [12, 4] [1, 1] :
// CHECK-SAME: memref<12x4xf32> to
// CHECK-SAME: memref<12x4xf32, #[[SUBVIEW_MAP8]]>
// CHECK-SAME: memref<12x4xf32, #[[$SUBVIEW_MAP8]]>
%20 = subview %18[%c2, %c4] [12, 4] [1, 1] :
memref<12x4xf32> to
memref<12x4xf32, offset: ?, strides:[4, 1]>
@@ -960,8 +960,8 @@ func @memref_cast_folding_subview(%arg0: memref<4x5xf32>, %i: index) -> (memref<

// -----

// CHECK-DAG: #[[map0:.*]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
// CHECK-DAG: #[[map1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1) -> (d0 * 16 + d1)>
// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>

// CHECK-LABEL: func @memref_cast_folding_subview_static(
func @memref_cast_folding_subview_static(%V: memref<16x16xf32>, %a: index, %b: index)
@@ -970,8 +970,8 @@ func @memref_cast_folding_subview_static(%V: memref<16x16xf32>, %a: index, %b: i
%0 = memref_cast %V : memref<16x16xf32> to memref<?x?xf32>
%1 = subview %0[0, 0][3, 4][1, 1] : memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>

// CHECK: subview{{.*}}: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
// CHECK: memref_cast{{.*}}: memref<3x4xf32, #[[map0]]> to memref<3x4xf32, #[[map1]]>
// CHECK: subview{{.*}}: memref<16x16xf32> to memref<3x4xf32, #[[$map0]]>
// CHECK: memref_cast{{.*}}: memref<3x4xf32, #[[$map0]]> to memref<3x4xf32, #[[$map1]]>
return %1: memref<3x4xf32, offset:?, strides:[?, 1]>
}

4 changes: 2 additions & 2 deletions mlir/test/Transforms/location-snapshot.mlir
@@ -1,12 +1,12 @@
// RUN: mlir-opt -allow-unregistered-dialect -snapshot-op-locations='filename=%/t' -mlir-print-debuginfo %s | FileCheck %s -DFILE=%/t
// RUN: mlir-opt -allow-unregistered-dialect -snapshot-op-locations='filename=%/t tag='tagged'' -mlir-print-debuginfo %s | FileCheck %s --check-prefix=TAG -DFILE=%/t

// CHECK-LABEL: func @function
// CHECK: func @function(
// CHECK-NEXT: loc("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})
// CHECK-NEXT: loc("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})
// CHECK-NEXT: } loc("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})

// TAG-LABEL: func @function
// TAG: func @function(
// TAG-NEXT: loc(fused["original", "tagged"("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})])
// TAG-NEXT: loc(fused["original", "tagged"("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})])
// TAG-NEXT: } loc(fused["original", "tagged"("[[FILE]]":{{[0-9]+}}:{{[0-9]+}})])