Bump IREE, mlir-aie, mlir-air, mlir-aie wheel #362

Merged (2 commits) on May 21, 2024
.github/workflows/ci.yml (2 changes: 1 addition & 1 deletion)

@@ -101,7 +101,7 @@ jobs:
run: |
python3 -m venv .venv
source .venv/bin/activate
-pip install https://github.com/Xilinx/mlir-aie/releases/download/latest-wheels/mlir_aie-0.0.1.2024051421+b085480-py3-none-manylinux_2_35_x86_64.whl
+pip install https://github.com/Xilinx/mlir-aie/releases/download/latest-wheels/mlir_aie-0.0.1.2024051921+7fb9fad-py3-none-manylinux_2_35_x86_64.whl
pip install -r tests/matmul/requirements.txt

- name: E2E correctness matmul test
@@ -16,7 +16,7 @@ func.func @func0() {
// CHECK: %[[ALLOC0:.*]] = memref.alloc() : memref<1x1x8x16xi32, 1>
// CHECK: %[[ALLOC1:.*]] = memref.alloc() : memref<1x1x2x2x4x8xi32, 2>
// CHECK: %[[EXPANDSHAPE0:.*]] = memref.expand_shape %[[ALLOC0]]
-// CHECK-SAME{LITERAL}: [[0], [1], [2, 3], [4, 5]] : memref<1x1x8x16xi32, 1> into memref<1x1x2x4x2x8xi32, 1>
+// CHECK-SAME: output_shape [1, 1, 2, 4, 2, 8] : memref<1x1x8x16xi32, 1> into memref<1x1x2x4x2x8xi32, 1>
// CHECK: %[[TRANSPOSE0:.*]] = memref.transpose %[[EXPANDSHAPE0]] (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x4x2x8xi32, 1> to memref<1x1x2x2x4x8xi32, strided<[128, 128, 8, 64, 16, 1]>, 1>
// CHECK: air.dma_memcpy_nd (%[[ALLOC1]][] [] [], %[[TRANSPOSE0]][] [] []) : (memref<1x1x2x2x4x8xi32, 2>, memref<1x1x2x2x4x8xi32, strided<[128, 128, 8, 64, 16, 1]>, 1>)
func.func @func1() {
@@ -138,13 +138,13 @@ func.func @func6() {
// CHECK: %[[SUBVIEW6:.*]] = memref.subview %{{.*}}[0, %{{.*}}, 0, 0] [1, 1, 16, 16] [1, 1, 1, 1] : memref<1x1x16x16xi32, 1> to memref<1x1x16x16xi32, strided<[256, 256, 16, 1], offset: ?>, 1>
// CHECK: %[[SUBVIEW7:.*]] = memref.subview %{{.*}}[%{{.*}}, %{{.*}}, 0, 0] [1, 1, 8, 16] [1, 1, 1, 1] : memref<1x1x8x16xi32, 1> to memref<1x1x8x16xi32, strided<[128, 128, 16, 1], offset: ?>, 1>
// CHECK: %[[EXPANDSHAPE0:.*]] = memref.expand_shape %[[SUBVIEW5]]
-// CHECK-SAME{LITERAL}: [[0], [1], [2, 3], [4, 5]] : memref<1x1x8x16xi32, strided<[128, 128, 16, 1], offset: ?>, 1> into memref<1x1x2x4x2x8xi32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1>
+// CHECK-SAME: output_shape [1, 1, 2, 4, 2, 8] : memref<1x1x8x16xi32, strided<[128, 128, 16, 1], offset: ?>, 1> into memref<1x1x2x4x2x8xi32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1>
// CHECK: %[[TRANSPOSE2:.*]] = memref.transpose %[[EXPANDSHAPE0]] (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x4x2x8xi32, strided<[128, 128, 64, 16, 8, 1], offset: ?>, 1> to memref<1x1x2x2x4x8xi32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1>
// CHECK: air.dma_memcpy_nd (%{{.*}}[] [] [], %[[TRANSPOSE2]][] [] []) : (memref<1x1x2x2x4x8xi32, 2>, memref<1x1x2x2x4x8xi32, strided<[128, 128, 8, 64, 16, 1], offset: ?>, 1>)
iree_linalg_ext.pack %subview_4 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %alloc_7 : (memref<1x1x8x16xi32, strided<[128, 128, 16, 1], offset: ?>, 1> memref<1x1x2x2x4x8xi32, 2>)
%alloc_8 = memref.alloc() : memref<1x1x2x2x8x8xi32, 2>
// CHECK: %[[EXPANDSHAPE1:.*]] = memref.expand_shape %[[SUBVIEW6]]
-// CHECK-SAME{LITERAL}: [[0], [1], [2, 3], [4, 5]] : memref<1x1x16x16xi32, strided<[256, 256, 16, 1], offset: ?>, 1> into memref<1x1x2x8x2x8xi32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1>
+// CHECK-SAME: output_shape [1, 1, 2, 8, 2, 8] : memref<1x1x16x16xi32, strided<[256, 256, 16, 1], offset: ?>, 1> into memref<1x1x2x8x2x8xi32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1>
// CHECK: %[[TRANSPOSE3:.*]] = memref.transpose %[[EXPANDSHAPE1]] (d0, d1, d2, d3, d4, d5) -> (d0, d1, d4, d2, d3, d5) : memref<1x1x2x8x2x8xi32, strided<[256, 256, 128, 16, 8, 1], offset: ?>, 1> to memref<1x1x2x2x8x8xi32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1>
// CHECK: air.dma_memcpy_nd (%{{.*}}[] [] [], %[[TRANSPOSE3]][] [] []) : (memref<1x1x2x2x8x8xi32, 2>, memref<1x1x2x2x8x8xi32, strided<[256, 256, 8, 128, 16, 1], offset: ?>, 1>)
iree_linalg_ext.pack %subview_5 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [8, 8] into %alloc_8 : (memref<1x1x16x16xi32, strided<[256, 256, 16, 1], offset: ?>, 1> memref<1x1x2x2x8x8xi32, 2>)
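For context on the CHECK updates above: upstream MLIR now requires memref.expand_shape to carry an explicit output_shape alongside the reassociation indices, so the FileCheck lines match on output_shape [...] instead of the old CHECK-SAME{LITERAL} reassociation form. A minimal standalone sketch of the new syntax, assuming current upstream MLIR and using a hypothetical function name (not part of this PR's test files):

func.func @expand_shape_example(%src: memref<1x1x8x16xi32, 1>) -> memref<1x1x2x4x2x8xi32, 1> {
  // Same reassociation groups as before; the result shape is now written out explicitly.
  %expanded = memref.expand_shape %src [[0], [1], [2, 3], [4, 5]] output_shape [1, 1, 2, 4, 2, 8]
      : memref<1x1x8x16xi32, 1> into memref<1x1x2x4x2x8xi32, 1>
  return %expanded : memref<1x1x2x4x2x8xi32, 1>
}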
sync_deps.py (2 changes: 1 addition & 1 deletion)

@@ -7,7 +7,7 @@
### Update with: shark-workspace pin

PINNED_VERSIONS = {
"iree": "4c9cb3c336678fd8a6243da051dcf5ec617cc928",
"iree": "080b1fa520a73f835db37049a39a735d6b175b7b",
}

ORIGINS = {
tests/transform_dialect/conv_fill_spec_pad.mlir (2 changes: 1 addition & 1 deletion)

@@ -141,7 +141,7 @@ module attributes { transform.with_named_sequence } {
// N H W C K h w ===> 3 loops for K, h, w are inserted.

%tiled_reduction, %loop0, %loop1, %loop2 =
-transform.structured.tile_using_for %padded_1 [0,0,0,0,8,1,1]
+transform.structured.tile_using_for %padded_1 tile_sizes [0,0,0,0,8,1,1]
: (!any) -> (!any, !any, !any, !any)

transform.include @replace_conv2d_with_conv1d failures(propagate)
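The transform-dialect test updates in this and the following files are the same mechanical change: transform.structured.tile_using_for now takes its sizes after an explicit tile_sizes keyword. A minimal standalone sketch of the updated spelling, assuming current upstream transform-dialect syntax and a hypothetical matmul payload (not taken from this PR's tests):

module attributes { transform.with_named_sequence } {
  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %root
        : (!transform.any_op) -> !transform.any_op
    // Tile sizes now follow the explicit tile_sizes keyword.
    %tiled, %loop = transform.structured.tile_using_for %matmul tile_sizes [0, 0, 64]
        : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}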
tests/transform_dialect/matmul_fill_spec_pack_funcIR.mlir (2 changes: 1 addition & 1 deletion)

@@ -48,7 +48,7 @@ module attributes { transform.with_named_sequence } {

// Tile reduction dimension.
%tiled_reduction, %loop =
-transform.structured.tile_using_for %tiled_matmul [0, 0, 64]
+transform.structured.tile_using_for %tiled_matmul tile_sizes [0, 0, 64]
: (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">)

// Pack by applying data tiling, and the linalg.matmul becomes linalg.generic.
tests/transform_dialect/matmul_fill_spec_pack_peel.mlir (4 changes: 2 additions & 2 deletions)

@@ -100,7 +100,7 @@ module attributes { transform.with_named_sequence } {

// First level for loop.
%first_level_tiled_reduction_matmul, %outer_for_loop =
-transform.structured.tile_using_for %l1_packed_matmul [0, 0, 1]
+transform.structured.tile_using_for %l1_packed_matmul tile_sizes [0, 0, 1]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)

// Fuse the pack operations in the outer for loop.
@@ -134,7 +134,7 @@ module attributes { transform.with_named_sequence } {
// Second level for loop.
%generic_op1 = transform.structured.match ops{["linalg.generic"]} in %variant_op : (!transform.any_op) -> !transform.any_op
%second_level_tiled_reduction_matmul, %inner_for_loop =
-transform.structured.tile_using_for %generic_op1 [0, 0, 0, 0, 0, 4]
+transform.structured.tile_using_for %generic_op1 tile_sizes [0, 0, 0, 0, 0, 4]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)

// Fuse the pack operations in inner for loop.
tests/transform_dialect/matmul_fill_spec_pad.mlir (2 changes: 1 addition & 1 deletion)

@@ -99,7 +99,7 @@ module attributes { transform.with_named_sequence } {

// Tile reduction dimension.
%tiled_reduction, %loop =
-transform.structured.tile_using_for %padded_1 [0, 0, 4]
+transform.structured.tile_using_for %padded_1 tile_sizes [0, 0, 4]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)

// Clean up.
tests/transform_dialect/matmul_fill_spec_pad_pack.mlir (6 changes: 3 additions & 3 deletions)

@@ -70,10 +70,10 @@ module attributes { transform.with_named_sequence } {
%copy_1 = transform.get_producer_of_operand %padded[0] : (!transform.any_op) -> (!transform.any_op)
%copy_2 = transform.get_producer_of_operand %padded[1] : (!transform.any_op) -> (!transform.any_op)
%tiled_copy_1, %tiled_copy_for_loop_1 =
-transform.structured.tile_using_for %copy_1 [0, 256]
+transform.structured.tile_using_for %copy_1 tile_sizes [0, 256]
: (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">)
%tiled_copy_2, %tiled_copy_for_loop_2 =
-transform.structured.tile_using_for %copy_2 [256, 0]
+transform.structured.tile_using_for %copy_2 tile_sizes [256, 0]
: (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">)

// Second level tile to forall with tile_sizes.
@@ -124,7 +124,7 @@ module attributes { transform.with_named_sequence } {

// Tile the reduction loop.
%tiled_reduction, %for_loop =
-transform.structured.tile_using_for %packed_c [0, 0, 4]
+transform.structured.tile_using_for %packed_c tile_sizes [0, 0, 4]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)

// Fuse pack ops into the for loop.
tests/transform_dialect/matmul_fill_spec_pad_pack_peel.mlir (4 changes: 2 additions & 2 deletions)

@@ -48,7 +48,7 @@ module attributes { transform.with_named_sequence } {

// First level for loop.
%tiled_reduction, %for_loop =
-transform.structured.tile_using_for %tiled_matmul [0, 0, 256]
+transform.structured.tile_using_for %tiled_matmul tile_sizes [0, 0, 256]
: (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">)

// Pad operation.
@@ -117,7 +117,7 @@ module attributes { transform.with_named_sequence } {

// Second level for loop.
%tiled_reduction_1, %for_loop_1 =
-transform.structured.tile_using_for %packed_c [0, 0, 4]
+transform.structured.tile_using_for %packed_c tile_sizes [0, 0, 4]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)

// Fuse pack ops into the for loop.
tests/transform_dialect/matmul_fill_spec_simple_pack.mlir (2 changes: 1 addition & 1 deletion)

@@ -115,7 +115,7 @@ module attributes { transform.with_named_sequence } {

// Tile reduction dimension.
%tiled_reduction, %loop =
-transform.structured.tile_using_for %packed_c[0, 0, 0, 0, 0, 4]
+transform.structured.tile_using_for %packed_c tile_sizes [0, 0, 0, 0, 0, 4]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)

// Find the for op and fuse the pack ops into the loop.