Skip to content

Commit

Permalink
#8364: Split tests requiring fast runtime mode off
Browse files Browse the repository at this point in the history
  • Loading branch information
ayerofieiev-tt committed May 23, 2024
1 parent f0f9d20 commit 8090144
Show file tree
Hide file tree
Showing 11 changed files with 44 additions and 10 deletions.
17 changes: 11 additions & 6 deletions .github/workflows/ttnn-post-commit.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,17 @@ jobs:
# # N300
{arch: wormhole_b0, runs-on: ["wormhole_b0", "multi-chip-num-pcie-1", "multi-chip-num-chips-2"], name: N300},
]
test-group: [
{name: ttnn group 1, cmd: pytest $TT_METAL_HOME/tests/ttnn/unit_tests -v --splits 2 --group 1},
{name: ttnn group 2, cmd: pytest $TT_METAL_HOME/tests/ttnn/unit_tests -v --splits 2 --group 2},
{name: ttnn cpp tests, cmd: ./build/test/ttnn/unit_tests_ttnn},

]
test-group:
- name: ttnn group 1
cmd: pytest $TT_METAL_HOME/tests/ttnn/unit_tests -v --splits 2 --group 1 -m "not disable_fast_runtime_mode"
- name: ttnn group 2
cmd: pytest $TT_METAL_HOME/tests/ttnn/unit_tests -v --splits 2 --group 2 -m "not disable_fast_runtime_mode"
- name: ttnn group 3
cmd: pytest $TT_METAL_HOME/tests/ttnn/unit_tests -m requires_fast_runtime_mode_off
env:
TTNN_CONFIG_OVERRIDES: '{"enable_fast_runtime_mode": false}'
- name: ttnn cpp tests
cmd: ./build/test/ttnn/unit_tests_ttnn
name: ${{ matrix.test-group.name }} ${{ matrix.runner-info.arch }} ${{ matrix.runner-info.name }}
env:
TT_METAL_ENV: ${{ vars.TT_METAL_ENV }}
Expand Down
1 change: 1 addition & 0 deletions pytest.ini
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,4 @@ markers =
models_performance_virtual_machine: mark model silicon tests for performance on virtual_machine
models_device_performance_bare_metal: mark model silicon tests for device performance on bare metal
model_perf_t3000: mark model silicon tests for performance on t3000 bare metal
requires_fast_runtime_mode_off: mark test as requiring fast runtime mode to be disabled (skipped when enable_fast_runtime_mode is on)
14 changes: 14 additions & 0 deletions tests/ttnn/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

# SPDX-License-Identifier: Apache-2.0

import os
import copy
import datetime
import json
Expand All @@ -24,6 +25,19 @@ def pytest_make_parametrize_id(config, val, argname):
return f"{argname}={val}"


def pytest_collection_modifyitems(config, items):
    """Skip tests marked ``requires_fast_runtime_mode_off`` when fast runtime mode is enabled.

    Pytest collection hook: if ``ttnn.CONFIG.enable_fast_runtime_mode`` is set,
    every collected item carrying the ``requires_fast_runtime_mode_off`` marker
    gets a ``skip`` marker added, so such tests only run in the CI group that
    disables fast runtime mode (see ttnn-post-commit workflow).

    Args:
        config: the pytest config object (unused, required by the hook signature).
        items: list of collected test items; mutated in place by adding skip markers.
    """
    if not ttnn.CONFIG.enable_fast_runtime_mode:
        return

    logger.warning("Fast Runtime Mode is ON. Skipping tests tagged with @pytest.mark.requires_fast_runtime_mode_off")
    # One shared skip marker instance for all affected items.
    skip_marker = pytest.mark.skip(reason="Skipping test with requires_fast_runtime_mode_off")
    for item in items:
        if "requires_fast_runtime_mode_off" in item.keywords:
            # Intentionally quiet per item: a single summary warning above is
            # enough; logging every item's keywords spams collection output.
            item.add_marker(skip_marker)


@pytest.fixture(autouse=True)
def pre_and_post(request):
original_config = copy.copy(ttnn.CONFIG)
Expand Down
2 changes: 2 additions & 0 deletions tests/ttnn/unit_tests/operations/test_experimental.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@


@skip_for_wormhole_b0()
@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("height", [32])
@pytest.mark.parametrize("width", [32])
def test_ttnn_experimental_tensor_exp(device, height, width):
Expand Down Expand Up @@ -69,6 +70,7 @@ def test_ttnn_experimental_operations_primary_moreh_matmul(device, m_size, k_siz
assert_with_pcc(torch_output_tensor, output_tensor)


@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("input_a_is_sharded", [True, False])
@pytest.mark.parametrize("output_is_sharded", [True, False])
@pytest.mark.parametrize("m_size, num_cores", [[25088, 98]])
Expand Down
9 changes: 5 additions & 4 deletions tests/ttnn/unit_tests/operations/test_transformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -496,10 +496,11 @@ def test_concatenate_heads_when_head_size_is_not_a_multiple_of_32(device):

with pytest.raises(RuntimeError) as e:
output_tensor = ttnn.transformer.concatenate_heads(input_tensor)
assert (
"Head size must be a multiple of 32! Update matmul that uses the output of this operation to have the padding in the weights!"
in str(e.value)
)

assert (
"Head size must be a multiple of 32! Update matmul that uses the output of this operation to have the padding in the weights!"
in str(e.value)
)

input_tensor = torch.nn.functional.pad(torch_input_tensor, (0, padded_head_size - head_size), "constant", 0)
input_tensor = ttnn.from_torch(
Expand Down
1 change: 1 addition & 0 deletions tests/ttnn/unit_tests/test_deallocate.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@

@pytest.mark.parametrize("h", [32])
@pytest.mark.parametrize("w", [2 * 32])
@pytest.mark.requires_fast_runtime_mode_off
def test_deallocate(device, h, w):
torch_input_tensor = torch.rand((h, w), dtype=torch.bfloat16)

Expand Down
1 change: 1 addition & 0 deletions tests/ttnn/unit_tests/test_pre_and_post_operation_hook.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ def post_hook_to_print_output(operation, args, kwargs, output):


@skip_for_wormhole_b0()
@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize("h", [32])
@pytest.mark.parametrize("w", [32])
Expand Down
5 changes: 5 additions & 0 deletions tests/ttnn/unit_tests/test_reports.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
import ttnn.database


@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("height", [1024])
@pytest.mark.parametrize("width", [1024])
def test_enable_logging(device, height, width):
Expand Down Expand Up @@ -45,6 +46,7 @@ def test_enable_logging(device, height, width):
assert len(operations) == 5


@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("height", [1024])
@pytest.mark.parametrize("width", [1024])
def test_enable_logging_and_enable_graph_report(device, height, width):
Expand All @@ -67,6 +69,7 @@ def test_enable_logging_and_enable_graph_report(device, height, width):
ttnn.to_torch(output_tensor)


@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("height", [1024])
@pytest.mark.parametrize("width", [1024])
def test_enable_logging_and_enable_detailed_buffer_report(device, height, width):
Expand Down Expand Up @@ -107,6 +110,7 @@ def test_enable_logging_and_enable_detailed_buffer_report(device, height, width)
assert len(buffer_pages) > 0


@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("height", [1024])
@pytest.mark.parametrize("width", [1024])
def test_enable_logging_and_enable_comparison_mode(device, height, width):
Expand Down Expand Up @@ -139,6 +143,7 @@ def test_enable_logging_and_enable_comparison_mode(device, height, width):
assert len(operations) > 0


@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("height", [1024])
@pytest.mark.parametrize("width", [1024])
def test_enable_logging_and_enable_detailed_tensor_report(device, height, width):
Expand Down
2 changes: 2 additions & 0 deletions tests/ttnn/unit_tests/test_tracer.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ def test_exp():


@skip_for_wormhole_b0()
@pytest.mark.requires_fast_runtime_mode_off
def test_reshape():
with trace():
tensor = torch.randint(0, 100, (4, 64))
Expand All @@ -40,6 +41,7 @@ def test_reshape():


@skip_for_wormhole_b0()
@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("show_modules", [True, False])
def test_torch_bert(show_modules):
model_name = "google/bert_uncased_L-4_H-256_A-4"
Expand Down
1 change: 1 addition & 0 deletions tests/ttnn/unit_tests/test_tutorials.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ def collect_tutorials():


@skip_for_wormhole_b0()
@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("notebook_path", collect_tutorials())
def test_tutorials(notebook_path):
with open(notebook_path) as f:
Expand Down
1 change: 1 addition & 0 deletions tests/ttnn/unit_tests/test_validate_decorator.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@


@skip_for_wormhole_b0()
@pytest.mark.requires_fast_runtime_mode_off
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize("h", [32])
@pytest.mark.parametrize("w", [32])
Expand Down

0 comments on commit 8090144

Please sign in to comment.