Rework pipeline tests (#19366)
* Rework pipeline tests

* Try to fix Flax tests

* Try to put it before

* Use a new decorator instead

* Remove ignore marker since it doesn't work

* Filter pipeline tests

* Woopsie

* Use the filtered list

* Clean up and fake modif

* Remove init

* Revert fake modif
sgugger committed Oct 7, 2022
1 parent 983451a commit 9ac586b
Showing 27 changed files with 95 additions and 149 deletions.
52 changes: 31 additions & 21 deletions .circleci/config.yml
@@ -79,10 +79,19 @@ jobs:
path: ~/transformers/tests_fetched_summary.txt
- run: |
if [ -f test_list.txt ]; then
mv test_list.txt test_preparation/test_list.txt
cp test_list.txt test_preparation/test_list.txt
else
touch test_preparation/test_list.txt
fi
- run: python utils/tests_fetcher.py --filter_pipeline_tests
- run: |
if [ -f test_list.txt ]; then
mv test_list.txt test_preparation/filtered_test_list.txt
else
touch test_preparation/filtered_test_list.txt
fi
- store_artifacts:
path: ~/transformers/test_preparation/filtered_test_list.txt
- run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt
- store_artifacts:
path: ~/transformers/examples_tests_fetched_summary.txt
@@ -97,6 +106,7 @@ jobs:
root: test_preparation/
paths:
test_list.txt
filtered_test_list.txt
examples_test_list.txt

# To run all tests for the nightly build
@@ -110,6 +120,8 @@ jobs:
mkdir test_preparation
echo "tests" > test_preparation/test_list.txt
echo "tests" > test_preparation/examples_test_list.txt
- run: python utils/tests_fetcher.py --filter_pipeline_tests
- run: mv test_list.txt test_preparation/filtered_test_list.txt

- persist_to_workspace:
root: test_preparation/
@@ -132,7 +144,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -152,7 +164,7 @@ jobs:
key: v0.5-torch_and_tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/filtered_test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -174,7 +186,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -192,7 +204,7 @@ jobs:
key: v0.5-torch_and_flax-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/filtered_test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -213,7 +225,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -231,7 +243,7 @@ jobs:
key: v0.5-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -252,7 +264,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -269,7 +281,7 @@ jobs:
key: v0.5-tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -290,7 +302,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -306,7 +318,7 @@ jobs:
key: v0.5-flax-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -318,7 +330,6 @@ jobs:
- image: cimg/python:3.7.12
environment:
OMP_NUM_THREADS: 1
RUN_PIPELINE_TESTS: yes
TRANSFORMERS_IS_CI: yes
PYTEST_TIMEOUT: 120
resource_class: xlarge
@@ -345,7 +356,7 @@ jobs:
key: v0.5-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch tests/pipelines | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -357,7 +368,6 @@ jobs:
- image: cimg/python:3.7.12
environment:
OMP_NUM_THREADS: 1
RUN_PIPELINE_TESTS: yes
TRANSFORMERS_IS_CI: yes
PYTEST_TIMEOUT: 120
resource_class: xlarge
@@ -382,7 +392,7 @@ jobs:
key: v0.5-tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_preparation/test_list.txt) -m is_pipeline_test | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf tests/pipelines | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -401,7 +411,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -557,7 +567,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -575,7 +585,7 @@ jobs:
key: v0.5-hub-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/test_list.txt) -m is_staging_test | tee tests_output.txt
- run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/filtered_test_list.txt) -m is_staging_test | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -596,7 +606,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@@ -610,7 +620,7 @@ jobs:
key: v0.5-onnx-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/test_list.txt) -k onnx | tee tests_output.txt
- run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/filtered_test_list.txt) -k onnx | tee tests_output.txt

- store_artifacts:
path: ~/transformers/tests_output.txt
@@ -690,7 +700,7 @@ jobs:
steps:
- checkout
- attach_workspace:
            at: ~/transformers/test_preparation
- run: |
            if [ ! -s test_preparation/test_list.txt ]; then
            if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
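The config changes above make the test fetcher write both test_list.txt and a filtered_test_list.txt with pipeline tests removed: the framework jobs read the filtered list, while the dedicated pipeline jobs now target tests/pipelines directly. The implementation of the new --filter_pipeline_tests option lives in utils/tests_fetcher.py and is not part of this excerpt; the snippet below is only a minimal sketch of such a filter, assuming test_list.txt holds space-separated test paths at the repository root.

# Hypothetical sketch of a pipeline-test filter, not the actual
# utils/tests_fetcher.py implementation (which is outside this diff).
import os


def filter_pipeline_tests(test_list_path="test_list.txt"):
    """Drop tests living under tests/pipelines from the fetched test list."""
    if not os.path.isfile(test_list_path):
        return
    with open(test_list_path) as f:
        tests = f.read().split()
    kept = [t for t in tests if not t.startswith("tests/pipelines")]
    with open(test_list_path, "w") as f:
        f.write(" ".join(kept))


if __name__ == "__main__":
    filter_pipeline_tests()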
8 changes: 2 additions & 6 deletions .github/workflows/self-scheduled.yml
@@ -256,10 +256,8 @@ jobs:
- name: Run all pipeline tests on GPU
working-directory: /transformers
env:
RUN_PIPELINE_TESTS: yes
run: |
python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests
python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines
- name: Failure short reports
if: ${{ failure() }}
@@ -301,10 +299,8 @@ jobs:
- name: Run all pipeline tests on GPU
working-directory: /transformers
env:
RUN_PIPELINE_TESTS: yes
run: |
python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests
python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests/pipelines
- name: Failure short reports
if: ${{ always() }}
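With the is_pipeline_test marker gone, the scheduled GPU jobs above simply point pytest at tests/pipelines. A rough local equivalent of the new invocation, as a sketch only (it assumes pytest and pytest-xdist are installed and is run from the repository root):

# Roughly equivalent local run of the new pipeline-test target; -n and
# --dist come from pytest-xdist. This is not a script shipped by the commit.
import pytest

exit_code = pytest.main(["-n", "1", "-v", "--dist=loadfile", "tests/pipelines"])
raise SystemExit(exit_code)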
1 change: 0 additions & 1 deletion conftest.py
@@ -32,7 +32,6 @@


def pytest_configure(config):
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipeline are tested")
config.addinivalue_line(
"markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
)
32 changes: 12 additions & 20 deletions src/transformers/testing_utils.py
@@ -133,7 +133,6 @@ def parse_int_from_env(key, default=None):
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)

@@ -176,25 +175,6 @@ def is_pt_flax_cross_test(test_case):
return pytest.mark.is_pt_flax_cross_test()(test_case)


def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)


def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
@@ -309,6 +289,18 @@ def require_torch(test_case):
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)


def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
    These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)


def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.
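The require_torch_or_tf decorator added above is presumably the "new decorator" mentioned in the commit message: it skips a test only when neither framework is installed. A usage sketch follows; the test class name is illustrative, not one touched by this commit.

# Illustrative use of the new decorator from src/transformers/testing_utils.py;
# MyPipelineTests is a hypothetical name, not a class changed by this commit.
import unittest

from transformers.testing_utils import require_torch_or_tf


@require_torch_or_tf
class MyPipelineTests(unittest.TestCase):
    def test_placeholder(self):
        # Runs when PyTorch or TensorFlow is available; skipped otherwise.
        self.assertTrue(True)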
10 changes: 1 addition & 9 deletions tests/pipelines/test_pipelines_audio_classification.py
@@ -18,19 +18,11 @@

from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torchaudio, slow

from .test_pipelines_common import ANY, PipelineTestCaseMeta


@is_pipeline_test
@require_torch
class AudioClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
tests/pipelines/test_pipelines_automatic_speech_recognition.py
@@ -31,7 +31,6 @@
from transformers.pipelines.audio_utils import chunk_bytes_iter
from transformers.pipelines.automatic_speech_recognition import chunk_iter
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_pyctcdecode,
@@ -52,7 +51,6 @@
# from .test_pipelines_common import CustomInputPipelineCommonMixin


@is_pipeline_test
class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = {
k: v