diff --git a/.coveragerc b/.coveragerc
index 91f6be48..56cf47fa 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,5 +1,10 @@
 [run]
 branch = true
-include =
-    pytest_bdd/*
-    tests/*
+source_pkgs = pytest_bdd
+source = tests
+
+[paths]
+source =
+    .
+    .tox/*/lib/*/site-packages/
+    .tox\*\Lib\site-packages\
diff --git a/.gitignore b/.gitignore
index 8ddc92e6..89e984ff 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,7 @@ pip-log.txt
 
 # Unit test / coverage reports
 .coverage
+.coverage.*
 .tox
 nosetests.xml
 
diff --git a/pytest_bdd/__init__.py b/pytest_bdd/__init__.py
index 1bf99991..d8f506fc 100644
--- a/pytest_bdd/__init__.py
+++ b/pytest_bdd/__init__.py
@@ -5,4 +5,4 @@
 
 __version__ = "4.1.0"
 
-__all__ = [given.__name__, when.__name__, then.__name__, scenario.__name__, scenarios.__name__]
+__all__ = ["given", "when", "then", "scenario", "scenarios"]
diff --git a/pytest_bdd/scenario.py b/pytest_bdd/scenario.py
index 5a8729d5..8804b79e 100644
--- a/pytest_bdd/scenario.py
+++ b/pytest_bdd/scenario.py
@@ -10,6 +10,7 @@
     scenario_name="Publishing the article",
 )
 """
+import contextlib
 import collections
 import os
 import re
@@ -81,7 +82,7 @@ def _find_step_function(request, step, scenario):
             )
 
 
-def _execute_step_function(request, scenario, step, step_func):
+async def _execute_step_function(request, scenario, step, step_func, sync):
     """Execute step function.
 
     :param request: PyTest request.
@@ -103,7 +104,10 @@ def _execute_step_function(request, scenario, step, step_func):
         request.config.hook.pytest_bdd_before_step_call(**kw)
         target_fixture = getattr(step_func, "target_fixture", None)
         # Execute the step.
-        return_value = step_func(**kwargs)
+        if sync:
+            return_value = step_func(**kwargs)
+        else:
+            return_value = await step_func(**kwargs)
         if target_fixture:
             inject_fixture(request, target_fixture, return_value)
 
@@ -113,7 +117,7 @@
         raise
 
 
-def _execute_scenario(feature, scenario, request):
+async def _execute_scenario(feature, scenario, request, sync):
     """Execute the scenario.
 
     :param feature: Feature.
@@ -133,7 +137,7 @@
                     request=request, feature=feature, scenario=scenario, step=step, exception=exception
                 )
                 raise
-            _execute_step_function(request, scenario, step, step_func)
+            await _execute_step_function(request, scenario, step, step_func, sync)
     finally:
         request.config.hook.pytest_bdd_after_scenario(request=request, feature=feature, scenario=scenario)
 
@@ -141,7 +145,18 @@
 FakeRequest = collections.namedtuple("FakeRequest", ["module"])
 
 
-def _get_scenario_decorator(feature, feature_name, scenario, scenario_name):
+def await_(fn, *args):
+    v = fn(*args)
+    with contextlib.closing(v.__await__()) as gen:
+        try:
+            gen.send(None)
+        except StopIteration as e:
+            return e.value
+        else:
+            raise RuntimeError("coro did not stop")
+
+
+def _get_scenario_decorator(feature, feature_name, scenario, scenario_name, *, sync):
     # HACK: Ideally we would use `def decorator(fn)`, but we want to return a custom exception
     # when the decorator is misused.
     # Pytest inspect the signature to determine the required fixtures, and in that case it would look
@@ -160,10 +175,19 @@ def decorator(*args):
             if arg not in function_args:
                 function_args.append(arg)
 
-        @pytest.mark.usefixtures(*function_args)
-        def scenario_wrapper(request):
-            _execute_scenario(feature, scenario, request)
-            return fn(*(request.getfixturevalue(arg) for arg in args))
+        if sync:
+
+            @pytest.mark.usefixtures(*function_args)
+            def scenario_wrapper(request):
+                await_(_execute_scenario, feature, scenario, request, sync)
+                return fn(*(request.getfixturevalue(arg) for arg in args))
+
+        else:
+
+            @pytest.mark.usefixtures(*function_args)
+            async def scenario_wrapper(request):
+                await _execute_scenario(feature, scenario, request, sync)
+                return await fn(*(request.getfixturevalue(arg) for arg in args))
 
         for param_set in scenario.get_params():
             if param_set:
@@ -180,7 +204,7 @@ def scenario_wrapper(request):
     return decorator
 
 
-def scenario(feature_name, scenario_name, encoding="utf-8", example_converters=None, features_base_dir=None):
+def scenario(feature_name, scenario_name, encoding="utf-8", example_converters=None, features_base_dir=None, sync=True):
     """Scenario decorator.
 
     :param str feature_name: Feature file name. Absolute or relative to the configured feature base path.
@@ -213,7 +237,7 @@ def scenario(feature_name, scenario_name, encoding="utf-8", example_converters=N
     scenario.validate()
 
     return _get_scenario_decorator(
-        feature=feature, feature_name=feature_name, scenario=scenario, scenario_name=scenario_name
+        feature=feature, feature_name=feature_name, scenario=scenario, scenario_name=scenario_name, sync=sync
    )
 
 
@@ -263,7 +287,7 @@ def get_name():
         suffix = f"_{index}"
 
 
-def scenarios(*feature_paths, **kwargs):
+def scenarios(*feature_paths, sync=True, **kwargs):
     """Parse features from the paths and put all found scenarios in the caller module.
 
     :param *feature_paths: feature file paths to use for scenarios
@@ -293,9 +317,18 @@ def scenarios(*feature_paths, **kwargs):
             # skip already bound scenarios
             if (scenario_object.feature.filename, scenario_name) not in module_scenarios:
 
-                @scenario(feature.filename, scenario_name, **kwargs)
-                def _scenario():
-                    pass  # pragma: no cover
+                decorator = scenario(feature.filename, scenario_name, sync=sync, **kwargs)
+                if sync:
+
+                    @decorator
+                    def _scenario():
+                        pass  # pragma: no cover
+
+                else:
+
+                    @decorator
+                    async def _scenario():
+                        pass  # pragma: no cover
 
                 for test_name in get_python_name_generator(scenario_name):
                     if test_name not in caller_locals:
diff --git a/setup.cfg b/setup.cfg
index f65f9aa6..b710dd26 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -35,7 +35,7 @@ install_requires =
     pytest>=4.3
 tests_require = tox
-packages = pytest_bdd
+packages = find:
 include_package_data = True
 
 [options.entry_points]
diff --git a/tests/feature/test_async_scenarios.py b/tests/feature/test_async_scenarios.py
new file mode 100644
index 00000000..8c2dff4c
--- /dev/null
+++ b/tests/feature/test_async_scenarios.py
@@ -0,0 +1,104 @@
+"""Test scenarios shortcut."""
+import textwrap
+
+from tests.utils import assert_outcomes
+
+
+def test_scenarios(testdir, pytest_params):
+    """Test scenarios shortcut (used together with @scenario for individual test override)."""
+    testdir.makeini(
+        """
+            [pytest]
+            console_output_style=classic
+        """
+    )
+    testdir.makeconftest(
+        """
+        import pytest
+        from pytest_bdd import given
+
+        import anyio
+
+        @given('I have a bar')
+        async def i_have_bar():
+            await anyio.sleep(0)
+            print('bar!')
+            return 'bar'
+        """
+    )
+    features = testdir.mkdir("features")
+    features.join("test.feature").write_text(
+        textwrap.dedent(
+            """
+            @anyio
+            Scenario: Test scenario
+                Given I have a bar
+            """
+        ),
+        "utf-8",
+        ensure=True,
+    )
+    features.join("subfolder", "test.feature").write_text(
+        textwrap.dedent(
+            """
+            @anyio
+            Scenario: Test subfolder scenario
+                Given I have a bar
+
+            @anyio
+            Scenario: Test failing subfolder scenario
+                Given I have a failing bar
+
+            @anyio
+            Scenario: Test already bound scenario
+                Given I have a bar
+
+            @anyio
+            Scenario: Test scenario
+                Given I have a bar
+            """
+        ),
+        "utf-8",
+        ensure=True,
+    )
+    testdir.makepyfile(
+        """
+        import pytest
+        from pytest_bdd import scenarios, scenario
+
+        @pytest.mark.anyio
+        @scenario('features/subfolder/test.feature', 'Test already bound scenario', sync=False)
+        async def test_already_bound():
+            pass
+
+        scenarios('features', sync=False)
+        """
+    )
+    result = testdir.runpytest_subprocess("-v", "-s", *pytest_params)
+    assert_outcomes(result, passed=8, failed=2)
+    result.stdout.fnmatch_lines(["*collected 10 items"])
+    result.stdout.fnmatch_lines(["*test_test_subfolder_scenario[[]asyncio[]] *bar!", "PASSED"])
+    result.stdout.fnmatch_lines(["*test_test_subfolder_scenario[[]trio[]] *bar!", "PASSED"])
+    result.stdout.fnmatch_lines(["*test_test_scenario[[]asyncio[]] *bar!", "PASSED"])
+    result.stdout.fnmatch_lines(["*test_test_scenario[[]trio[]] *bar!", "PASSED"])
+    result.stdout.fnmatch_lines(["*test_test_failing_subfolder_scenario[[]asyncio[]] *FAILED"])
+    result.stdout.fnmatch_lines(["*test_test_failing_subfolder_scenario[[]trio[]] *FAILED"])
+    result.stdout.fnmatch_lines(["*test_already_bound[[]asyncio[]] *bar!", "PASSED"])
+    result.stdout.fnmatch_lines(["*test_already_bound[[]trio[]] *bar!", "PASSED"])
+    result.stdout.fnmatch_lines(["*test_test_scenario_1[[]asyncio[]] *bar!", "PASSED"])
+    result.stdout.fnmatch_lines(["*test_test_scenario_1[[]trio[]] *bar!", "PASSED"])
+
+
+def test_scenarios_none_found(testdir, pytest_params):
+    """Test scenarios shortcut when no scenarios found."""
+    testpath = testdir.makepyfile(
+        """
+        import pytest
+        from pytest_bdd import scenarios
+
+        scenarios('.', sync=False)
+        """
+    )
+    result = testdir.runpytest_subprocess(testpath, *pytest_params)
+    assert_outcomes(result, errors=1)
+    result.stdout.fnmatch_lines(["*NoScenariosFound*"])
diff --git a/tox.ini b/tox.ini
index 219b1cbb..f193cfe8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,4 +1,8 @@
 [tox]
+minversion=3.23.1
+requires=
+    virtualenv>=20.4.7
+isolated_build=true
 distshare = {homedir}/.tox/distshare
 envlist = py38-pytestlatest-linters,
           py39-pytest{43,44,45,46,50,51,52,53,54,60,61,62, latest}-coverage,
@@ -8,7 +12,7 @@ skip_missing_interpreters = true
 
 [testenv]
 setenv =
-    coverage: _PYTEST_CMD=coverage run --append -m pytest
+    coverage: _PYTEST_MORE_ARGS=--cov
     xdist: _PYTEST_MORE_ARGS=-n3 -rfsxX
 deps =
     pytestlatest: pytest
@@ -25,9 +29,10 @@ deps =
     pytest44: pytest~=4.4.0
     pytest43: pytest~=4.3.0
-    coverage: coverage
-    xdist: pytest-xdist
+    coverage: pytest-cov
+    xdist: pytest-xdist>=2.3.0
     -r{toxinidir}/requirements-testing.txt
+    anyio[trio]
 
 commands = {env:_PYTEST_CMD:pytest} {env:_PYTEST_MORE_ARGS:} {posargs:-vvl}
 
 ; Black doesn't support >py38 now