diff --git a/docs/changelog.rst b/docs/changelog.rst
index ec1b5cd7b7..7b880e239d 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -8,6 +8,10 @@ Changelog
 
 - CLI options for overriding Open API parameters in test cases. :issue:`1676`
 
+**Changed**
+
+- Raise an error if it is not possible to generate explicit examples. :issue:`1771`
+
 .. _v3.23.1:
 
 :version:`3.23.1 ` - 2024-01-14
diff --git a/src/schemathesis/_hypothesis.py b/src/schemathesis/_hypothesis.py
index a843cd1cf4..c1184f2429 100644
--- a/src/schemathesis/_hypothesis.py
+++ b/src/schemathesis/_hypothesis.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import asyncio
 import warnings
-from typing import Any, Callable
+from typing import Any, Callable, Optional
 
 import hypothesis
 from hypothesis import Phase
@@ -113,7 +113,7 @@ def add_examples(test: Callable, operation: APIOperation, hook_dispatcher: HookD
     """Add examples to the Hypothesis test, if they are specified in the schema."""
     try:
         examples: list[Case] = [get_single_example(strategy) for strategy in operation.get_strategies_from_examples()]
-    except (OperationSchemaError, HypothesisRefResolutionError, Unsatisfiable):
+    except (OperationSchemaError, HypothesisRefResolutionError, Unsatisfiable) as exc:
         # Invalid schema:
         # In this case, the user didn't pass `--validate-schema=false` and see an error in the output anyway,
         # and no tests will be executed. For this reason, examples can be skipped
@@ -123,6 +123,8 @@ def add_examples(test: Callable, operation: APIOperation, hook_dispatcher: HookD
         # Skipping this exception here allows us to continue the testing process for other operations.
         # Still, we allow running user-defined hooks
         examples = []
+        if isinstance(exc, Unsatisfiable):
+            add_unsatisfied_example_mark(test, exc)
     context = HookContext(operation)  # context should be passed here instead
     GLOBAL_HOOK_DISPATCHER.dispatch("before_add_examples", context, examples)
     operation.schema.hooks.dispatch("before_add_examples", context, examples)
@@ -133,6 +135,18 @@ def add_examples(test: Callable, operation: APIOperation, hook_dispatcher: HookD
     return test
 
 
+def add_unsatisfied_example_mark(test: Callable, exc: Unsatisfiable) -> None:
+    test._schemathesis_unsatisfied_example = exc  # type: ignore
+
+
+def has_unsatisfied_example_mark(test: Callable) -> bool:
+    return hasattr(test, "_schemathesis_unsatisfied_example")
+
+
+def get_unsatisfied_example_mark(test: Callable) -> Optional[Unsatisfiable]:
+    return getattr(test, "_schemathesis_unsatisfied_example", None)
+
+
 def get_single_example(strategy: st.SearchStrategy[Case]) -> Case:
     examples: list[Case] = []
     add_single_example(strategy, examples)
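The three helpers added to `_hypothesis.py` implement a small function-attribute protocol: the `Unsatisfiable` exception is stashed on the test function object itself, so both the pytest plugin and the runner can detect it later without threading extra state through the call chain. A minimal standalone sketch of the same pattern (the exception stub and the `add_mark`/`get_mark` names are illustrative, not Schemathesis APIs):

from typing import Callable, Optional

class Unsatisfiable(Exception):
    """Illustrative stand-in for hypothesis.errors.Unsatisfiable."""

ATTR = "_schemathesis_unsatisfied_example"

def add_mark(test: Callable, exc: Unsatisfiable) -> None:
    # Functions are ordinary objects, so the attribute survives for as long
    # as the same function object is passed around.
    setattr(test, ATTR, exc)

def get_mark(test: Callable) -> Optional[Unsatisfiable]:
    return getattr(test, ATTR, None)

def my_test() -> None:
    pass

add_mark(my_test, Unsatisfiable("no example could be generated"))
assert isinstance(get_mark(my_test), Unsatisfiable)
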
diff --git a/src/schemathesis/extra/pytest_plugin.py b/src/schemathesis/extra/pytest_plugin.py
index 0f53a331bd..44ecef1361 100644
--- a/src/schemathesis/extra/pytest_plugin.py
+++ b/src/schemathesis/extra/pytest_plugin.py
@@ -1,4 +1,6 @@
 from __future__ import annotations
+
+import unittest
 from contextlib import contextmanager
 from functools import partial
 from typing import Any, Callable, Generator, Type, TypeVar, cast
@@ -10,10 +12,10 @@
 from _pytest.nodes import Node
 from _pytest.python import Class, Function, FunctionDefinition, Metafunc, Module, PyCollector
 from hypothesis import reporting
-from hypothesis.errors import InvalidArgument
+from hypothesis.errors import InvalidArgument, Unsatisfiable
 from hypothesis_jsonschema._canonicalise import HypothesisRefResolutionError
 
-from .._hypothesis import create_test
+from .._hypothesis import create_test, get_unsatisfied_example_mark
 from ..constants import RECURSIVE_REFERENCE_ERROR_MESSAGE
 from .._dependency_versions import IS_PYTEST_ABOVE_7, IS_PYTEST_ABOVE_54
 from ..exceptions import OperationSchemaError, SkipTest
@@ -234,6 +236,7 @@ def pytest_pyfunc_call(pyfuncitem):  # type:ignore
 
     For example - kwargs validation is failed for some strategy.
     """
+    __tracebackhide__ = True
     if isinstance(pyfuncitem, SchemathesisFunction):
         with skip_unnecessary_hypothesis_output():
             outcome = yield
@@ -243,8 +246,12 @@ def pytest_pyfunc_call(pyfuncitem):  # type:ignore
             raise OperationSchemaError(exc.args[0]) from None
         except HypothesisRefResolutionError:
             pytest.skip(RECURSIVE_REFERENCE_ERROR_MESSAGE)
-        except SkipTest as exc:
-            pytest.skip(exc.args[0])
+        except (SkipTest, unittest.SkipTest) as exc:
+            unsatisfiable = get_unsatisfied_example_mark(pyfuncitem.obj)
+            if unsatisfiable is not None:
+                raise Unsatisfiable("Failed to generate test cases from examples for this API operation") from None
+            else:
+                pytest.skip(exc.args[0])
         except Exception as exc:
             if hasattr(exc, "__notes__"):
                 exc.__notes__ = [note for note in exc.__notes__ if not _should_ignore_entry(note)]  # type: ignore
diff --git a/src/schemathesis/runner/impl/core.py b/src/schemathesis/runner/impl/core.py
index 33f3d6c8d9..5ef0a0e981 100644
--- a/src/schemathesis/runner/impl/core.py
+++ b/src/schemathesis/runner/impl/core.py
@@ -22,6 +22,7 @@
 from ..override import CaseOverride
 from ... import failures, hooks
 from ..._compat import MultipleFailures
+from ..._hypothesis import has_unsatisfied_example_mark
 from ...auths import unregister as unregister_auth
 from ...generation import DataGenerationMethod, GenerationConfig
 from ...constants import DEFAULT_STATEFUL_RECURSION_LIMIT, RECURSIVE_REFERENCE_ERROR_MESSAGE, USER_AGENT
@@ -400,6 +401,11 @@ def run_test(
     except Exception as error:
         status = Status.error
         result.add_error(error)
+    if has_unsatisfied_example_mark(test):
+        status = Status.error
+        result.add_error(
+            hypothesis.errors.Unsatisfiable("Failed to generate test cases from examples for this API operation")
+        )
     test_elapsed_time = time.monotonic() - test_start_time
     # DEPRECATED: Seed is the same per test run
     # Fetch seed value, hypothesis generates it during test execution
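Both consumers above convert the mark into `hypothesis.errors.Unsatisfiable`, the same error Hypothesis raises when it cannot generate any valid input for a test. A self-contained illustration of that error, independent of Schemathesis (the `filter_too_much` health check is suppressed so the rejection path is actually reached; otherwise a `FailedHealthCheck` may fire first):

from hypothesis import HealthCheck, assume, given, settings, strategies as st
from hypothesis.errors import Unsatisfiable

@settings(suppress_health_check=[HealthCheck.filter_too_much])
@given(st.integers())
def never_satisfied(value: int) -> None:
    assume(False)  # reject every generated case

try:
    never_satisfied()
except Unsatisfiable as exc:
    print(f"caught: {exc}")  # e.g. "Unable to satisfy assumptions of never_satisfied"
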
diff --git a/test/runner/test_runner.py b/test/runner/test_runner.py
index 5bb7446031..3a30e2b83c 100644
--- a/test/runner/test_runner.py
+++ b/test/runner/test_runner.py
@@ -741,7 +741,14 @@ def test_skip_operations_with_recursive_references(schema_with_recursive_referen
     assert RECURSIVE_REFERENCE_ERROR_MESSAGE in after.result.errors[0].exception
 
 
-def test_unsatisfiable_example(empty_open_api_3_schema):
+@pytest.mark.parametrize(
+    "phases, expected, total_errors",
+    (
+        ([Phase.explicit, Phase.generate], "Failed to generate test cases for this API operation", 2),
+        ([Phase.explicit], "Failed to generate test cases from examples for this API operation", 1),
+    ),
+)
+def test_unsatisfiable_example(empty_open_api_3_schema, phases, expected, total_errors):
     # See GH-904
     # When filling missing properties during examples generation leads to unsatisfiable schemas
     empty_open_api_3_schema["paths"] = {
@@ -775,11 +782,12 @@ def test_unsatisfiable_example(empty_open_api_3_schema):
     # Then the testing process should not raise an internal error
     schema = oas_loaders.from_dict(empty_open_api_3_schema)
     *_, after, finished = from_schema(
-        schema, hypothesis_settings=hypothesis.settings(max_examples=1, deadline=None)
+        schema, hypothesis_settings=hypothesis.settings(max_examples=1, deadline=None, phases=phases)
     ).execute()
     # And the tests are failing because of the unsatisfiable schema
     assert finished.has_errors
-    assert "Failed to generate test cases for this API operation" in after.result.errors[0].exception
+    assert expected in after.result.errors[0].exception
+    assert len(after.result.errors) == total_errors
 
 
 @pytest.mark.operations("success")
diff --git a/test/test_pytest.py b/test/test_pytest.py
index f44bef0bf2..28d94246a7 100644
--- a/test/test_pytest.py
+++ b/test/test_pytest.py
@@ -647,3 +647,53 @@ def test(case):
     else:
         expected = rf"E curl -X GET -H 'Authorization: {auth}' {openapi3_base_url}/failure"
     assert expected in result.stdout.lines
+
+
+def test_unsatisfiable_example(testdir, openapi3_base_url):
+    testdir.make_test(
+        f"""
+
+schema.base_url = "{openapi3_base_url}"
+
+@schema.parametrize(endpoint="success")
+@settings(phases=[Phase.explicit])
+def test(case):
+    case.validate_response(response)
+""",
+        paths={
+            "/success": {
+                "post": {
+                    "parameters": [
+                        # This parameter is not satisfiable
+                        {
+                            "name": "key",
+                            "in": "query",
+                            "required": True,
+                            "schema": {"type": "integer", "minimum": 5, "maximum": 4},
+                        }
+                    ],
+                    "requestBody": {
+                        "content": {
+                            "application/json": {
+                                "schema": {
+                                    "type": "object",
+                                    "properties": {
+                                        "foo": {"type": "string", "example": "foo example string"},
+                                    },
+                                },
+                            }
+                        }
+                    },
+                    "responses": {"200": {"description": "OK"}},
+                }
+            }
+        },
+        schema_name="simple_openapi.yaml",
+    )
+    result = testdir.runpytest()
+    # The test should error out instead of being skipped
+    result.assert_outcomes(failed=1)
+    assert (
+        "hypothesis.errors.Unsatisfiable: Failed to generate test cases from examples for this API operation"
+        in result.stdout.str()
+    )
diff --git a/tox.ini b/tox.ini
index 08771bca77..8d1483d110 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,7 +20,7 @@ extras = cov
 setenv =
     COVERAGE_PROCESS_START={toxinidir}/pyproject.toml
 commands =
-    coverage run -m pytest {posargs:-n auto --durations=10} test
+    coverage run -m pytest {posargs:-n auto --durations=10 -vv} test
     coverage combine --keep
     coverage report
     coverage xml -i
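Taken together, the change means an API operation whose explicit examples cannot be generated now produces an error rather than a silent skip. A rough end-to-end sketch of the user-visible behavior, mirroring the new pytest test above (the schema dict and base URL are placeholders; the unsatisfiable `key` parameter plus the body `example` are what push the explicit phase into `Unsatisfiable`):

import hypothesis
import schemathesis
from hypothesis import Phase

RAW_SCHEMA = {
    "openapi": "3.0.2",
    "info": {"title": "Example", "version": "0.1.0"},
    "paths": {
        "/success": {
            "post": {
                "parameters": [
                    {
                        "name": "key",
                        "in": "query",
                        "required": True,
                        # minimum > maximum: no integer can satisfy this schema
                        "schema": {"type": "integer", "minimum": 5, "maximum": 4},
                    }
                ],
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {
                                "type": "object",
                                # the example triggers the explicit phase
                                "properties": {"foo": {"type": "string", "example": "foo example string"}},
                            }
                        }
                    }
                },
                "responses": {"200": {"description": "OK"}},
            }
        }
    },
}

schema = schemathesis.from_dict(RAW_SCHEMA, base_url="http://127.0.0.1:8080")

@schema.parametrize()
@hypothesis.settings(phases=[Phase.explicit])
def test_api(case):
    # With this change, the test errors with
    # "Unsatisfiable: Failed to generate test cases from examples for this API operation"
    # instead of the operation being reported as skipped.
    case.call_and_validate()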