chore: Raise an error if it is not possible to generate explicit examples

Ref: #1771
Stranger6667 committed Jan 20, 2024
1 parent a0ef66f commit ad5c558
Showing 7 changed files with 99 additions and 10 deletions.
4 changes: 4 additions & 0 deletions docs/changelog.rst
@@ -8,6 +8,10 @@ Changelog

- CLI options for overriding Open API parameters in test cases. :issue:`1676`

**Changed**

- Raise an error if it is not possible to generate explicit examples. :issue:`1771`

.. _v3.23.1:

:version:`3.23.1 <v3.23.0...v3.23.1>` - 2024-01-14
18 changes: 16 additions & 2 deletions src/schemathesis/_hypothesis.py
@@ -2,7 +2,7 @@
from __future__ import annotations
import asyncio
import warnings
from typing import Any, Callable
from typing import Any, Callable, Optional

import hypothesis
from hypothesis import Phase
@@ -113,7 +113,7 @@ def add_examples(test: Callable, operation: APIOperation, hook_dispatcher: HookD
"""Add examples to the Hypothesis test, if they are specified in the schema."""
try:
examples: list[Case] = [get_single_example(strategy) for strategy in operation.get_strategies_from_examples()]
except (OperationSchemaError, HypothesisRefResolutionError, Unsatisfiable):
except (OperationSchemaError, HypothesisRefResolutionError, Unsatisfiable) as exc:
# Invalid schema:
# In this case, the user didn't pass `--validate-schema=false` and will see an error in the output anyway,
# and no tests will be executed. For this reason, examples can be skipped
@@ -123,6 +123,8 @@ def add_examples(test: Callable, operation: APIOperation, hook_dispatcher: HookD
# Skipping this exception here allows us to continue the testing process for other operations.
# Still, we allow running user-defined hooks
examples = []
if isinstance(exc, Unsatisfiable):
add_unsatisfied_example_mark(test, exc)
context = HookContext(operation) # context should be passed here instead
GLOBAL_HOOK_DISPATCHER.dispatch("before_add_examples", context, examples)
operation.schema.hooks.dispatch("before_add_examples", context, examples)
@@ -133,6 +135,18 @@ def add_examples(test: Callable, operation: APIOperation, hook_dispatcher: HookD
return test


def add_unsatisfied_example_mark(test: Callable, exc: Unsatisfiable) -> None:
test._schemathesis_unsatisfied_example = exc # type: ignore


def has_unsatisfied_example_mark(test: Callable) -> bool:
return hasattr(test, "_schemathesis_unsatisfied_example")


def get_unsatisfied_example_mark(test: Callable) -> Optional[Unsatisfiable]:
return getattr(test, "_schemathesis_unsatisfied_example", None)


def get_single_example(strategy: st.SearchStrategy[Case]) -> Case:
examples: list[Case] = []
add_single_example(strategy, examples)
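
The `*_unsatisfied_example_mark` helpers added above implement a small marking protocol: when explicit example generation fails with `Unsatisfiable`, the exception is recorded as an attribute on the generated test function instead of being raised right away, and the pytest plugin and the runner look it up later. A minimal standalone sketch of the pattern, assuming only that `hypothesis` is installed (`some_generated_test` is a hypothetical stand-in for a test built by Schemathesis, not part of this diff):

    from typing import Callable, Optional

    from hypothesis.errors import Unsatisfiable

    def add_unsatisfied_example_mark(test: Callable, exc: Unsatisfiable) -> None:
        # Record the failure on the test function itself so later stages can inspect it.
        test._schemathesis_unsatisfied_example = exc  # type: ignore[attr-defined]

    def has_unsatisfied_example_mark(test: Callable) -> bool:
        return hasattr(test, "_schemathesis_unsatisfied_example")

    def get_unsatisfied_example_mark(test: Callable) -> Optional[Unsatisfiable]:
        return getattr(test, "_schemathesis_unsatisfied_example", None)

    def some_generated_test() -> None:
        # Hypothetical stand-in for a Hypothesis test created from an API operation.
        pass

    add_unsatisfied_example_mark(some_generated_test, Unsatisfiable("examples are unsatisfiable"))
    assert has_unsatisfied_example_mark(some_generated_test)
    assert isinstance(get_unsatisfied_example_mark(some_generated_test), Unsatisfiable)
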
15 changes: 11 additions & 4 deletions src/schemathesis/extra/pytest_plugin.py
@@ -1,4 +1,6 @@
from __future__ import annotations

import unittest
from contextlib import contextmanager
from functools import partial
from typing import Any, Callable, Generator, Type, TypeVar, cast
@@ -10,10 +12,10 @@
from _pytest.nodes import Node
from _pytest.python import Class, Function, FunctionDefinition, Metafunc, Module, PyCollector
from hypothesis import reporting
from hypothesis.errors import InvalidArgument
from hypothesis.errors import InvalidArgument, Unsatisfiable
from hypothesis_jsonschema._canonicalise import HypothesisRefResolutionError

from .._hypothesis import create_test
from .._hypothesis import create_test, get_unsatisfied_example_mark
from ..constants import RECURSIVE_REFERENCE_ERROR_MESSAGE
from .._dependency_versions import IS_PYTEST_ABOVE_7, IS_PYTEST_ABOVE_54
from ..exceptions import OperationSchemaError, SkipTest
@@ -234,6 +236,7 @@ def pytest_pyfunc_call(pyfuncitem): # type:ignore
For example - kwargs validation fails for some strategy.
"""
__tracebackhide__ = True
if isinstance(pyfuncitem, SchemathesisFunction):
with skip_unnecessary_hypothesis_output():
outcome = yield
@@ -243,8 +246,12 @@
raise OperationSchemaError(exc.args[0]) from None
except HypothesisRefResolutionError:
pytest.skip(RECURSIVE_REFERENCE_ERROR_MESSAGE)
except SkipTest as exc:
pytest.skip(exc.args[0])
except (SkipTest, unittest.SkipTest) as exc:
unsatisfiable = get_unsatisfied_example_mark(pyfuncitem.obj)
if unsatisfiable is not None:
raise Unsatisfiable("Failed to generate test cases from examples for this API operation") from None
else:
pytest.skip(exc.args[0])
except Exception as exc:
if hasattr(exc, "__notes__"):
exc.__notes__ = [note for note in exc.__notes__ if not _should_ignore_entry(note)] # type: ignore
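
In the pytest integration, a test whose explicit examples could not be generated ends up with nothing to run, so Hypothesis skips it; catching `unittest.SkipTest` covers that case, and the unsatisfied-example mark decides whether the skip is upgraded to a hard `Unsatisfiable` error or propagated as a regular skip. A reduced sketch of that decision with the pytest machinery stripped away (`skip` is a stand-in for `pytest.skip` and `test_function` for `pyfuncitem.obj`; both are assumptions for illustration):

    from typing import Callable

    from hypothesis.errors import Unsatisfiable

    def handle_skip(exc: BaseException, test_function: Callable, skip: Callable[[str], None]) -> None:
        # Simplified stand-in for the except-branch shown above.
        if getattr(test_function, "_schemathesis_unsatisfied_example", None) is not None:
            # The skip really means "no usable examples" - surface it as an error instead.
            raise Unsatisfiable("Failed to generate test cases from examples for this API operation") from None
        skip(str(exc))
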
6 changes: 6 additions & 0 deletions src/schemathesis/runner/impl/core.py
@@ -22,6 +22,7 @@
from ..override import CaseOverride
from ... import failures, hooks
from ..._compat import MultipleFailures
from ..._hypothesis import has_unsatisfied_example_mark
from ...auths import unregister as unregister_auth
from ...generation import DataGenerationMethod, GenerationConfig
from ...constants import DEFAULT_STATEFUL_RECURSION_LIMIT, RECURSIVE_REFERENCE_ERROR_MESSAGE, USER_AGENT
@@ -400,6 +401,11 @@ def run_test(
except Exception as error:
status = Status.error
result.add_error(error)
if has_unsatisfied_example_mark(test):
status = Status.error
result.add_error(
hypothesis.errors.Unsatisfiable("Failed to generate test cases from examples for this API operation")
)
test_elapsed_time = time.monotonic() - test_start_time
# DEPRECATED: Seed is the same per test run
# Fetch seed value, hypothesis generates it during test execution
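
On the runner side an unsatisfiable example does not necessarily make the Hypothesis test raise - the explicit phase may simply run nothing - so the mark is checked after the call and turned into an explicit error on the test result. A self-contained sketch of the same flow, where `record_error` is a hypothetical stand-in for `result.add_error` from the diff:

    from typing import Callable, List

    from hypothesis.errors import Unsatisfiable

    def run_and_collect(test: Callable, record_error: Callable[[Exception], None]) -> None:
        try:
            test()
        except Exception as error:
            record_error(error)
        # Report the mark even when the test itself finished without raising.
        if getattr(test, "_schemathesis_unsatisfied_example", None) is not None:
            record_error(Unsatisfiable("Failed to generate test cases from examples for this API operation"))

    errors: List[Exception] = []

    def marked_test() -> None:
        # Hypothetical test whose explicit examples could not be generated.
        pass

    marked_test._schemathesis_unsatisfied_example = Unsatisfiable("unsatisfiable example schema")  # type: ignore[attr-defined]
    run_and_collect(marked_test, errors.append)
    assert len(errors) == 1 and isinstance(errors[0], Unsatisfiable)
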
14 changes: 11 additions & 3 deletions test/runner/test_runner.py
@@ -741,7 +741,14 @@ def test_skip_operations_with_recursive_references(schema_with_recursive_referen
assert RECURSIVE_REFERENCE_ERROR_MESSAGE in after.result.errors[0].exception


def test_unsatisfiable_example(empty_open_api_3_schema):
@pytest.mark.parametrize(
"phases, expected, total_errors",
(
([Phase.explicit, Phase.generate], "Failed to generate test cases for this API operation", 2),
([Phase.explicit], "Failed to generate test cases from examples for this API operation", 1),
),
)
def test_unsatisfiable_example(empty_open_api_3_schema, phases, expected, total_errors):
# See GH-904
# When filling missing properties during examples generation leads to unsatisfiable schemas
empty_open_api_3_schema["paths"] = {
@@ -775,11 +782,12 @@ def test_unsatisfiable_example(empty_open_api_3_schema):
# Then the testing process should not raise an internal error
schema = oas_loaders.from_dict(empty_open_api_3_schema)
*_, after, finished = from_schema(
schema, hypothesis_settings=hypothesis.settings(max_examples=1, deadline=None)
schema, hypothesis_settings=hypothesis.settings(max_examples=1, deadline=None, phases=phases)
).execute()
# And the tests are failing because of the unsatisfiable schema
assert finished.has_errors
assert "Failed to generate test cases for this API operation" in after.result.errors[0].exception
assert expected in after.result.errors[0].exception
assert len(after.result.errors) == total_errors


@pytest.mark.operations("success")
50 changes: 50 additions & 0 deletions test/test_pytest.py
@@ -647,3 +647,53 @@ def test(case):
else:
expected = rf"E curl -X GET -H 'Authorization: {auth}' {openapi3_base_url}/failure"
assert expected in result.stdout.lines


def test_unsatisfiable_example(testdir, openapi3_base_url):
testdir.make_test(
f"""
schema.base_url = "{openapi3_base_url}"
@schema.parametrize(endpoint="success")
@settings(phases=[Phase.explicit])
def test(case):
case.validate_response(response)
""",
paths={
"/success": {
"post": {
"parameters": [
# This parameter is not satisfiable
{
"name": "key",
"in": "query",
"required": True,
"schema": {"type": "integer", "minimum": 5, "maximum": 4},
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"foo": {"type": "string", "example": "foo example string"},
},
},
}
}
},
"responses": {"200": {"description": "OK"}},
}
}
},
schema_name="simple_openapi.yaml",
)
result = testdir.runpytest()
# We should skip checking for a server error
result.assert_outcomes(failed=1)
assert (
"hypothesis.errors.Unsatisfiable: Failed to generate test cases from examples for this API operation"
in result.stdout.str()
)
2 changes: 1 addition & 1 deletion tox.ini
@@ -20,7 +20,7 @@ extras =
cov
setenv = COVERAGE_PROCESS_START={toxinidir}/pyproject.toml
commands =
coverage run -m pytest {posargs:-n auto --durations=10} test
coverage run -m pytest {posargs:-n auto --durations=10 -vv} test
coverage combine --keep
coverage report
coverage xml -i
