chore: Continue testing after schema parsing errors if possible
Ref: #999
Stranger6667 committed Jan 14, 2021
1 parent 418b56e commit 1d793e3
Showing 22 changed files with 524 additions and 144 deletions.
3 changes: 3 additions & 0 deletions docs/changelog.rst
@@ -34,6 +34,8 @@ Changelog
- ``--skip-deprecated-endpoints`` is renamed to ``--skip-deprecated-operations``. `#869`_
- Rename various internal API methods that contained ``endpoint`` in their names. `#869`_
- Bump ``hypothesis-jsonschema`` version to ``0.19.0``. This version improves the handling of unsupported regular expression syntax and can generate data for a subset of schemas containing such regular expressions.
- Schemathesis doesn't stop testing on errors during schema parsing. These errors are handled the same way as other errors
  during the testing process. This allows Schemathesis to test API operations with valid definitions and report problematic operations instead of failing the whole run. `#999`_

**Fixed**

@@ -1660,6 +1662,7 @@ Deprecated

.. _#1007: https://github.com/schemathesis/schemathesis/issues/1007
.. _#1003: https://github.com/schemathesis/schemathesis/issues/1003
.. _#999: https://github.com/schemathesis/schemathesis/issues/999
.. _#994: https://github.com/schemathesis/schemathesis/issues/994
.. _#992: https://github.com/schemathesis/schemathesis/issues/992
.. _#990: https://github.com/schemathesis/schemathesis/issues/990
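The change threads schema-parsing failures through the same pipeline as valid operations by wrapping each collected item in an `Ok`/`Err`-style result (see `Result` and `Ok` imported from `..utils` in the diffs below). A minimal sketch of such a container for orientation only; the real implementation lives in `schemathesis.utils` and may differ in detail:

```python
# Sketch of an Ok/Err result container; illustrative, not the actual schemathesis code.
from typing import Generic, TypeVar, Union

T = TypeVar("T")
E = TypeVar("E", bound=Exception)


class Ok(Generic[T]):
    """Wraps a successfully collected value, e.g. an APIOperation."""

    def __init__(self, value: T) -> None:
        self._value = value

    def ok(self) -> T:
        return self._value


class Err(Generic[E]):
    """Wraps the error raised while parsing an operation definition."""

    def __init__(self, error: E) -> None:
        self._error = error

    def err(self) -> E:
        return self._error


# `Result[APIOperation, InvalidSchema]` is then either Ok(operation) or Err(error).
Result = Union[Ok[T], Err[E]]
```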
32 changes: 22 additions & 10 deletions src/schemathesis/cli/output/default.py
@@ -10,7 +10,7 @@
from ...constants import __version__
from ...models import Status
from ...runner import events
from ...runner.serialization import SerializedCase, SerializedTestResult
from ...runner.serialization import SerializedCase, SerializedError, SerializedTestResult
from ..context import ExecutionContext
from ..handlers import EventHandler, get_unique_failures

@@ -116,6 +116,8 @@ def display_errors(context: ExecutionContext, event: events.Finished) -> None:
        if not result.has_errors:
            continue
        display_single_error(context, result)
    if event.generic_errors:
        display_generic_errors(context, event.generic_errors)
    if not context.show_errors_tracebacks:
        click.secho(
            "Add this option to your command line parameters to see full tracebacks: --show-errors-tracebacks", fg="red"
@@ -125,15 +127,25 @@ def display_errors(context: ExecutionContext, event: events.Finished) -> None:
def display_single_error(context: ExecutionContext, result: SerializedTestResult) -> None:
    display_subsection(result)
    for error in result.errors:
        if context.show_errors_tracebacks:
            message = error.exception_with_traceback
        else:
            message = error.exception
        if error.exception.startswith("InvalidSchema") and context.validate_schema:
            message += DISABLE_SCHEMA_VALIDATION_MESSAGE + "\n"
        click.secho(message, fg="red")
        if error.example is not None:
            display_example(context, error.example, seed=result.seed)
        _display_error(context, error, result.seed)


def display_generic_errors(context: ExecutionContext, errors: List[SerializedError]) -> None:
    for error in errors:
        display_section_name(error.title or "Generic error", "_", fg="red")
        _display_error(context, error)


def _display_error(context: ExecutionContext, error: SerializedError, seed: Optional[int] = None) -> None:
    if context.show_errors_tracebacks:
        message = error.exception_with_traceback
    else:
        message = error.exception
    if error.exception.startswith("InvalidSchema") and context.validate_schema:
        message += DISABLE_SCHEMA_VALIDATION_MESSAGE + "\n"
    click.secho(message, fg="red")
    if error.example is not None:
        display_example(context, error.example, seed=seed)


def display_failures(context: ExecutionContext, event: events.Finished) -> None:
19 changes: 18 additions & 1 deletion src/schemathesis/exceptions.py
@@ -1,6 +1,6 @@
from hashlib import sha1
from json import JSONDecodeError
from typing import Dict, Type, Union
from typing import Any, Callable, Dict, NoReturn, Optional, Type, Union

import attr
from jsonschema import ValidationError
@@ -82,10 +82,27 @@ def get_headers_error(message: str) -> Type[CheckFailed]:
    return _get_hashed_exception("MissingHeadersError", message)


@attr.s(slots=True)
class InvalidSchema(Exception):
    """Schema associated with an API operation contains an error."""

    __module__ = "builtins"
    message: Optional[str] = attr.ib(default=None)
    path: Optional[str] = attr.ib(default=None)
    method: Optional[str] = attr.ib(default=None)
    full_path: Optional[str] = attr.ib(default=None)

    def as_failing_test_function(self) -> Callable:
        """Create a test function that will fail.

        This approach allows us to use default pytest reporting style for operation-level schema errors.
        """

        def actual_test(*args: Any, **kwargs: Any) -> NoReturn:
            __tracebackhide__ = True # pylint: disable=unused-variable
            raise self

        return actual_test


class NonCheckError(Exception):
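A short usage sketch of the new `as_failing_test_function` helper; the error values below are made up for illustration:

```python
# Illustrative only: turning an operation-level schema error into a failing test.
from schemathesis.exceptions import InvalidSchema

error = InvalidSchema(
    message="Unresolvable reference in the operation definition",
    path="/users",
    method="get",
    full_path="/api/users",
)
failing_test = error.as_failing_test_function()

# Calling the generated function re-raises the original error, so pytest reports it
# with its default failure formatting instead of aborting the whole collection.
try:
    failing_test()
except InvalidSchema as exc:
    print(exc.method, exc.full_path, exc.message)
```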
32 changes: 22 additions & 10 deletions src/schemathesis/extra/pytest_plugin.py
@@ -16,7 +16,7 @@
from ..constants import RECURSIVE_REFERENCE_ERROR_MESSAGE
from ..exceptions import InvalidSchema
from ..models import APIOperation
from ..utils import is_schemathesis_test
from ..utils import Ok, Result, is_schemathesis_test

USE_FROM_PARENT = version.parse(pytest.__version__) >= version.parse("5.4.0")

@@ -65,7 +65,7 @@ def _get_test_name(self, operation: APIOperation, data_generation_method: DataGe
        )

    def _gen_items(
        self, operation: APIOperation, data_generation_method: DataGenerationMethod
        self, result: Result[APIOperation, InvalidSchema], data_generation_method: DataGenerationMethod
    ) -> Generator[SchemathesisFunction, None, None]:
        """Generate all tests for the given API operation.
@@ -75,14 +75,26 @@ def _gen_items(
        This implementation is based on the original one in pytest, but with slight adjustments
        to produce tests out of hypothesis ones.
        """
        name = self._get_test_name(operation, data_generation_method)
        funcobj = create_test(
            operation=operation,
            test=self.test_function,
            _given_args=self.given_args,
            _given_kwargs=self.given_kwargs,
            data_generation_method=data_generation_method,
        )
        if isinstance(result, Ok):
            operation = result.ok()
            funcobj = create_test(
                operation=operation,
                test=self.test_function,
                _given_args=self.given_args,
                _given_kwargs=self.given_kwargs,
                data_generation_method=data_generation_method,
            )
            name = self._get_test_name(operation, data_generation_method)
        else:
            error = result.err()
            funcobj = error.as_failing_test_function()
            name = self.name
            # `full_path` is always available in this case
            if error.method:
                name += f"[{error.method.upper()}:{error.full_path}]"
            else:
                name += f"[{error.full_path}]"
            name += f"[{data_generation_method.as_short_name()}]"

        cls = self._get_class_parent()
        definition: FunctionDefinition = create(FunctionDefinition, name=self.name, parent=self.parent, callobj=funcobj)
33 changes: 29 additions & 4 deletions src/schemathesis/lazy.py
@@ -10,7 +10,7 @@
from .models import APIOperation
from .schemas import BaseSchema
from .types import Filter, GenericTest, NotSet
from .utils import NOT_SET
from .utils import NOT_SET, Ok


@attr.s(slots=True) # pragma: no mutate
@@ -49,6 +49,7 @@ def parametrize(
        def wrapper(func: Callable) -> Callable:
            def test(request: FixtureRequest, subtests: SubTests) -> None:
                """The actual test, which is executed by pytest."""
                __tracebackhide__ = True # pylint: disable=unused-variable
                if hasattr(test, "_schemathesis_hooks"):
                    func._schemathesis_hooks = test._schemathesis_hooks # type: ignore
                schema = get_schema(
@@ -70,9 +71,22 @@ def test(request: FixtureRequest, subtests: SubTests) -> None:
                settings = getattr(test, "_hypothesis_internal_use_settings", None)
                tests = list(schema.get_all_tests(func, settings))
                request.session.testscollected += len(tests)
                for operation, data_generation_method, sub_test in tests:
                    subtests.item._nodeid = _get_node_name(node_id, operation, data_generation_method)
                    run_subtest(operation, fixtures, sub_test, subtests)
                for result, data_generation_method in tests:
                    if isinstance(result, Ok):
                        operation, sub_test = result.ok()
                        subtests.item._nodeid = _get_node_name(node_id, operation, data_generation_method)
                        run_subtest(operation, fixtures, sub_test, subtests)
                    else:
                        # Schema errors
                        error = result.err()
                        sub_test = error.as_failing_test_function()
                        # `full_path` is always available in this case
                        kwargs = {"path": error.full_path}
                        if error.method:
                            kwargs["method"] = error.method.upper()
                        subtests.item._nodeid = _get_partial_node_name(node_id, data_generation_method, **kwargs)
                        with subtests.test(**kwargs):
                            sub_test()
                subtests.item._nodeid = node_id

            # Needed to prevent a failure when settings are applied to the test function
@@ -88,6 +102,17 @@ def _get_node_name(node_id: str, operation: APIOperation, data_generation_method
    return f"{node_id}[{operation.method.upper()}:{operation.full_path}][{data_generation_method.as_short_name()}]"


def _get_partial_node_name(node_id: str, data_generation_method: DataGenerationMethod, **kwargs: Any) -> str:
    """Make a test node name for failing tests caused by schema errors."""
    name = node_id
    if "method" in kwargs:
        name += f"[{kwargs['method']}:{kwargs['path']}]"
    else:
        name += f"[{kwargs['path']}]"
    name += f"[{data_generation_method.as_short_name()}]"
    return name


def run_subtest(operation: APIOperation, fixtures: Dict[str, Any], sub_test: Callable, subtests: SubTests) -> None:
    """Run the given subtest with pytest fixtures."""
    with subtests.test(method=operation.method.upper(), path=operation.path):
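For orientation, a sketch of what the node-name helpers above produce. It assumes `DataGenerationMethod` is importable from the top-level package and that `as_short_name()` returns a one-letter code such as "P" for positive data generation; both are assumptions, not verified against this exact version:

```python
# Illustrative only; `_get_partial_node_name` is a private helper of schemathesis.lazy.
from schemathesis import DataGenerationMethod
from schemathesis.lazy import _get_partial_node_name

name = _get_partial_node_name(
    "test_api.py::test_users",
    DataGenerationMethod.positive,
    method="GET",
    path="/api/users",
)
# Expected to look like "test_api.py::test_users[GET:/api/users][P]"
print(name)
```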
5 changes: 3 additions & 2 deletions src/schemathesis/models.py
@@ -792,14 +792,15 @@ class TestResultSet:
    """Set of multiple test results."""

    results: List[TestResult] = attr.ib(factory=list) # pragma: no mutate
    generic_errors: List[InvalidSchema] = attr.ib(factory=list) # pragma: no mutate

    def __iter__(self) -> Iterator[TestResult]:
        return iter(self.results)

    @property
    def is_empty(self) -> bool:
        """If the result set contains no results."""
        return len(self.results) == 0
        return len(self.results) == 0 and len(self.generic_errors) == 0

    @property
    def has_failures(self) -> bool:
@@ -829,7 +830,7 @@ def failed_count(self) -> int:

    @property
    def errored_count(self) -> int:
        return self._count(lambda result: result.has_errors or result.is_errored)
        return self._count(lambda result: result.has_errors or result.is_errored) + len(self.generic_errors)

    @property
    def total(self) -> Dict[str, Dict[Union[str, Status], int]]:
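A small sketch of how schema-level errors now affect the result-set counters; the error value is illustrative:

```python
# Illustrative only: generic (non operation-specific) schema errors count towards the
# error total and keep the result set from being reported as empty.
from schemathesis.exceptions import InvalidSchema
from schemathesis.models import TestResultSet

results = TestResultSet()
results.generic_errors.append(InvalidSchema(message="Malformed `paths` definition"))

assert not results.is_empty
assert results.errored_count == 1
```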
6 changes: 5 additions & 1 deletion src/schemathesis/runner/events.py
@@ -8,7 +8,7 @@
from ..models import APIOperation, Status, TestResult, TestResultSet
from ..schemas import BaseSchema
from ..utils import format_exception
from .serialization import SerializedTestResult
from .serialization import SerializedError, SerializedTestResult


@attr.s() # pragma: no mutate
@@ -158,6 +158,7 @@ class Finished(ExecutionEvent):
    has_errors: bool = attr.ib() # pragma: no mutate
    has_logs: bool = attr.ib() # pragma: no mutate
    is_empty: bool = attr.ib() # pragma: no mutate
    generic_errors: List[SerializedError] = attr.ib() # pragma: no mutate

    total: Dict[str, Dict[Union[str, Status], int]] = attr.ib() # pragma: no mutate

@@ -175,5 +176,8 @@ def from_results(cls, results: TestResultSet, running_time: float) -> "Finished"
            has_logs=results.has_logs,
            is_empty=results.is_empty,
            total=results.total,
            generic_errors=[
                SerializedError.from_error(error, None, None, error.full_path) for error in results.generic_errors
            ],
            running_time=running_time,
        )
83 changes: 65 additions & 18 deletions src/schemathesis/runner/impl/core.py
@@ -29,7 +29,8 @@
from ...stateful import Feedback, Stateful
from ...targets import Target, TargetContext
from ...types import RawAuth
from ...utils import GenericResponse, WSGIResponse, capture_hypothesis_output, format_exception
from ...utils import GenericResponse, Ok, WSGIResponse, capture_hypothesis_output, format_exception
from ..serialization import SerializedTestResult


def get_hypothesis_settings(hypothesis_options: Dict[str, Any]) -> hypothesis.settings:
@@ -83,29 +84,75 @@ def _run_tests(
        template: Callable,
        settings: hypothesis.settings,
        seed: Optional[int],
        results: TestResultSet,
        recursion_level: int = 0,
        **kwargs: Any,
    ) -> Generator[events.ExecutionEvent, None, None]:
        """Run tests and recursively run additional tests."""
        if recursion_level > self.stateful_recursion_limit:
            return
        for operation, data_generation_method, test in maker(template, settings, seed):
            feedback = Feedback(self.stateful, operation)
            for event in run_test(
                operation,
                test,
                feedback=feedback,
                recursion_level=recursion_level,
                data_generation_method=data_generation_method,
                **kwargs,
            ):
                yield event
                if isinstance(event, events.Interrupted):
                    return
            # Additional tests, generated via the `feedback` instance
            yield from self._run_tests(
                feedback.get_stateful_tests, template, settings, seed, recursion_level=recursion_level + 1, **kwargs
            )
        for result, data_generation_method in maker(template, settings, seed):
            if isinstance(result, Ok):
                operation, test = result.ok()
                feedback = Feedback(self.stateful, operation)
                for event in run_test(
                    operation,
                    test,
                    results=results,
                    feedback=feedback,
                    recursion_level=recursion_level,
                    data_generation_method=data_generation_method,
                    **kwargs,
                ):
                    yield event
                    if isinstance(event, events.Interrupted):
                        return
                # Additional tests, generated via the `feedback` instance
                yield from self._run_tests(
                    feedback.get_stateful_tests,
                    template,
                    settings,
                    seed,
                    recursion_level=recursion_level + 1,
                    results=results,
                    **kwargs,
                )
            else:
                # Schema errors
                yield from handle_schema_error(result.err(), results, data_generation_method, recursion_level)


def handle_schema_error(
    error: InvalidSchema, results: TestResultSet, data_generation_method: DataGenerationMethod, recursion_level: int
) -> Generator[events.ExecutionEvent, None, None]:
    if error.method is not None:
        assert error.path is not None
        assert error.full_path is not None
        method = error.method.upper()
        result = TestResult(
            method=method,
            path=error.full_path,
            data_generation_method=data_generation_method,
        )
        result.add_error(error)

        yield events.BeforeExecution(
            method=method, path=error.full_path, relative_path=error.path, recursion_level=recursion_level
        )
        yield events.AfterExecution(
            method=method,
            path=error.full_path,
            relative_path=error.path,
            status=Status.error,
            result=SerializedTestResult.from_test_result(result),
            elapsed_time=0.0,
            hypothesis_output=[],
        )
        results.append(result)
    else:
        # When there is no `method`, then the schema error may cover multiple operations and we can't display it in
        # the progress bar
        results.generic_errors.append(error)


def run_test( # pylint: disable=too-many-locals
