Skip to content

Commit

Permalink
feat: --show-error-tracebacks CLI option to display errors' traceba…
Browse files Browse the repository at this point in the history
…cks in the output
  • Loading branch information
Stranger6667 committed Feb 7, 2020
1 parent c352fdf commit 8804716
Show file tree
Hide file tree
Showing 7 changed files with 45 additions and 14 deletions.
2 changes: 2 additions & 0 deletions docs/changelog.rst
Expand Up @@ -10,6 +10,7 @@ Added
~~~~~

- Support for testing of examples in Parameter & Media Type objects in Open API 3.0. `#394`_
- ``--show-errors-tracebacks`` CLI option to display errors' tracebacks in the output. `#391`_

Changed
~~~~~~~
Expand Down Expand Up @@ -673,6 +674,7 @@ Fixed
.. _0.2.0: https://github.com/kiwicom/schemathesis/compare/v0.1.0...v0.2.0

.. _#394: https://github.com/kiwicom/schemathesis/issues/394
.. _#391: https://github.com/kiwicom/schemathesis/issues/391
.. _#386: https://github.com/kiwicom/schemathesis/issues/386
.. _#383: https://github.com/kiwicom/schemathesis/issues/383
.. _#381: https://github.com/kiwicom/schemathesis/issues/381
Expand Down
10 changes: 7 additions & 3 deletions src/schemathesis/cli/__init__.py
Expand Up @@ -99,6 +99,7 @@ def schemathesis(pre_run: Optional[str] = None) -> None:
@click.option("--app", help="WSGI application to test", type=str, callback=callbacks.validate_app)
@click.option("--request-timeout", help="Timeout in milliseconds for network requests during the test run.", type=int)
@click.option("--validate-schema", help="Enable or disable validation of input schema.", type=bool, default=True)
@click.option("--show-errors-tracebacks", help="Show full tracebacks for internal errors.", is_flag=True, default=False)
@click.option(
"--hypothesis-deadline",
help="Duration in milliseconds that each individual example with a test is not allowed to exceed.",
Expand Down Expand Up @@ -141,6 +142,7 @@ def run( # pylint: disable=too-many-arguments
app: Any = None,
request_timeout: Optional[int] = None,
validate_schema: bool = True,
show_errors_tracebacks: bool = False,
hypothesis_deadline: Optional[Union[int, NotSet]] = None,
hypothesis_derandomize: Optional[bool] = None,
hypothesis_max_examples: Optional[int] = None,
Expand Down Expand Up @@ -198,7 +200,7 @@ def run( # pylint: disable=too-many-arguments
# the given app
options["loader"] = get_loader_for_app(app)
prepared_runner = runner.prepare(schema, **options)
execute(prepared_runner, workers_num)
execute(prepared_runner, workers_num, show_errors_tracebacks)


def get_output_handler(workers_num: int) -> Callable[[events.ExecutionContext, events.ExecutionEvent], None]:
Expand Down Expand Up @@ -245,9 +247,11 @@ class OutputStyle(Enum):
short = output.short.handle_event


def execute(
    prepared_runner: Generator[events.ExecutionEvent, None, None], workers_num: int, show_errors_tracebacks: bool
) -> None:
    """Drain events from a prepared runner and dispatch each one to the proper output handler."""
    context = events.ExecutionContext(workers_num=workers_num, show_errors_tracebacks=show_errors_tracebacks)
    handler = get_output_handler(workers_num)
    for event in prepared_runner:
        handler(context, event)
14 changes: 9 additions & 5 deletions src/schemathesis/cli/output/default.py
Expand Up @@ -102,7 +102,7 @@ def display_hypothesis_output(hypothesis_output: List[str]) -> None:
click.secho(output, fg="red")


def display_errors(context: events.ExecutionContext, results: TestResultSet) -> None:
    """Display all errors in the test run.

    When tracebacks are suppressed (the default), print a hint naming the CLI
    flag that reveals them.
    """
    if not results.has_errors:
        return
    # NOTE(review): section-header call restored from the folded diff context;
    # its presence is confirmed by the test asserting " ERRORS " in the output.
    display_section_name("ERRORS")
    for result in results:
        if not result.has_errors:
            continue
        display_single_error(context, result)
    if not context.show_errors_tracebacks:
        # BUG FIX: the flag is registered as "--show-errors-tracebacks" (plural
        # "errors"); the message previously advertised "--show-error-tracebacks",
        # which Click would reject if a user copied it verbatim.
        click.secho(
            "Add this option to your command line parameters to see full tracebacks: --show-errors-tracebacks",
            fg="red",
        )


def display_single_error(context: events.ExecutionContext, result: TestResult) -> None:
    """Print one endpoint's errors in red, with the triggering example when one is known."""
    display_subsection(result)
    for error, example in result.errors:
        formatted = utils.format_exception(error, include_traceback=context.show_errors_tracebacks)
        click.secho(formatted, fg="red")
        if example is None:
            continue
        display_example(example, seed=result.seed)
Expand Down Expand Up @@ -295,7 +299,7 @@ def handle_finished(context: events.ExecutionContext, event: events.Finished) ->
"""Show the outcome of the whole testing session."""
click.echo()
display_hypothesis_output(context.hypothesis_output)
display_errors(event.results)
display_errors(context, event.results)
display_failures(event.results)
display_application_logs(event.results)
display_statistic(event.results)
Expand Down
1 change: 1 addition & 0 deletions src/schemathesis/runner/events.py
Expand Up @@ -16,6 +16,7 @@ class ExecutionContext:

hypothesis_output: List[str] = attr.ib(factory=list) # pragma: no mutate
workers_num: int = attr.ib(default=1) # pragma: no mutate
show_errors_tracebacks: bool = attr.ib(default=False) # pragma: no mutate
endpoints_processed: int = attr.ib(default=0) # pragma: no mutate
current_line_length: int = attr.ib(default=0) # pragma: no mutate
terminal_size: os.terminal_size = attr.ib(factory=shutil.get_terminal_size) # pragma: no mutate
Expand Down
4 changes: 3 additions & 1 deletion src/schemathesis/utils.py
Expand Up @@ -94,7 +94,9 @@ def get_output(value: str) -> None:
yield output


def format_exception(error: Exception, include_traceback: bool = False) -> str:
    """Render an exception as a string.

    With ``include_traceback`` the full traceback attached to the exception is
    included; otherwise only the final ``Type: message`` line is produced.
    """
    if include_traceback:
        lines = traceback.format_exception(type(error), error, error.__traceback__)
    else:
        lines = traceback.format_exception_only(type(error), error)
    return "".join(lines)


Expand Down
27 changes: 22 additions & 5 deletions test/cli/output/test_default.py
Expand Up @@ -210,7 +210,8 @@ def test_after_execution_attributes(execution_context, after_execution):
assert execution_context.current_line_length == 2


def test_display_single_error(capsys, swagger_20, endpoint):
@pytest.mark.parametrize("show_errors_tracebacks", (True, False))
def test_display_single_error(capsys, swagger_20, endpoint, execution_context, show_errors_tracebacks):
# Given exception is multiline
exception = None
try:
Expand All @@ -221,14 +222,23 @@ def test_display_single_error(capsys, swagger_20, endpoint):
result = models.TestResult(endpoint)
result.add_error(exception)
# When the related test result is displayed
default.display_single_error(result)
execution_context.show_errors_tracebacks = show_errors_tracebacks
default.display_single_error(execution_context, result)
lines = capsys.readouterr().out.strip().split("\n")
# Then it should be correctly formatted and displayed in red color
if sys.version_info <= (3, 8):
expected = ' File "<string>", line 1\n some invalid code\n ^\nSyntaxError: invalid syntax\n'
else:
expected = ' File "<string>", line 1\n some invalid code\n ^\nSyntaxError: invalid syntax\n'
assert "\n".join(lines[1:6]) == click.style(expected, fg="red")
if show_errors_tracebacks:
lines = click.unstyle("\n".join(lines)).split("\n")
assert lines[1] == "Traceback (most recent call last):"
# There is a path on the next line, it is simpler to not check it since it doesn't give much value
# But presence of traceback itself is checked
expected = f' exec("some invalid code")\n{expected}'
assert "\n".join(lines[3:8]) == expected.strip("\n")
else:
assert "\n".join(lines[1:6]) == click.style(expected, fg="red")


def test_display_failures(swagger_20, capsys, results_set):
Expand All @@ -251,17 +261,24 @@ def test_display_failures(swagger_20, capsys, results_set):
assert "requests.get('http://127.0.0.1:8080/api/failure')" in out


def test_display_errors(swagger_20, capsys, results_set):
@pytest.mark.parametrize("show_errors_tracebacks", (True, False))
def test_display_errors(swagger_20, capsys, results_set, execution_context, show_errors_tracebacks):
# Given two test results - success and error
endpoint = models.Endpoint("/api/error", "GET", {}, swagger_20)
error = models.TestResult(endpoint, seed=123)
error.add_error(ConnectionError("Connection refused!"), models.Case(endpoint, query={"a": 1}))
results_set.append(error)
# When the errors are displayed
default.display_errors(results_set)
execution_context.show_errors_tracebacks = show_errors_tracebacks
default.display_errors(execution_context, results_set)
out = capsys.readouterr().out.strip()
# Then section title is displayed
assert " ERRORS " in out
help_message_exists = (
"Add this option to your command line parameters to " "see full tracebacks: --show-error-tracebacks" in out
)
# And help message is displayed only if tracebacks are not shown
assert help_message_exists is not show_errors_tracebacks
# And endpoint with an error is displayed as a subsection
assert " GET: /api/error " in out
# And the error itself is displayed
Expand Down
1 change: 1 addition & 0 deletions test/cli/test_commands.py
Expand Up @@ -135,6 +135,7 @@ def test_commands_run_help(cli):
" --request-timeout INTEGER Timeout in milliseconds for network requests",
" during the test run.",
" --validate-schema BOOLEAN Enable or disable validation of input schema.",
" --show-errors-tracebacks Show full tracebacks for internal errors.",
" --hypothesis-deadline INTEGER Duration in milliseconds that each individual",
" example with a test is not allowed to exceed.",
" --hypothesis-derandomize Use Hypothesis's deterministic mode.",
Expand Down

0 comments on commit 8804716

Please sign in to comment.