diff --git a/docs/changelog.rst b/docs/changelog.rst index 694c34018f..2ceae098a6 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -10,6 +10,7 @@ Added ~~~~~ - Support for testing of examples in Parameter & Media Type objects in Open API 3.0. `#394`_ +- ``--show-errors-tracebacks`` CLI option to display errors' tracebacks in the output. `#391`_ Changed ~~~~~~~ @@ -673,6 +674,7 @@ Fixed .. _0.2.0: https://github.com/kiwicom/schemathesis/compare/v0.1.0...v0.2.0 .. _#394: https://github.com/kiwicom/schemathesis/issues/394 +.. _#391: https://github.com/kiwicom/schemathesis/issues/391 .. _#386: https://github.com/kiwicom/schemathesis/issues/386 .. _#383: https://github.com/kiwicom/schemathesis/issues/383 .. _#381: https://github.com/kiwicom/schemathesis/issues/381 diff --git a/src/schemathesis/cli/__init__.py b/src/schemathesis/cli/__init__.py index 57ae0c4b01..90c7ee3ada 100644 --- a/src/schemathesis/cli/__init__.py +++ b/src/schemathesis/cli/__init__.py @@ -99,6 +99,7 @@ def schemathesis(pre_run: Optional[str] = None) -> None: @click.option("--app", help="WSGI application to test", type=str, callback=callbacks.validate_app) @click.option("--request-timeout", help="Timeout in milliseconds for network requests during the test run.", type=int) @click.option("--validate-schema", help="Enable or disable validation of input schema.", type=bool, default=True) +@click.option("--show-errors-tracebacks", help="Show full tracebacks for internal errors.", is_flag=True, default=False) @click.option( "--hypothesis-deadline", help="Duration in milliseconds that each individual example with a test is not allowed to exceed.", @@ -141,6 +142,7 @@ def run( # pylint: disable=too-many-arguments app: Any = None, request_timeout: Optional[int] = None, validate_schema: bool = True, + show_errors_tracebacks: bool = False, hypothesis_deadline: Optional[Union[int, NotSet]] = None, hypothesis_derandomize: Optional[bool] = None, hypothesis_max_examples: Optional[int] = None, @@ -198,7 
+200,7 @@ def run( # pylint: disable=too-many-arguments # the given app options["loader"] = get_loader_for_app(app) prepared_runner = runner.prepare(schema, **options) - execute(prepared_runner, workers_num) + execute(prepared_runner, workers_num, show_errors_tracebacks) def get_output_handler(workers_num: int) -> Callable[[events.ExecutionContext, events.ExecutionEvent], None]: @@ -245,9 +247,11 @@ class OutputStyle(Enum): short = output.short.handle_event -def execute(prepared_runner: Generator[events.ExecutionEvent, None, None], workers_num: int) -> None: +def execute( + prepared_runner: Generator[events.ExecutionEvent, None, None], workers_num: int, show_errors_tracebacks: bool +) -> None: """Execute a prepared runner by drawing events from it and passing to a proper handler.""" handler = get_output_handler(workers_num) - context = events.ExecutionContext(workers_num=workers_num) + context = events.ExecutionContext(workers_num=workers_num, show_errors_tracebacks=show_errors_tracebacks) for event in prepared_runner: handler(context, event) diff --git a/src/schemathesis/cli/output/default.py b/src/schemathesis/cli/output/default.py index 804fa4c4ff..198ceb49bc 100644 --- a/src/schemathesis/cli/output/default.py +++ b/src/schemathesis/cli/output/default.py @@ -102,7 +102,7 @@ def display_hypothesis_output(hypothesis_output: List[str]) -> None: click.secho(output, fg="red") -def display_errors(results: TestResultSet) -> None: +def display_errors(context: events.ExecutionContext, results: TestResultSet) -> None: """Display all errors in the test run.""" if not results.has_errors: return @@ -111,13 +111,17 @@ for result in results: if not result.has_errors: continue - display_single_error(result) + display_single_error(context, result) + if not context.show_errors_tracebacks: + click.secho( + "Add this option to your command line parameters to see full tracebacks: --show-errors-tracebacks", fg="red" + ) -def 
display_single_error(result: TestResult) -> None: +def display_single_error(context: events.ExecutionContext, result: TestResult) -> None: display_subsection(result) for error, example in result.errors: - message = utils.format_exception(error) + message = utils.format_exception(error, include_traceback=context.show_errors_tracebacks) click.secho(message, fg="red") if example is not None: display_example(example, seed=result.seed) @@ -295,7 +299,7 @@ def handle_finished(context: events.ExecutionContext, event: events.Finished) -> """Show the outcome of the whole testing session.""" click.echo() display_hypothesis_output(context.hypothesis_output) - display_errors(event.results) + display_errors(context, event.results) display_failures(event.results) display_application_logs(event.results) display_statistic(event.results) diff --git a/src/schemathesis/runner/events.py b/src/schemathesis/runner/events.py index 0c6c7048ed..d596670737 100644 --- a/src/schemathesis/runner/events.py +++ b/src/schemathesis/runner/events.py @@ -16,6 +16,7 @@ class ExecutionContext: hypothesis_output: List[str] = attr.ib(factory=list) # pragma: no mutate workers_num: int = attr.ib(default=1) # pragma: no mutate + show_errors_tracebacks: bool = attr.ib(default=False) # pragma: no mutate endpoints_processed: int = attr.ib(default=0) # pragma: no mutate current_line_length: int = attr.ib(default=0) # pragma: no mutate terminal_size: os.terminal_size = attr.ib(factory=shutil.get_terminal_size) # pragma: no mutate diff --git a/src/schemathesis/utils.py b/src/schemathesis/utils.py index 448cd02ca3..7b499a4d2e 100644 --- a/src/schemathesis/utils.py +++ b/src/schemathesis/utils.py @@ -94,7 +94,9 @@ def get_output(value: str) -> None: yield output -def format_exception(error: Exception) -> str: +def format_exception(error: Exception, include_traceback: bool = False) -> str: + if include_traceback: + return "".join(traceback.format_exception(type(error), error, error.__traceback__)) return 
"".join(traceback.format_exception_only(type(error), error)) diff --git a/test/cli/output/test_default.py b/test/cli/output/test_default.py index 0dd7da39b8..f61ce4de8a 100644 --- a/test/cli/output/test_default.py +++ b/test/cli/output/test_default.py @@ -210,7 +210,8 @@ def test_after_execution_attributes(execution_context, after_execution): assert execution_context.current_line_length == 2 -def test_display_single_error(capsys, swagger_20, endpoint): +@pytest.mark.parametrize("show_errors_tracebacks", (True, False)) +def test_display_single_error(capsys, swagger_20, endpoint, execution_context, show_errors_tracebacks): # Given exception is multiline exception = None try: @@ -221,14 +222,23 @@ def test_display_single_error(capsys, swagger_20, endpoint): result = models.TestResult(endpoint) result.add_error(exception) # When the related test result is displayed - default.display_single_error(result) + execution_context.show_errors_tracebacks = show_errors_tracebacks + default.display_single_error(execution_context, result) lines = capsys.readouterr().out.strip().split("\n") # Then it should be correctly formatted and displayed in red color if sys.version_info <= (3, 8): expected = ' File "", line 1\n some invalid code\n ^\nSyntaxError: invalid syntax\n' else: expected = ' File "", line 1\n some invalid code\n ^\nSyntaxError: invalid syntax\n' - assert "\n".join(lines[1:6]) == click.style(expected, fg="red") + if show_errors_tracebacks: + lines = click.unstyle("\n".join(lines)).split("\n") + assert lines[1] == "Traceback (most recent call last):" + # There is a path on the next line, it is simpler to not check it since it doesn't give much value + # But presence of traceback itself is checked + expected = f' exec("some invalid code")\n{expected}' + assert "\n".join(lines[3:8]) == expected.strip("\n") + else: + assert "\n".join(lines[1:6]) == click.style(expected, fg="red") def test_display_failures(swagger_20, capsys, results_set): @@ -251,17 +261,24 @@ def 
test_display_failures(swagger_20, capsys, results_set): assert "requests.get('http://127.0.0.1:8080/api/failure')" in out -def test_display_errors(swagger_20, capsys, results_set): +@pytest.mark.parametrize("show_errors_tracebacks", (True, False)) +def test_display_errors(swagger_20, capsys, results_set, execution_context, show_errors_tracebacks): # Given two test results - success and error endpoint = models.Endpoint("/api/error", "GET", {}, swagger_20) error = models.TestResult(endpoint, seed=123) error.add_error(ConnectionError("Connection refused!"), models.Case(endpoint, query={"a": 1})) results_set.append(error) # When the errors are displayed - default.display_errors(results_set) + execution_context.show_errors_tracebacks = show_errors_tracebacks + default.display_errors(execution_context, results_set) out = capsys.readouterr().out.strip() # Then section title is displayed assert " ERRORS " in out + help_message_exists = ( + "Add this option to your command line parameters to " "see full tracebacks: --show-errors-tracebacks" in out + ) + # And help message is displayed only if tracebacks are not shown + assert help_message_exists is not show_errors_tracebacks # And endpoint with an error is displayed as a subsection assert " GET: /api/error " in out # And the error itself is displayed diff --git a/test/cli/test_commands.py b/test/cli/test_commands.py index d568056e25..a724b3eb16 100644 --- a/test/cli/test_commands.py +++ b/test/cli/test_commands.py @@ -135,6 +135,7 @@ def test_commands_run_help(cli): " --request-timeout INTEGER Timeout in milliseconds for network requests", " during the test run.", " --validate-schema BOOLEAN Enable or disable validation of input schema.", + " --show-errors-tracebacks Show full tracebacks for internal errors.", " --hypothesis-deadline INTEGER Duration in milliseconds that each individual", " example with a test is not allowed to exceed.", " --hypothesis-derandomize Use Hypothesis's deterministic mode.",