refactor(cli): Restructure CLI output for better extensibility
Stranger6667 committed Nov 26, 2019
1 parent 4d26e6e commit dd48985
Showing 5 changed files with 85 additions and 78 deletions.
14 changes: 10 additions & 4 deletions src/schemathesis/cli/__init__.py
@@ -1,7 +1,8 @@
import pathlib
import traceback
from contextlib import contextmanager
from typing import Callable, Dict, Generator, Iterable, List, Optional, Tuple
from enum import Enum
from typing import Callable, Dict, Generator, Iterable, List, Optional, Tuple, cast

import click
import hypothesis
@@ -155,7 +156,8 @@ def run( # pylint: disable=too-many-arguments
prepared_runner = runner.prepare(schema, checks=selected_checks, loader=from_path, **options)
else:
prepared_runner = runner.prepare(schema, checks=selected_checks, **options)
execute(prepared_runner)
output_style = cast(Callable, OutputStyle.default)
execute(prepared_runner, output_style)


def load_hook(module_name: str) -> None:
@@ -189,9 +191,13 @@ def abort_on_network_errors() -> Generator[None, None, None]:
raise click.Abort


def execute(prepared_runner: Generator[events.ExecutionEvent, None, None]) -> None:
class OutputStyle(Enum):
default = output.default.handle_event


def execute(prepared_runner: Generator[events.ExecutionEvent, None, None], handler: Callable) -> None:
"""Execute a prepared runner by drawing events from it and passing to a proper handler."""
with utils.capture_hypothesis_output() as hypothesis_output:
context = events.ExecutionContext(hypothesis_output)
for event in prepared_runner:
output.handle_event(context, event)
handler(context, event)
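
In practice, this first file's change means `execute` no longer hard-codes `output.handle_event`: any callable with the `(context, event)` signature can be passed in, and `OutputStyle.default` simply points at the default one. Below is a minimal sketch of plugging a custom handler into the new signature; `quiet_handler` and its body are illustrative only, not part of this commit.

from schemathesis import runner
from schemathesis.cli import execute

def quiet_handler(context: runner.events.ExecutionContext, event: runner.events.ExecutionEvent) -> None:
    """Ignore per-endpoint progress and react only to the final event."""
    if isinstance(event, runner.events.Finished):
        print(f"Done in {event.running_time:.2f}s")

# With a runner prepared as in `run` above, e.g. `prepared = runner.prepare(schema_uri, checks=checks)`:
# execute(prepared, quiet_handler)
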
1 change: 1 addition & 0 deletions src/schemathesis/cli/output/__init__.py
@@ -0,0 +1 @@
from . import default
110 changes: 55 additions & 55 deletions src/schemathesis/cli/output.py → src/schemathesis/cli/output/default.py
@@ -8,10 +8,10 @@
from hypothesis import settings
from importlib_metadata import version

from ...constants import __version__
from ...models import Case, Status, TestResult, TestResultSet
from ...runner import events
from .. import utils
from ..constants import __version__
from ..models import Case, Status, TestResult, TestResultSet
from ..runner import events


def get_terminal_width() -> int:
@@ -36,47 +36,6 @@ def get_percentage(position: int, length: int) -> str:
return f"[{percentage_message}]"


def handle_initialized(context: events.ExecutionContext, event: events.Initialized) -> None:
"""Display information about the test session."""
display_section_name("Schemathesis test session starts")
versions = (
f"platform {platform.system()} -- "
f"Python {platform.python_version()}, "
f"schemathesis-{__version__}, "
f"hypothesis-{version('hypothesis')}, "
f"hypothesis_jsonschema-{version('hypothesis_jsonschema')}, "
f"jsonschema-{version('jsonschema')}"
)
click.echo(versions)
click.echo(f"rootdir: {os.getcwd()}")
click.echo(
f"hypothesis profile '{settings._current_profile}' " # type: ignore
f"-> {settings.get_profile(settings._current_profile).show_changed()}"
)
if event.schema.location is not None:
click.echo(f"Schema location: {event.schema.location}")
if event.schema.base_url is not None:
click.echo(f"Base URL: {event.schema.base_url}")
click.echo(f"Specification version: {event.schema.verbose_name}")
click.secho(f"collected endpoints: {event.schema.endpoints_count}", bold=True)
if event.schema.endpoints_count >= 1:
click.echo()


def handle_before_execution(context: events.ExecutionContext, event: events.BeforeExecution) -> None:
"""Display what method / endpoint will be tested next."""
message = f"{event.endpoint.method} {event.endpoint.path} "
context.current_line_length = len(message)
click.echo(message, nl=False)


def handle_after_execution(context: events.ExecutionContext, event: events.AfterExecution) -> None:
"""Display the execution result + current progress at the same line with the method / endpoint names."""
context.endpoints_processed += 1
display_execution_result(context, event)
display_percentage(context, event)


def display_execution_result(context: events.ExecutionContext, event: events.AfterExecution) -> None:
"""Display an appropriate symbol for the given event's execution result."""
symbol, color = {Status.success: (".", "green"), Status.failure: ("F", "red"), Status.error: ("E", "red")}[
@@ -97,17 +56,6 @@ def display_percentage(context: events.ExecutionContext, event: events.AfterExec
click.echo(template.format(styled))


def handle_finished(context: events.ExecutionContext, event: events.Finished) -> None:
"""Show the outcome of the whole testing session."""
click.echo()
display_hypothesis_output(context.hypothesis_output)
display_errors(event.results)
display_failures(event.results)
display_statistic(event.results)
click.echo()
display_summary(event)


def display_summary(event: events.Finished) -> None:
message, color, status_code = get_summary_output(event)
display_section_name(message, fg=color)
@@ -265,6 +213,58 @@ def display_check_result(check_name: str, results: Dict[Union[str, Status], int]
)


def handle_initialized(context: events.ExecutionContext, event: events.Initialized) -> None:
"""Display information about the test session."""
display_section_name("Schemathesis test session starts")
versions = (
f"platform {platform.system()} -- "
f"Python {platform.python_version()}, "
f"schemathesis-{__version__}, "
f"hypothesis-{version('hypothesis')}, "
f"hypothesis_jsonschema-{version('hypothesis_jsonschema')}, "
f"jsonschema-{version('jsonschema')}"
)
click.echo(versions)
click.echo(f"rootdir: {os.getcwd()}")
click.echo(
f"hypothesis profile '{settings._current_profile}' " # type: ignore
f"-> {settings.get_profile(settings._current_profile).show_changed()}"
)
if event.schema.location is not None:
click.echo(f"Schema location: {event.schema.location}")
if event.schema.base_url is not None:
click.echo(f"Base URL: {event.schema.base_url}")
click.echo(f"Specification version: {event.schema.verbose_name}")
click.secho(f"collected endpoints: {event.schema.endpoints_count}", bold=True)
if event.schema.endpoints_count >= 1:
click.echo()


def handle_before_execution(context: events.ExecutionContext, event: events.BeforeExecution) -> None:
"""Display what method / endpoint will be tested next."""
message = f"{event.endpoint.method} {event.endpoint.path} "
context.current_line_length = len(message)
click.echo(message, nl=False)


def handle_after_execution(context: events.ExecutionContext, event: events.AfterExecution) -> None:
"""Display the execution result + current progress at the same line with the method / endpoint names."""
context.endpoints_processed += 1
display_execution_result(context, event)
display_percentage(context, event)


def handle_finished(context: events.ExecutionContext, event: events.Finished) -> None:
"""Show the outcome of the whole testing session."""
click.echo()
display_hypothesis_output(context.hypothesis_output)
display_errors(event.results)
display_failures(event.results)
display_statistic(event.results)
click.echo()
display_summary(event)


def handle_interrupted(context: events.ExecutionContext, event: events.Interrupted) -> None:
click.echo()
display_section_name("KeyboardInterrupt", "!", bold=False)
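
For context, `default.handle_event` — the callable that `OutputStyle.default` now points at — is not part of the hunks shown above. A plausible sketch of that dispatcher, assuming it simply routes each event to the handlers in this file via `isinstance` checks (the real body sits in the collapsed portion of the diff and may differ):

def handle_event(context: events.ExecutionContext, event: events.ExecutionEvent) -> None:
    """Route an execution event to the matching display handler."""
    if isinstance(event, events.Initialized):
        handle_initialized(context, event)
    elif isinstance(event, events.BeforeExecution):
        handle_before_execution(context, event)
    elif isinstance(event, events.AfterExecution):
        handle_after_execution(context, event)
    elif isinstance(event, events.Finished):
        handle_finished(context, event)
    elif isinstance(event, events.Interrupted):
        handle_interrupted(context, event)
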
Empty file added test/cli/output/__init__.py
38 changes: 19 additions & 19 deletions test/cli/test_output.py → test/cli/output/test_default.py
@@ -8,7 +8,7 @@

import schemathesis
from schemathesis import models, runner, utils
from schemathesis.cli import output
from schemathesis.cli.output import default


@pytest.fixture(autouse=True)
@@ -50,9 +50,9 @@ def after_execution(results_set, endpoint, swagger_20):
)
def test_display_section_name(capsys, title, separator, printed, expected):
# When section name is displayed
output.display_section_name(title, separator=separator)
default.display_section_name(title, separator=separator)
out = capsys.readouterr().out.strip()
terminal_width = output.get_terminal_width()
terminal_width = default.get_terminal_width()
# It should fit into the terminal width
assert len(click.unstyle(out)) == terminal_width
# And the section name should be bold
@@ -66,7 +66,7 @@ def test_handle_initialized(capsys, execution_context, results_set, swagger_20):
results=results_set, schema=swagger_20, checks=(), hypothesis_settings=hypothesis.settings()
)
# When this event is handled
output.handle_initialized(execution_context, event)
default.handle_initialized(execution_context, event)
out = capsys.readouterr().out
lines = out.split("\n")
# Then initial title is displayed
@@ -92,7 +92,7 @@ def test_display_statistic(capsys, swagger_20, endpoint):
)
results = models.TestResultSet([single_test_statistic])
# When test results are displayed
output.display_statistic(results)
default.display_statistic(results)

lines = [line for line in capsys.readouterr().out.split("\n") if line]
failed = click.style("FAILED", bold=True, fg="red")
@@ -107,7 +107,7 @@ def test_display_statistic(capsys, swagger_20, endpoint):


def test_display_statistic_empty(capsys, results_set):
output.display_statistic(results_set)
default.display_statistic(results_set)
assert capsys.readouterr().out.split("\n")[2] == click.style("No checks were performed.", bold=True)


@@ -123,7 +123,7 @@ def test_capture_hypothesis_output():

@pytest.mark.parametrize("position, length, expected", ((1, 100, "[ 1%]"), (20, 100, "[ 20%]"), (100, 100, "[100%]")))
def test_get_percentage(position, length, expected):
assert output.get_percentage(position, length) == expected
assert default.get_percentage(position, length) == expected


@pytest.mark.parametrize("current_line_length", (0, 20))
@@ -134,17 +134,17 @@ def test_display_percentage(
execution_context.current_line_length = current_line_length
execution_context.endpoints_processed = endpoints_processed
# When percentage is displayed
output.display_percentage(execution_context, after_execution)
default.display_percentage(execution_context, after_execution)
out = capsys.readouterr().out
# Then the whole line fits precisely to the terminal width
assert len(click.unstyle(out)) + current_line_length == output.get_terminal_width()
assert len(click.unstyle(out)) + current_line_length == default.get_terminal_width()
# And the percentage displayed as expected in cyan color
assert out.strip() == click.style(percentage, fg="cyan")


def test_display_hypothesis_output(capsys):
# When Hypothesis output is displayed
output.display_hypothesis_output(["foo", "bar"])
default.display_hypothesis_output(["foo", "bar"])
lines = capsys.readouterr().out.split("\n")
# Then the relevant section title is displayed
assert " HYPOTHESIS OUTPUT" in lines[0]
@@ -167,7 +167,7 @@ def test_display_single_failure(capsys, swagger_20, endpoint, body):
[success, success, success, failure, failure, models.Check("different_check", models.Status.success)],
)
# When this failure is displayed
output.display_single_failure(test_statistic)
default.display_single_failure(test_statistic)
out = capsys.readouterr().out
lines = out.split("\n")
# Then the endpoint name is displayed as a subsection
@@ -195,7 +195,7 @@ def test_handle_after_execution(capsys, execution_context, after_execution, stat
# Given AfterExecution event with certain status
after_execution.status = status
# When this event is handled
output.handle_after_execution(execution_context, after_execution)
default.handle_after_execution(execution_context, after_execution)

lines = capsys.readouterr().out.strip().split("\n")
symbol, percentage = lines[0].split()
@@ -207,13 +207,13 @@ def test_after_execution_attributes(execution_context, after_execution):

def test_after_execution_attributes(execution_context, after_execution):
# When `handle_after_execution` is executed
output.handle_after_execution(execution_context, after_execution)
default.handle_after_execution(execution_context, after_execution)
# Then number of endpoints processed grows by 1
assert execution_context.endpoints_processed == 1
# And the line length grows by 1 symbol
assert execution_context.current_line_length == 1

output.handle_after_execution(execution_context, after_execution)
default.handle_after_execution(execution_context, after_execution)
assert execution_context.endpoints_processed == 2
assert execution_context.current_line_length == 2

@@ -229,7 +229,7 @@ def test_display_single_error(capsys, swagger_20, endpoint):
result = models.TestResult(endpoint, swagger_20)
result.add_error(exception)
# When the related test result is displayed
output.display_single_error(result)
default.display_single_error(result)
lines = capsys.readouterr().out.strip().split("\n")
# Then it should be correctly formatted and displayed in red color
if sys.version_info <= (3, 8):
@@ -245,7 +245,7 @@ def test_display_failures(swagger_20, capsys, results_set):
failure.add_failure("test", models.Case("/api/failure", "GET", base_url="http://127.0.0.1:8080"), "Message")
results_set.append(failure)
# When the failures are displayed
output.display_failures(results_set)
default.display_failures(results_set)
out = capsys.readouterr().out.strip()
# Then section title is displayed
assert " FAILURES " in out
@@ -267,7 +267,7 @@ def test_display_errors(swagger_20, capsys, results_set):
)
results_set.append(error)
# When the errors are displayed
output.display_errors(results_set)
default.display_errors(results_set)
out = capsys.readouterr().out.strip()
# Then section title is displayed
assert " ERRORS " in out
@@ -285,15 +285,15 @@ def test_display_errors(swagger_20, capsys, results_set):
((models.Case.__attrs_attrs__[0], "Path"), (models.Case.__attrs_attrs__[3], "Path parameters")),
)
def test_make_verbose_name(attribute, expected):
assert output.make_verbose_name(attribute) == expected
assert default.make_verbose_name(attribute) == expected


def test_display_summary(capsys, results_set, swagger_20):
# Given the Finished event
event = runner.events.Finished(results=results_set, schema=swagger_20, running_time=1.257)
# When `display_summary` is called
with pytest.raises(click.exceptions.Exit):
output.display_summary(event)
default.display_summary(event)
out = capsys.readouterr().out.strip()
# Then number of total tests & total running time should be displayed
assert "=== 1 passed in 1.26s ===" in out
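
A possible follow-up test — not part of this commit — illustrating the new `execute(prepared_runner, handler)` contract with the fixtures this module already defines; the one-event stream below is a stand-in for a real prepared runner.

from schemathesis import cli, runner

def test_execute_forwards_events_to_handler(results_set, swagger_20):
    received = []

    def handler(context, event):
        received.append(event)

    # `execute` should pass every event drawn from the runner to the given handler
    finished = runner.events.Finished(results=results_set, schema=swagger_20, running_time=0.5)
    cli.execute(iter([finished]), handler)
    assert received == [finished]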