Test case verbosity #11653

Merged · 15 commits · Feb 24, 2024
Changes from 10 commits
1 change: 1 addition & 0 deletions changelog/11387.feature.rst
@@ -1,4 +1,5 @@
Added the new :confval:`verbosity_assertions` configuration option for fine-grained control of failed assertions verbosity.
Added the new :confval:`verbosity_test_cases` configuration option for fine-grained control of test execution verbosity.

See :ref:`Fine-grained verbosity <pytest.fine_grained_verbosity>` for more details.

4 changes: 3 additions & 1 deletion doc/en/how-to/output.rst
@@ -298,7 +298,9 @@ This is done by setting a verbosity level in the configuration file for the spec
``pytest --no-header`` with a value of ``2`` would have the same output as the previous example, but each test inside
the file is shown by a single character in the output.

(Note: currently this is the only option available, but more might be added in the future).
:confval:`verbosity_test_cases`: Controls how verbose the test execution output should be.
Running ``pytest --no-header`` with a value of ``2`` would have the same output as the first verbosity example, but each
test inside the file gets its own line in the output.
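
For example, a project that wants one line per test without raising the global level could set
(a minimal sketch):

.. code-block:: ini

    [pytest]
    verbosity_test_cases = 2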

.. _`pytest.detailed_failed_tests_usage`:

13 changes: 13 additions & 0 deletions doc/en/reference/reference.rst
@@ -1859,6 +1859,19 @@ passed multiple times. The expected format is ``name=value``. For example::
"auto" can be used to explicitly use the global verbosity level.


.. confval:: verbosity_test_cases

Set a verbosity level specifically for test case execution output, overriding the application-wide level.

.. code-block:: ini

[pytest]
verbosity_test_cases = 2

Defaults to the application-wide verbosity level (set via the ``-v`` command-line option). The special value
"auto" can be used to explicitly use the global verbosity level.


.. confval:: xfail_strict

If set to ``True``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the
2 changes: 2 additions & 0 deletions src/_pytest/config/__init__.py
@@ -1655,6 +1655,8 @@ def getvalueorskip(self, name: str, path=None):

#: Verbosity type for failed assertions (see :confval:`verbosity_assertions`).
VERBOSITY_ASSERTIONS: Final = "assertions"
#: Verbosity type for test case execution (see :confval:`verbosity_test_cases`).
VERBOSITY_TEST_CASES: Final = "test_cases"
_VERBOSITY_INI_DEFAULT: Final = "auto"

def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
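
The hunk cuts off at the signature; a plausible sketch of the fallback behavior implied by
``_VERBOSITY_INI_DEFAULT = "auto"`` (an assumption — the actual body is not shown in this diff):

def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
    # Sketch (assumption): resolve "verbosity_<type>" from the ini file,
    # treating the special value "auto" as "defer to the global -v level".
    global_level = self.option.verbose
    if verbosity_type is None:
        return global_level
    ini_value = self.getini(f"verbosity_{verbosity_type}")
    if ini_value == Config._VERBOSITY_INI_DEFAULT:  # "auto"
        return global_level
    return int(ini_value)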
28 changes: 20 additions & 8 deletions src/_pytest/terminal.py
@@ -253,6 +253,14 @@
"progress even when capture=no)",
default="progress",
)
Config._add_verbosity_ini(
parser,
Config.VERBOSITY_TEST_CASES,
help=(
"Specify a verbosity level for test case execution, overriding the main level. "
"Higher levels will provide more detailed information about each test case executed."
),
)
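
``_add_verbosity_ini`` is defined on ``Config`` and is not part of this hunk; a hedged sketch of what
it plausibly registers, given the ``"auto"`` ini default above:

@classmethod
def _add_verbosity_ini(cls, parser, verbosity_type: str, help: str) -> None:
    # Sketch (assumption): register "verbosity_<type>" as an ini option
    # defaulting to "auto".
    parser.addini(
        f"verbosity_{verbosity_type}",
        help=help,
        default=cls._VERBOSITY_INI_DEFAULT,  # "auto"
    )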


def pytest_configure(config: Config) -> None:
@@ -406,7 +414,7 @@
@property
def showfspath(self) -> bool:
if self._showfspath is None:
return self.verbosity >= 0
return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0
return self._showfspath

@showfspath.setter
@@ -415,7 +423,7 @@

@property
def showlongtestinfo(self) -> bool:
return self.verbosity > 0
return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0

def hasopt(self, char: str) -> bool:
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
@@ -593,7 +601,7 @@
markup = {"yellow": True}
else:
markup = {}
if self.verbosity <= 0:
if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
self._tw.write(letter, **markup)
else:
self._progress_nodeids_reported.add(rep.nodeid)
@@ -602,7 +610,7 @@
self.write_ensure_prefix(line, word, **markup)
if rep.skipped or hasattr(report, "wasxfail"):
reason = _get_raw_skip_reason(rep)
if self.config.option.verbose < 2:
if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
available_width = (
(self._tw.fullwidth - self._tw.width_of_current_line)
- len(" [100%]")
@@ -639,7 +647,10 @@

def pytest_runtest_logfinish(self, nodeid: str) -> None:
assert self._session
if self.verbosity <= 0 and self._show_progress_info:
if (
self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
and self._show_progress_info
):
if self._show_progress_info == "count":
num_tests = self._session.testscollected
progress_length = len(f" [{num_tests}/{num_tests}]")
@@ -819,8 +830,9 @@
rep.toterminal(self._tw)

def _printcollecteditems(self, items: Sequence[Item]) -> None:
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)

(Codecov / codecov/patch warning on src/_pytest/terminal.py#L833: added line #L833 was not covered by tests.)
if test_cases_verbosity < 0:
if test_cases_verbosity < -1:
counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
@@ -840,7 +852,7 @@
stack.append(col)
indent = (len(stack) - 1) * " "
self._tw.line(f"{indent}{col}")
if self.config.option.verbose >= 1:
if test_cases_verbosity >= 1:
obj = getattr(col, "obj", None)
doc = inspect.getdoc(obj) if obj else None
if doc:
231 changes: 231 additions & 0 deletions testing/test_terminal.py
@@ -2614,3 +2614,234 @@ def test_format_trimmed() -> None:

assert _format_trimmed(" ({}) ", msg, len(msg) + 4) == " (unconditional skip) "
assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "


class TestFineGrainedTestCase:
DEFAULT_FILE_CONTENTS = """
import pytest

@pytest.mark.parametrize("i", range(4))
def test_ok(i):
'''
some docstring
'''
pass

def test_fail():
assert False
"""
LONG_SKIP_FILE_CONTENTS = """
import pytest

@pytest.mark.skip(
"some long skip reason that will not fit on a single line with other content that goes"
" on and on and on and on and on"
)
def test_skip():
pass
"""

@pytest.mark.parametrize("verbosity", [1, 2])
def test_execute_positive(self, verbosity, pytester: Pytester) -> None:
# expected: one test case per line (with file name), word describing result
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
result = pytester.runpytest(p)

result.stdout.fnmatch_lines(
[
"collected 5 items",
"",
f"{p.name}::test_ok[0] PASSED [ 20%]",
f"{p.name}::test_ok[1] PASSED [ 40%]",
f"{p.name}::test_ok[2] PASSED [ 60%]",
f"{p.name}::test_ok[3] PASSED [ 80%]",
f"{p.name}::test_fail FAILED [100%]",
],
consecutive=True,
)

def test_execute_0_global_1(self, pytester: Pytester) -> None:
# expected: one file name per line, single character describing result
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=0)
result = pytester.runpytest("-v", p)

result.stdout.fnmatch_lines(
[
"collecting ... collected 5 items",
"",
f"{p.name} ....F [100%]",
],
consecutive=True,
)

@pytest.mark.parametrize("verbosity", [-1, -2])
def test_execute_negative(self, verbosity, pytester: Pytester) -> None:
# expected: single character describing result
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
result = pytester.runpytest(p)

result.stdout.fnmatch_lines(
[
"collected 5 items",
"....F [100%]",
],
consecutive=True,
)

def test_execute_skipped_positive_2(self, pytester: Pytester) -> None:
# expected: one test case per line (with file name), word describing result, full reason
p = TestFineGrainedTestCase._initialize_files(
pytester,
verbosity=2,
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
)
result = pytester.runpytest(p)

result.stdout.fnmatch_lines(
[
"collected 1 item",
"",
f"{p.name}::test_skip SKIPPED (some long skip",
"reason that will not fit on a single line with other content that goes",
"on and on and on and on and on) [100%]",
],
consecutive=True,
)

def test_execute_skipped_positive_1(self, pytester: Pytester) -> None:
# expected: one test case per line (with file name), word describing result, reason truncated
p = TestFineGrainedTestCase._initialize_files(
pytester,
verbosity=1,
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
)
result = pytester.runpytest(p)

result.stdout.fnmatch_lines(
[
"collected 1 item",
"",
f"{p.name}::test_skip SKIPPED (some long ski...) [100%]",
],
consecutive=True,
)

def test_execute_skipped_0_global_1(self, pytester: Pytester) -> None:
# expected: one file name per line, single character describing result (no reason)
p = TestFineGrainedTestCase._initialize_files(
pytester,
verbosity=0,
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
)
result = pytester.runpytest("-v", p)

result.stdout.fnmatch_lines(
[
"collecting ... collected 1 item",
"",
f"{p.name} s [100%]",
],
consecutive=True,
)

@pytest.mark.parametrize("verbosity", [-1, -2])
def test_execute_skipped_negative(self, verbosity, pytester: Pytester) -> None:
# expected: single character describing result (no reason)
p = TestFineGrainedTestCase._initialize_files(
pytester,
verbosity=verbosity,
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
)
result = pytester.runpytest(p)

result.stdout.fnmatch_lines(
[
"collected 1 item",
"s [100%]",
],
consecutive=True,
)

@pytest.mark.parametrize("verbosity", [1, 2])
def test_collect_only_positive(self, verbosity, pytester: Pytester) -> None:
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
result = pytester.runpytest("--collect-only", p)

result.stdout.fnmatch_lines(
[
"collected 5 items",
"",
f"<Module {p.name}>",
" <Function test_ok[0]>",
" some docstring",
" <Function test_ok[1]>",
" some docstring",
" <Function test_ok[2]>",
" some docstring",
" <Function test_ok[3]>",
" some docstring",
" <Function test_fail>",
],
consecutive=True,
)

def test_collect_only_0_global_1(self, pytester: Pytester) -> None:
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=0)
result = pytester.runpytest("-v", "--collect-only", p)

result.stdout.fnmatch_lines(
[
"collecting ... collected 5 items",
"",
f"<Module {p.name}>",
" <Function test_ok[0]>",
" <Function test_ok[1]>",
" <Function test_ok[2]>",
" <Function test_ok[3]>",
" <Function test_fail>",
],
consecutive=True,
)

def test_collect_only_negative_1(self, pytester: Pytester) -> None:
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=-1)
result = pytester.runpytest("--collect-only", p)

result.stdout.fnmatch_lines(
[
"collected 5 items",
"",
f"{p.name}::test_ok[0]",
f"{p.name}::test_ok[1]",
f"{p.name}::test_ok[2]",
f"{p.name}::test_ok[3]",
f"{p.name}::test_fail",
],
consecutive=True,
)

def test_collect_only_negative_2(self, pytester: Pytester) -> None:
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=-2)
result = pytester.runpytest("--collect-only", p)

result.stdout.fnmatch_lines(
[
"collected 5 items",
"",
f"{p.name}: 5",
],
consecutive=True,
)

@staticmethod
def _initialize_files(
pytester: Pytester, verbosity: int, file_contents: str = DEFAULT_FILE_CONTENTS
) -> Path:
p = pytester.makepyfile(file_contents)
pytester.makeini(
f"""
[pytest]
verbosity_test_cases = {verbosity}
"""
)
return p
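
As a companion sketch (not part of this PR's suite), the special ``auto`` value should make the ini
option defer to the global level, so ``-v`` still yields one line per test:

def test_verbosity_auto_defers_to_global(pytester: Pytester) -> None:
    # Sketch (assumption): "auto" behaves exactly like omitting the option.
    p = pytester.makepyfile("def test_ok(): pass")
    pytester.makeini(
        """
        [pytest]
        verbosity_test_cases = auto
        """
    )
    result = pytester.runpytest("-v", p)
    result.stdout.fnmatch_lines([f"{p.name}::test_ok PASSED*"])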