1 change: 1 addition & 0 deletions AUTHORS
@@ -69,6 +69,7 @@ Nicolas Delaby
 Pieter Mulder
 Piotr Banaszkiewicz
 Punyashloka Biswal
+Quentin Pradet
 Ralf Schmitt
 Raphael Pierzina
 Ronny Pfannschmidt
5 changes: 4 additions & 1 deletion CHANGELOG.rst
@@ -7,7 +7,9 @@
 
 *
 
-*
+* Fix ``pytest.mark.skip`` mark when used in strict mode.
+  Thanks `@pquentin`_ for the PR and `@RonnyPfannschmidt`_ for
+  showing how to fix the bug.
 
 * Minor improvements and fixes to the documentation.
   Thanks `@omarkohl`_ for the PR.
@@ -165,6 +167,7 @@
 .. _@rabbbit: https://github.com/rabbbit
 .. _@hackebrot: https://github.com/hackebrot
 .. _@omarkohl: https://github.com/omarkohl
+.. _@pquentin: https://github.com/pquentin
 
 2.8.7
 =====
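
A note for context (not part of the diff): with pytest's --strict option, any marker not registered under the ``markers`` ini key (or via ``config.addinivalue_line``) is rejected. Before this change, ``pytest.mark.skip`` itself was never registered, so strict runs refused it. A minimal reproduction sketch follows; the file name and test body are illustrative, only the marker usage mirrors the PR's new test.

# test_repro.py -- illustrative sketch, not taken from the PR
import pytest

# before this fix, `pytest --strict test_repro.py` errored because the
# built-in `skip` marker was unregistered; with the fix the test is skipped
@pytest.mark.skip(reason="no way of currently testing this")
def test_hello():
    pass
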
19 changes: 12 additions & 7 deletions _pytest/skipping.py
@@ -30,6 +30,11 @@ def nop(*args, **kwargs):
         nop.Exception = XFailed
         setattr(pytest, "xfail", nop)
 
+    config.addinivalue_line("markers",
+        "skip(reason=None): skip the given test function with an optional reason. "
+        "Example: skip(reason=\"no way of currently testing this\") skips the "
+        "test."
+    )
     config.addinivalue_line("markers",
         "skipif(condition): skip the given test function if eval(condition) "
         "results in a True value. Evaluation happens within the "
@@ -38,13 +43,13 @@ def nop(*args, **kwargs):
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
-        "as an expected failure if eval(condition) has a True value. "
-        "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. If only specific "
-        "exception(s) are expected, you can list them in raises, and if the test fails "
-        "in other ways, it will be reported as a true failure. "
-        "See http://pytest.org/latest/skipping.html"
+        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+        "mark the test function as an expected failure if eval(condition) "
+        "has a True value. Optionally specify a reason for better reporting "
+        "and run=False if you don't even want to execute the test function. "
+        "If only specific exception(s) are expected, you can list them in "
+        "raises, and if the test fails in other ways, it will be reported as "
+        "a true failure. See http://pytest.org/latest/skipping.html"
     )
 
 
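
The ``config.addinivalue_line("markers", ...)`` call added above is the same hook that plugins and conftest files use to register their own markers so that --strict accepts them. A short sketch; the ``slow`` marker and its description are illustrative, not part of this PR.

# conftest.py -- registering a custom marker the way skipping.py does above
def pytest_configure(config):
    config.addinivalue_line("markers",
        "slow: mark a test as slow so it can be deselected with -m 'not slow'")
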
15 changes: 14 additions & 1 deletion testing/test_skipping.py
@@ -539,6 +539,19 @@ def test_baz():
             "*1 passed*2 skipped*",
         ])
 
+    def test_strict_and_skip(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.mark.skip
+            def test_hello():
+                pass
+        """)
+        result = testdir.runpytest("-rs", "--strict")
+        result.stdout.fnmatch_lines([
+            "*unconditional skip*",
+            "*1 skipped*",
+        ])
+
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
         item = testdir.getitem("""
@@ -812,7 +825,7 @@ def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
     ])
 
 def test_xfail_test_setup_exception(testdir):
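
Since the marker help now advertises ``strict=False`` on xfail, here is a quick sketch of what that flag does per pytest's documented semantics (the test body is illustrative): with strict=True, a test that unexpectedly passes (XPASS) is reported as a real failure instead of being tolerated.

# test_xfail_strict.py -- illustrative example, not part of this PR
import pytest

@pytest.mark.xfail(strict=True, reason="demonstrates strict xfail")
def test_known_bug():
    # fails today, so it is reported as xfail; if it ever started passing,
    # strict=True would turn the unexpected pass (XPASS) into a failure
    assert 1 + 1 == 3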