Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Stepwise #4147

Merged
merged 17 commits into from Oct 27, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 2 additions & 0 deletions AUTHORS
Expand Up @@ -59,6 +59,7 @@ Danielle Jenkins
Dave Hunt
David Díaz-Barquero
David Mohr
David Szotten
David Vierra
Daw-Ran Liou
Denis Kirisov
Expand Down Expand Up @@ -161,6 +162,7 @@ Miro Hrončok
Nathaniel Waisbrot
Ned Batchelder
Neven Mundar
Niclas Olofsson
Nicolas Delaby
Oleg Pidsadnyi
Oleg Sushchenko
Expand Down
1 change: 1 addition & 0 deletions changelog/4147.feature.rst
@@ -0,0 +1 @@
Add ``-sw``, ``--stepwise`` as an alternative to ``--lf -x`` for stopping at the first failure, but starting the next test invocation from that test. See `the documentation <https://docs.pytest.org/en/latest/cache.html#stepwise>`_ for more info.
6 changes: 6 additions & 0 deletions doc/en/cache.rst
Expand Up @@ -260,3 +260,9 @@ by adding the ``--cache-clear`` option like this::
This is recommended for invocations from Continuous Integration
servers where isolation and correctness are more important
than speed.


Stepwise
--------

As an alternative to ``--lf -x``, especially for cases where you expect a large part of the test suite will fail, ``--sw``, ``--stepwise`` allows you to fix them one at a time. The test suite will run until the first failure and then stop. At the next invocation, tests will continue from the last failing test and then run until the next failing test. You may use the ``--stepwise-skip`` option to ignore one failing test and stop the test execution on the second failing test instead. This is useful if you get stuck on a failing test and just want to ignore it until later.
1 change: 1 addition & 0 deletions src/_pytest/config/__init__.py
Expand Up @@ -126,6 +126,7 @@ def directory_arg(path, optname):
"freeze_support",
"setuponly",
"setupplan",
"stepwise",
"warnings",
"logging",
)
Expand Down
102 changes: 102 additions & 0 deletions src/_pytest/stepwise.py
@@ -0,0 +1,102 @@
import pytest


def pytest_addoption(parser):
    """Register the ``--stepwise`` family of command line options."""
    group = parser.getgroup("general")
    # Main switch: stop on first failure, resume from it on the next run.
    group.addoption(
        "--sw", "--stepwise", action="store_true", dest="stepwise",
        help="exit on test fail and continue from last failing test next time",
    )
    # Modifier: tolerate one failure before stopping.
    group.addoption(
        "--stepwise-skip", action="store_true", dest="stepwise_skip",
        help="ignore the first failing test but stop on the next failing test",
    )


@pytest.hookimpl
def pytest_configure(config):
    """Instantiate the stepwise plugin and register it on the plugin manager."""
    plugin = StepwisePlugin(config)
    config.pluginmanager.register(plugin, "stepwiseplugin")


class StepwisePlugin:
    """Stop the test run at the first failure and resume from it next time.

    Only does anything when ``--stepwise`` was given.  The nodeid of the
    last failing test is persisted in the cache under ``cache/stepwise``.
    """

    def __init__(self, config):
        self.config = config
        self.active = config.getvalue("stepwise")
        self.session = None

        if self.active:
            # nodeid of the test that failed on the previous run, or None.
            self.lastfailed = config.cache.get("cache/stepwise", None)
            self.skip = config.getvalue("stepwise_skip")

    def pytest_sessionstart(self, session):
        # Keep a handle on the session so hooks below can set shouldstop.
        self.session = session

    def pytest_collection_modifyitems(self, session, config, items):
        if not self.active or not self.lastfailed:
            return

        # Locate the previously failing test among the collected items.
        index = None
        for position, item in enumerate(items):
            if item.nodeid == self.lastfailed:
                index = position
                break

        # If it was not collected this time (e.g. a different file was
        # selected), do not deselect anything and run the full suite.
        if index is None:
            deselected = []
        else:
            # Everything before the failing test already passed last run.
            deselected = items[:index]
            del items[:index]

        config.hook.pytest_deselected(items=deselected)

    def pytest_collectreport(self, report):
        # A collection error aborts the stepwise session immediately.
        if not self.active or not report.failed:
            return
        self.session.shouldstop = (
            "Error when collecting test, stopping test execution."
        )

    def pytest_runtest_logreport(self, report):
        # Skip this hook if plugin is not active or the test is xfailed.
        if not self.active or "xfail" in report.keywords:
            return

        if not report.failed:
            # A passing "call" phase clears any remembered failure.
            if report.when == "call" and report.nodeid == self.lastfailed:
                self.lastfailed = None
            return

        if self.skip:
            # First failure with --stepwise-skip: forget it if it was the
            # remembered one, and consume the single allowed skip so the
            # next failure stops the run.
            if report.nodeid == self.lastfailed:
                self.lastfailed = None
            self.skip = False
        else:
            # Remember this failure and interrupt the session here.
            self.lastfailed = report.nodeid
            self.session.shouldstop = (
                "Test failed, continuing from this test next run."
            )

    def pytest_sessionfinish(self, session):
        # Persist the remembered failure; clear the cache when inactive.
        value = self.lastfailed if self.active else []
        self.config.cache.set("cache/stepwise", value)
3 changes: 2 additions & 1 deletion testing/test_cacheprovider.py
Expand Up @@ -63,7 +63,8 @@ def test_error():
)
result = testdir.runpytest("-rw")
assert result.ret == 1
result.stdout.fnmatch_lines(["*could not create cache path*", "*2 warnings*"])
# warnings from nodeids, lastfailed, and stepwise
result.stdout.fnmatch_lines(["*could not create cache path*", "*3 warnings*"])

def test_config_cache(self, testdir):
testdir.makeconftest(
Expand Down
148 changes: 148 additions & 0 deletions testing/test_stepwise.py
@@ -0,0 +1,148 @@
import pytest


@pytest.fixture
def stepwise_testdir(testdir):
    """Testdir with a small suite whose failures are toggled via CLI flags.

    ``--fail`` makes the second test of ``test_a.py`` fail and
    ``--fail-last`` the fourth, so individual tests can change outcome
    between runs without rewriting the test file.
    """
    # Rather than having to modify our testfile between tests, we introduce
    # a flag for whether or not the second test should fail.
    testdir.makeconftest(
        """
def pytest_addoption(parser):
    group = parser.getgroup('general')
    group.addoption('--fail', action='store_true', dest='fail')
    group.addoption('--fail-last', action='store_true', dest='fail_last')
"""
    )

    # Create a simple test suite.
    testdir.makepyfile(
        test_a="""
def test_success_before_fail():
    assert 1

def test_fail_on_flag(request):
    assert not request.config.getvalue('fail')

def test_success_after_fail():
    assert 1

def test_fail_last_on_flag(request):
    assert not request.config.getvalue('fail_last')

def test_success_after_last_fail():
    assert 1
"""
    )

    # A second file, used to check behavior when the cached failure is
    # not among the collected tests.
    testdir.makepyfile(
        test_b="""
def test_success():
    assert 1
"""
    )

    return testdir


@pytest.fixture
def error_testdir(testdir):
    """Testdir whose first test errors out (missing fixture) at setup."""
    testdir.makepyfile(
        test_a="""
def test_error(nonexisting_fixture):
    assert 1

def test_success_after_fail():
    assert 1
"""
    )

    return testdir


@pytest.fixture
def broken_testdir(testdir):
    """Testdir with one working file and one that fails at collection."""
    # "foobar" is not valid at module level, so collection of
    # broken_testfile.py raises an error.
    testdir.makepyfile(
        working_testfile="def test_proper(): assert 1", broken_testfile="foobar"
    )
    return testdir


def test_run_without_stepwise(stepwise_testdir):
    """Without --stepwise the run continues past the failing test."""
    res = stepwise_testdir.runpytest("-v", "--strict", "--fail")

    for expected in (
        "*test_success_before_fail PASSED*",
        "*test_fail_on_flag FAILED*",
        "*test_success_after_fail PASSED*",
    ):
        res.stdout.fnmatch_lines([expected])


def test_fail_and_continue_with_stepwise(stepwise_testdir):
    """Stepwise stops at the failure and resumes from it on the next run."""
    # First run: the second test fails and execution stops right there.
    res = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "--fail")
    assert not res.stderr.str()

    out = res.stdout.str()
    assert "test_success_before_fail PASSED" in out
    assert "test_fail_on_flag FAILED" in out
    assert "test_success_after_fail" not in out

    # Second run without --fail ("fixed" test): execution resumes at the
    # previously failing test and continues past it.
    res = stepwise_testdir.runpytest("-v", "--strict", "--stepwise")
    assert not res.stderr.str()

    out = res.stdout.str()
    assert "test_success_before_fail" not in out
    assert "test_fail_on_flag PASSED" in out
    assert "test_success_after_fail PASSED" in out


def test_run_with_skip_option(stepwise_testdir):
    """--stepwise-skip ignores the first failure and stops on the second."""
    res = stepwise_testdir.runpytest(
        "-v", "--strict", "--stepwise", "--stepwise-skip", "--fail", "--fail-last"
    )
    assert not res.stderr.str()

    out = res.stdout.str()
    # The first failure is ignored; the second one stops the run.
    assert "test_fail_on_flag FAILED" in out
    assert "test_success_after_fail PASSED" in out
    assert "test_fail_last_on_flag FAILED" in out
    assert "test_success_after_last_fail" not in out


def test_fail_on_errors(error_testdir):
    """An erroring test (missing fixture) stops the stepwise run too."""
    res = error_testdir.runpytest("-v", "--strict", "--stepwise")
    assert not res.stderr.str()

    out = res.stdout.str()
    assert "test_error ERROR" in out
    assert "test_success_after_fail" not in out


def test_change_testfile(stepwise_testdir):
    """A cached failure absent from the collection deselects nothing."""
    res = stepwise_testdir.runpytest(
        "-v", "--strict", "--stepwise", "--fail", "test_a.py"
    )
    assert not res.stderr.str()
    assert "test_fail_on_flag FAILED" in res.stdout.str()

    # The cached failure points into test_a.py; running test_b.py must
    # start from the beginning since that nodeid cannot be found there.
    res = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "test_b.py")
    assert not res.stderr.str()
    assert "test_success PASSED" in res.stdout.str()


def test_stop_on_collection_errors(broken_testdir):
    """A collection error aborts the stepwise session immediately."""
    res = broken_testdir.runpytest(
        "-v", "--strict", "--stepwise", "working_testfile.py", "broken_testfile.py"
    )
    assert "errors during collection" in res.stdout.str()