Deprecate --fast option #8970

Merged (4 commits, Jan 28, 2020)
7 changes: 1 addition & 6 deletions pants.ini
@@ -405,17 +405,12 @@ pytest_plugins: +[
 timeouts: true
 timeout_default: 60


 [test.junit]
-# TODO(#8649): Switch to --no-fast.
-fast: true
 timeouts: true
 timeout_default: 60


 [test.go]
-# TODO(#8649): Switch to --no-fast --chroot.
-fast: true
+# TODO(#8649): Switch to --chroot.
 chroot: false

7 changes: 7 additions & 0 deletions src/python/pants/task/testrunner_task_mixin.py
@@ -397,6 +397,13 @@ def register_options(cls, register):
     # https://github.com/pantsbuild/pants/issues/5073

     register('--fast', type=bool, default=False, fingerprint=True,
+             removal_version="1.27.0.dev0",
+             removal_hint="This option is going away for better isolation of tests and in "
+                          "preparation for switching to the V2 test implementation, which provides "
+                          "much better caching and concurrency.\n\n"
+                          "We recommend running a full CI suite with `no-fast` (the default now) "
+                          "to see if any tests fail. If any fail, this likely signals shared state "
+                          "between your test targets.",
              help='Run all tests in a single invocation. If turned off, each test target '
                   'will run in its own invocation, which will be slower, but isolates '
                   'tests from process-wide state created by tests in other targets.')
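
To make the removal hint's warning about "shared state between your test targets" concrete, here is a small self-contained sketch (a hypothetical illustration, not code from this PR): one simulated test target mutates process-wide state that another silently depends on, so the pair passes in a single shared invocation (the old `--fast` behavior) but fails once each target runs in its own process (the new default).

```python
# Hypothetical illustration of the shared-state hazard named in the removal hint.
# `cache` stands in for process-wide state (module globals, env vars, singletons).

cache = {}

def target_a_tests():
    cache["token"] = "abc"  # target A's tests mutate process-wide state

def target_b_tests():
    # Target B's tests silently depend on target A having run in the same process.
    assert cache.get("token") == "abc", "B depends on state that A set up"

# --fast: all targets share one test process, so the hidden coupling is masked.
target_a_tests()
target_b_tests()  # passes, but only by accident of ordering and shared memory

# --no-fast (the new default): each target runs in a fresh process.
cache.clear()  # simulate target B starting in its own process
try:
    target_b_tests()
except AssertionError as exc:
    print(f"isolated run exposes the coupling: {exc}")
```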
@@ -4,7 +4,6 @@
 import os
 from collections import namedtuple
 from contextlib import contextmanager
-from unittest import expectedFailure

 from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
 from pants.util.contextutil import temporary_dir
@@ -184,10 +183,7 @@ def test_unit_tests_with_different_sets(self):
     ])
     self.assert_success(run)

-  @expectedFailure

[Review comment from a Sponsor Member]
🎸

   def test_unit_tests_with_different_sets_one_batch(self):
-    # NB(gmalmquist): Currently, junit_run isn't smart enough to partition the targets to run
-    # separately if they depend on jar_libraries which resolve using different managed dependencies.
     run = self.run_pants([
       'test',
       '--test-junit-batch-size=2',
@@ -57,8 +57,7 @@ def test_junit_test_with_test_option_with_classname(self):
     pants_run = self.run_pants_with_workdir(
       ['test.junit',
        '--test=org.pantsbuild.example.hello.greet.GreetingTest',
-       'examples/tests/java/org/pantsbuild/example/hello/greet',
-       'examples/tests/scala/org/pantsbuild/example/hello/welcome'],
+       'examples/tests/java/org/pantsbuild/example/hello/greet'],
       workdir)
     self.assert_success(pants_run)
     self._assert_output_for_class(workdir=workdir,
@@ -68,17 +67,13 @@ def test_junit_test_requiring_cwd_fails_without_option_specified(self):
     pants_run = self.run_pants([
       'test',
       'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
-      '--python-setup-interpreter-constraints=CPython>=2.7,<3',
-      '--python-setup-interpreter-constraints=CPython>=3.3',
       '--jvm-test-junit-options=-Dcwd.test.enabled=true'])
     self.assert_failure(pants_run)

   def test_junit_test_requiring_cwd_passes_with_option_with_value_specified(self):
     pants_run = self.run_pants([
       'test',
       'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
-      '--python-setup-interpreter-constraints=CPython>=2.7,<3',
-      '--python-setup-interpreter-constraints=CPython>=3.3',
       '--jvm-test-junit-options=-Dcwd.test.enabled=true',
       '--no-test-junit-chroot',
       '--test-junit-cwd=testprojects/src/java/org/pantsbuild/testproject/cwdexample/subdir'])
@@ -88,8 +83,6 @@ def test_junit_test_requiring_cwd_fails_with_option_with_no_value_specified(self):
     pants_run = self.run_pants([
       'test',
       'testprojects/tests/java/org/pantsbuild/testproject/cwdexample',
-      '--python-setup-interpreter-constraints=CPython>=2.7,<3',
-      '--python-setup-interpreter-constraints=CPython>=3.3',
       '--jvm-test-junit-options=-Dcwd.test.enabled=true'])
     self.assert_failure(pants_run)

50 changes: 22 additions & 28 deletions tests/python/pants_test/backend/python/tasks/test_pytest_run.py
@@ -57,9 +57,8 @@ def task_type(cls):
   _default_test_options = {
     'colors': False,
     'level': 'info',  # When debugging a test failure it may be helpful to set this to 'debug'.
-    # TODO(8989): Don't require chroot=False and fast=True
+    # TODO(8989): Don't require chroot=False
     'chroot': False,
-    'fast': True,
   }

   def _augment_options(self, options):
@@ -432,19 +431,20 @@ def null():
     self.py23 = self.target('tests:py23-tests')
     self.py3_and_more = self.target('tests:py3-and-more-tests')

-  @ensure_cached(PytestRun, expected_num_artifacts=0)
+  @ensure_cached(PytestRun, expected_num_artifacts=1)
   def test_error(self):
     """Test that a test that errors rather than fails shows up in ErrorWhileTesting."""
-    self.run_failing_tests(targets=[self.red, self.green, self.error],
-                           failed_targets=[self.red, self.error])
+    failing_targets = [self.red, self.error]
+    self.run_failing_tests(targets=[self.green, *failing_targets],
+                           failed_targets=failing_targets)

-  @ensure_cached(PytestRun, expected_num_artifacts=0)
+  @ensure_cached(PytestRun, expected_num_artifacts=1)
   def test_error_outside_function(self):
-    self.run_failing_tests(targets=[self.red, self.green, self.failure_outside_function],
-                           failed_targets=[self.red, self.failure_outside_function])
+    failing_targets = [self.red, self.failure_outside_function]
+    self.run_failing_tests(targets=[self.green, *failing_targets],
+                           failed_targets=failing_targets)

-  @ensure_cached(PytestRun, expected_num_artifacts=1)
+  @ensure_cached(PytestRun, expected_num_artifacts=2)
   def test_succeeds_for_intersecting_unique_constraints(self):
     self.run_tests(targets=[self.py23, self.py3_and_more])
@@ -457,15 +457,13 @@ def test_caches_greens_fast(self):
     self.run_tests(targets=[self.green, self.green2, self.green3], fast=True)

   @ensure_cached(PytestRun, expected_num_artifacts=3)
-  def test_cache_greens_slow(self):
-    self.run_tests(targets=[self.green, self.green2, self.green3], fast=False)
+  def test_cache_greens(self):
+    self.run_tests(targets=[self.green, self.green2, self.green3])

   def test_timeout_slow(self):
-    # Confirm that if we run fast=False with timeouts, the correct test is blamed.
     self.run_failing_tests(
       targets=[self.green, self.sleep_timeout],
       failed_targets=[self.sleep_timeout],
-      fast=False,
       timeout_default=3,
     )

Expand All @@ -476,8 +474,8 @@ def test_out_of_band_deselect_fast_success(self):
# NB: Both red and green are cached. Red because its skipped via deselect and so runs (noops)
# successfully. This is OK since the -k passthru is part of the task fingerprinting.
@ensure_cached(PytestRun, expected_num_artifacts=2)
def test_out_of_band_deselect_no_fast_success(self):
self.run_tests([self.green, self.red], '-ktest_core_green', fast=False)
def test_out_of_band_deselect_success(self):
self.run_tests([self.green, self.red], '-ktest_core_green')

@ensure_cached(PytestRun, expected_num_artifacts=0)
def test_red(self):
@@ -491,17 +489,16 @@ def test_fail_fast_skips_second_red_test_with_single_chroot(self):
                            fast=True)

   @ensure_cached(PytestRun, expected_num_artifacts=0)
-  def test_fail_fast_skips_second_red_test_with_isolated_chroots(self):
+  def test_fail_fast_skips_second_red_test(self):
     self.run_failing_tests(targets=[self.red, self.red_in_class],
                            failed_targets=[self.red],
-                           fail_fast=True,
-                           fast=False)
+                           fail_fast=True)

   @ensure_cached(PytestRun, expected_num_artifacts=0)
   def test_red_test_in_class(self):
     self.run_failing_tests(targets=[self.red_in_class], failed_targets=[self.red_in_class])

-  @ensure_cached(PytestRun, expected_num_artifacts=0)
+  @ensure_cached(PytestRun, expected_num_artifacts=1)
   def test_mixed(self):
     self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])

Expand All @@ -520,14 +517,12 @@ def test_green_junit_xml_dir(self):

self.assert_test_info(junit_xml_dir, ('test_one', 'success'))

@ensure_cached(PytestRun, expected_num_artifacts=0)
@ensure_cached(PytestRun, expected_num_artifacts=1)
def test_red_junit_xml_dir(self):
with temporary_dir() as junit_xml_dir:
self.run_failing_tests(targets=[self.red, self.green],
failed_targets=[self.red],
junit_xml_dir=junit_xml_dir,
fast=True,
fail_fast=False)
junit_xml_dir=junit_xml_dir)

self.assert_test_info(junit_xml_dir, ('test_one', 'success'), ('test_two', 'failure'))

@@ -611,12 +606,12 @@ def test_coverage_auto_option_red(self):
     self.assertEqual([1, 2, 5, 6], all_statements)
     self.assertEqual([2], not_run_statements)

-  @ensure_cached(PytestRun, expected_num_artifacts=0)
+  @ensure_cached(PytestRun, expected_num_artifacts=1)
   def test_coverage_auto_option_mixed_multiple_targets(self):
     all_statements, not_run_statements = self.run_coverage_auto(targets=[self.green, self.red],
                                                                 failed_targets=[self.red])
     self.assertEqual([1, 2, 5, 6], all_statements)
-    self.assertEqual([], not_run_statements)
+    self.assertEqual([2], not_run_statements)

[Review comment from the PR author (Contributor)]
I couldn't figure this one out.

This is now the same output as test_coverage_auto_option_red. I suspect what's happening is that self.green says every statement ran, but self.red says statement 2 did not, so the combined result says statement 2 did not run?

cc @jsirois, do you recall if V1 Pytest --no-fast will attempt to merge coverage results? I know that's a tricky task @TansyArron has spent much time on for V2.
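
A few lines of arithmetic make the suspicion above concrete (an illustrative assumption about the merge semantics, not the actual pytest-cov or Pants code): if per-target results are combined so that a statement counts as run only when every target ran it, green's full coverage cannot rescue statement 2.

```python
# Statement numbers taken from the assertions in the diff above.
all_statements = {1, 2, 5, 6}
green_executed = {1, 2, 5, 6}  # self.green exercises every statement
red_executed = {1, 5, 6}       # self.red never reaches statement 2

# Intersection-style merge: a statement counts as run only if every
# per-target run executed it. This reproduces the updated expectation of [2].
combined = green_executed & red_executed
assert sorted(all_statements - combined) == [2]

# A union-style merge (what `coverage combine` normally does) would instead
# report every statement as run, i.e. the old expectation of [].
assert sorted(all_statements - (green_executed | red_executed)) == []
```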


   @ensure_cached(PytestRun, expected_num_artifacts=0)
   def test_coverage_auto_option_mixed_single_target(self):

@@ -781,7 +776,7 @@ def test_coverage_auto_option_no_explicit_coverage_idiosyncratic_layout_no_packa
     # short-circuiting coverage.
     self.run_coverage_auto(targets=[self.all], failed_targets=[self.all], expect_coverage=False)

-  @ensure_cached(PytestRun, expected_num_artifacts=0)
+  @ensure_cached(PytestRun, expected_num_artifacts=1)
   def test_coverage_modules_dne_option(self):
     self.assertFalse(os.path.isfile(self.coverage_data_file()))
@@ -848,7 +843,6 @@ def test_coverage_issue_5314_all_source_roots(self):
     self.assertEqual([1, 2, 5, 6], all_statements)
     self.assertEqual([2], not_run_statements)

-  @ensure_cached(PytestRun, expected_num_artifacts=1)

[Review comment from the PR author (Contributor)]
I removed this because @ensure_cached is hardcoded to check that there is only one cache_subdir. With sharding, we have two subdirs. Rewriting TaskTestBase to allow for multiple cache_subdirs would be a lot of work for this legacy code.
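
A schematic version of such a decorator (an assumed shape for illustration; the real `@ensure_cached` lives in Pants' test infrastructure and differs in detail) shows why sharding breaks it: the single-subdir assertion fails as soon as two shards produce two cache subdirectories.

```python
import functools
import os

def ensure_cached_sketch(task_cls, expected_num_artifacts):
    """Schematic stand-in for @ensure_cached; not the real Pants implementation."""
    def decorator(test_fn):
        @functools.wraps(test_fn)
        def wrapper(self, *args, **kwargs):
            test_fn(self, *args, **kwargs)
            cache_dir = self.get_cache_dir(task_cls)  # assumed test-case helper
            subdirs = [entry for entry in os.listdir(cache_dir)
                       if os.path.isdir(os.path.join(cache_dir, entry))]
            # Hardcoded single-subdir assumption: with two shards there are two
            # subdirs, so this assertion fails regardless of the artifact count.
            assert len(subdirs) == 1, f"expected 1 cache subdir, found {len(subdirs)}"
            artifacts = os.listdir(os.path.join(cache_dir, subdirs[0]))
            assert len(artifacts) == expected_num_artifacts
        return wrapper
    return decorator
```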

   def test_sharding(self):
     shard0_failed_targets = self.try_run_tests(targets=[self.red, self.green], test_shard='0/2')
     shard1_failed_targets = self.try_run_tests(targets=[self.red, self.green], test_shard='1/2')