[3.6] [3.7] bpo-34279: Synchronize regrtest with master (GH-10800) #10802

Merged
1 commit merged on Nov 29, 2018
2 changes: 1 addition & 1 deletion Lib/test/libregrtest/cmdline.py
@@ -170,7 +170,7 @@ def _create_parser():
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('--slaveargs', metavar='ARGS')
group.add_argument('--worker-args', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
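For illustration, a minimal argparse sketch of the renamed option (the parser below is a simplified stand-in, not regrtest's full command line):

import argparse

# Simplified stand-in for regrtest's parser; only the renamed option is shown.
parser = argparse.ArgumentParser()
parser.add_argument('--worker-args', metavar='ARGS')

# argparse exposes '--worker-args' as the attribute ns.worker_args.
ns = parser.parse_args(['--worker-args', '[[], {}]'])
print(ns.worker_args)   # [[], {}]
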
19 changes: 15 additions & 4 deletions Lib/test/libregrtest/main.py
@@ -14,7 +14,7 @@
from test.libregrtest.runtest import (
findtests, runtest, get_abs_module,
STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
INTERRUPTED, CHILD_ERROR,
INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN,
PROGRESS_MIN_TIME, format_test_result)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import removepy, count, format_duration, printlist
@@ -79,6 +79,7 @@ def __init__(self):
self.resource_denieds = []
self.environment_changed = []
self.rerun = []
self.run_no_tests = []
self.first_result = None
self.interrupted = False

@@ -118,6 +119,8 @@ def accumulate_result(self, test, result):
elif ok == RESOURCE_DENIED:
self.skipped.append(test)
self.resource_denieds.append(test)
elif ok == TEST_DID_NOT_RUN:
self.run_no_tests.append(test)
elif ok != INTERRUPTED:
raise ValueError("invalid test result: %r" % ok)

@@ -368,6 +371,11 @@ def display_result(self):
print("%s:" % count(len(self.rerun), "re-run test"))
printlist(self.rerun)

if self.run_no_tests:
print()
print(count(len(self.run_no_tests), "test"), "run no tests:")
printlist(self.run_no_tests)

def run_tests_sequential(self):
if self.ns.trace:
import trace
@@ -458,6 +466,9 @@ def get_tests_result(self):
result.append("FAILURE")
elif self.ns.fail_env_changed and self.environment_changed:
result.append("ENV CHANGED")
elif not any((self.good, self.bad, self.skipped, self.interrupted,
self.environment_changed)):
result.append("NO TEST RUN")

if self.interrupted:
result.append("INTERRUPTED")
@@ -582,9 +593,9 @@ def _main(self, tests, kwargs):
print(msg, file=sys.stderr, flush=True)
sys.exit(2)

if self.ns.slaveargs is not None:
from test.libregrtest.runtest_mp import run_tests_slave
run_tests_slave(self.ns.slaveargs)
if self.ns.worker_args is not None:
from test.libregrtest.runtest_mp import run_tests_worker
run_tests_worker(self.ns.worker_args)

if self.ns.wait:
input("Press any key to continue...")
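A minimal sketch of how the accumulation and summary changes above fit together. The list and method names mirror the diff; the class itself is illustrative only, and the PASSED/FAILED values are placeholders (only TEST_DID_NOT_RUN = -6 appears in this change):

# Illustrative only: heavily simplified accumulator mirroring the diff's names.
PASSED = 1             # placeholder value
FAILED = 0             # placeholder value
TEST_DID_NOT_RUN = -6  # value added by this change

class MiniRegrtest:
    def __init__(self):
        self.good = []
        self.bad = []
        self.run_no_tests = []

    def accumulate_result(self, test, ok):
        if ok == PASSED:
            self.good.append(test)
        elif ok == FAILED:
            self.bad.append(test)
        elif ok == TEST_DID_NOT_RUN:
            self.run_no_tests.append(test)

    def get_tests_result(self):
        if self.bad:
            return "FAILURE"
        # New state: nothing passed or failed at all.
        if not any((self.good, self.bad)):
            return "NO TEST RUN"
        return "SUCCESS"

r = MiniRegrtest()
r.accumulate_result("test_spam", TEST_DID_NOT_RUN)
print(r.get_tests_result())   # NO TEST RUN
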
5 changes: 5 additions & 0 deletions Lib/test/libregrtest/runtest.py
@@ -19,6 +19,7 @@
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
TEST_DID_NOT_RUN = -6 # the test did not run any tests

_FORMAT_TEST_RESULT = {
PASSED: '%s passed',
@@ -28,6 +29,7 @@
RESOURCE_DENIED: '%s skipped (resource denied)',
INTERRUPTED: '%s interrupted',
CHILD_ERROR: '%s crashed',
TEST_DID_NOT_RUN: '%s run no tests',
}

# Minimum duration of a test to display its duration or to mention that
@@ -94,6 +96,7 @@ def runtest(ns, test):
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
TEST_DID_NOT_RUN test did not run any tests

If ns.xmlpath is not None, xml_data is a list containing each
generated testsuite element.
@@ -197,6 +200,8 @@ def test_runner():
else:
print("test", test, "failed", file=sys.stderr, flush=True)
return FAILED, test_time
except support.TestDidNotRun:
return TEST_DID_NOT_RUN, test_time
except:
msg = traceback.format_exc()
if not ns.pgo:
14 changes: 7 additions & 7 deletions Lib/test/libregrtest/runtest_mp.py
@@ -28,23 +28,23 @@


def run_test_in_subprocess(testname, ns):
"""Run the given test in a subprocess with --slaveargs.
"""Run the given test in a subprocess with --worker-args.

ns is the option Namespace parsed from command-line arguments. regrtest
is invoked in a subprocess with the --slaveargs argument; when the
is invoked in a subprocess with the --worker-args argument; when the
subprocess exits, its return code, stdout and stderr are returned as a
3-tuple.
"""
from subprocess import Popen, PIPE

ns_dict = vars(ns)
slaveargs = (ns_dict, testname)
slaveargs = json.dumps(slaveargs)
worker_args = (ns_dict, testname)
worker_args = json.dumps(worker_args)

cmd = [sys.executable, *support.args_from_interpreter_flags(),
'-u', # Unbuffered stdout and stderr
'-m', 'test.regrtest',
'--slaveargs', slaveargs]
'--worker-args', worker_args]
if ns.pgo:
cmd += ['--pgo']

@@ -62,8 +62,8 @@ def run_test_in_subprocess(testname, ns):
return retcode, stdout, stderr


def run_tests_slave(slaveargs):
ns_dict, testname = json.loads(slaveargs)
def run_tests_worker(worker_args):
ns_dict, testname = json.loads(worker_args)
ns = types.SimpleNamespace(**ns_dict)

setup_tests(ns)
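A minimal sketch of the --worker-args round trip implemented above: the parent serializes (vars(ns), testname) to JSON, and run_tests_worker() rebuilds a SimpleNamespace from it. The option attributes below are made up for the example:

import json
import types

# Parent side: serialize the options namespace and the test name.
ns = types.SimpleNamespace(verbose=1, pgo=False)   # made-up options
worker_args = json.dumps((vars(ns), 'test_os'))

# Worker side: decode and rebuild the namespace, as run_tests_worker() does.
ns_dict, testname = json.loads(worker_args)
worker_ns = types.SimpleNamespace(**ns_dict)
print(testname, worker_ns.verbose)   # test_os 1
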
7 changes: 6 additions & 1 deletion Lib/test/support/__init__.py
@@ -74,7 +74,7 @@
# globals
"PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
# exceptions
"Error", "TestFailed", "ResourceDenied",
"Error", "TestFailed", "TestDidNotRun", "ResourceDenied",
# imports
"import_module", "import_fresh_module", "CleanImport",
# modules
@@ -120,6 +120,9 @@ class Error(Exception):
class TestFailed(Error):
"""Test failed."""

class TestDidNotRun(Error):
"""Test did not run any subtests."""

class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.

@@ -1916,6 +1919,8 @@ def _run_suite(suite):
if junit_xml_list is not None:
junit_xml_list.append(result.get_xml_element())

if not result.testsRun:
raise TestDidNotRun
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
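The _run_suite() change above raises the new exception when a suite finishes without running anything. A self-contained sketch of that detection, using a local stand-in for test.support.TestDidNotRun:

import unittest

class TestDidNotRun(Exception):
    """Local stand-in for the new test.support.TestDidNotRun."""

# An empty suite models the case where a -m/--match filter selected nothing.
suite = unittest.TestSuite()
result = unittest.TestResult()
suite.run(result)

if not result.testsRun:
    raise TestDidNotRun
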
94 changes: 85 additions & 9 deletions Lib/test/test_regrtest.py
@@ -66,10 +66,10 @@ def test_wait(self):
ns = libregrtest._parse_args(['--wait'])
self.assertTrue(ns.wait)

def test_slaveargs(self):
ns = libregrtest._parse_args(['--slaveargs', '[[], {}]'])
self.assertEqual(ns.slaveargs, '[[], {}]')
self.checkError(['--slaveargs'], 'expected one argument')
def test_worker_args(self):
ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
self.assertEqual(ns.worker_args, '[[], {}]')
self.checkError(['--worker-args'], 'expected one argument')

def test_start(self):
for opt in '-S', '--start':
@@ -351,11 +351,20 @@ def setUp(self):
self.tmptestdir = tempfile.mkdtemp()
self.addCleanup(support.rmtree, self.tmptestdir)

def create_test(self, name=None, code=''):
def create_test(self, name=None, code=None):
if not name:
name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
BaseTestCase.TEST_UNIQUE_ID += 1

if code is None:
code = textwrap.dedent("""
import unittest

class Tests(unittest.TestCase):
def test_empty_test(self):
pass
""")

# test_regrtest cannot be run twice in parallel because
# of setUp() and create_test()
name = self.TESTNAME_PREFIX + name
@@ -390,7 +399,7 @@ def parse_executed_tests(self, output):

def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed=(), omitted=(),
rerun=(),
rerun=(), no_test_ran=(),
randomize=False, interrupted=False,
fail_env_changed=False):
if isinstance(tests, str):
@@ -405,6 +414,8 @@ def check_executed_tests(self, output, tests, skipped=(), failed=(),
omitted = [omitted]
if isinstance(rerun, str):
rerun = [rerun]
if isinstance(no_test_ran, str):
no_test_ran = [no_test_ran]

executed = self.parse_executed_tests(output)
if randomize:
@@ -447,8 +458,12 @@ def list_regex(line_format, tests):
regex = "Re-running test %r in verbose mode" % name
self.check_line(output, regex)

if no_test_ran:
regex = list_regex('%s test%s run no tests', no_test_ran)
self.check_line(output, regex)

good = (len(tests) - len(skipped) - len(failed)
- len(omitted) - len(env_changed))
- len(omitted) - len(env_changed) - len(no_test_ran))
if good:
regex = r'%s test%s OK\.$' % (good, plural(good))
if not skipped and not failed and good > 1:
@@ -465,12 +480,16 @@ def list_regex(line_format, tests):
result.append('ENV CHANGED')
if interrupted:
result.append('INTERRUPTED')
if not result:
if not any((good, result, failed, interrupted, skipped,
env_changed, fail_env_changed)):
result.append("NO TEST RUN")
elif not result:
result.append('SUCCESS')
result = ', '.join(result)
if rerun:
self.check_line(output, 'Tests result: %s' % result)
result = 'FAILURE then %s' % result

self.check_line(output, 'Tests result: %s' % result)

def parse_random_seed(self, output):
@@ -649,7 +668,14 @@ def test_resources(self):
# test -u command line option
tests = {}
for resource in ('audio', 'network'):
code = 'from test import support\nsupport.requires(%r)' % resource
code = textwrap.dedent("""
from test import support; support.requires(%r)
import unittest
class PassingTest(unittest.TestCase):
def test_pass(self):
pass
""" % resource)

tests[resource] = self.create_test(resource, code)
test_names = sorted(tests.values())

@@ -983,6 +1009,56 @@ def test_bug(self):
output = self.run_tests("-w", testname, exitcode=2)
self.check_executed_tests(output, [testname],
failed=testname, rerun=testname)

def test_no_tests_ran(self):
code = textwrap.dedent("""
import unittest

class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)

output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
self.check_executed_tests(output, [testname], no_test_ran=testname)

def test_no_tests_ran_multiple_tests_nonexistent(self):
code = textwrap.dedent("""
import unittest

class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
testname2 = self.create_test(code=code)

output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname, testname2])

def test_no_test_ran_some_test_exist_some_not(self):
code = textwrap.dedent("""
import unittest

class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
other_code = textwrap.dedent("""
import unittest

class Tests(unittest.TestCase):
def test_other_bug(self):
pass
""")
testname2 = self.create_test(code=other_code)

output = self.run_tests(testname, testname2, "-m", "nosuchtest",
"-m", "test_other_bug", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname])


class TestUtils(unittest.TestCase):
@@ -0,0 +1,3 @@
regrtest issues a warning when no tests have been executed in a particular
test file. Also, a new final result state is issued if no tests have been
executed across all test files. Patch by Pablo Galindo.
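
Based on the new test_regrtest cases, the user-visible effect can be checked roughly as below; the test name and match pattern are arbitrary, and the expected strings are the ones asserted by check_executed_tests():

# Illustrative check: run one test file with a match pattern that selects
# nothing, then look for the new summary state in the output.
import subprocess
import sys

proc = subprocess.run(
    [sys.executable, '-m', 'test', 'test_os', '-m', 'nosuchtest'],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
    universal_newlines=True)
print('run no tests' in proc.stdout)                # expected: True
print('Tests result: NO TEST RUN' in proc.stdout)   # expected: True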