diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2dcfd98..63a193e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -37,11 +37,20 @@ jobs: python3 -m pip install -U wheel setuptools python3 -m pip install sphinx python3 -m pip install ".[test]" + python3 -m pip install ruff - name: Tests run: | python -W once -m testtools.run testrepository.tests.test_suite + - name: Lint + run: | + ruff check . + + - name: Check formatting + run: | + ruff format --check . + success: needs: build runs-on: ubuntu-latest diff --git a/.testr.conf b/.testr.conf index 8cd68fb..1f997b1 100644 --- a/.testr.conf +++ b/.testr.conf @@ -1,5 +1,5 @@ [DEFAULT] -test_command=${PYTHON:-python} -m subunit.run $LISTOPT $IDOPTION testrepository.tests.test_suite +test_command=${PYTHON:-python3} -m subunit.run $LISTOPT $IDOPTION testrepository.tests.test_suite test_id_option=--load-list $IDFILE test_list_option=--list ;filter_tags=worker-0 diff --git a/testr b/testr index 6e72414..52b97b6 100755 --- a/testr +++ b/testr @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright (c) 2009 Testrepository Contributors # diff --git a/testrepository/__init__.py b/testrepository/__init__.py index c4a2a17..071dc3c 100644 --- a/testrepository/__init__.py +++ b/testrepository/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -22,6 +22,9 @@ The tests package contains tests and test specific support code. 
""" +__all__ = ["__version__"] + + try: # If setuptools_scm is installed (e.g. in a development environment with # an editable install), then use it to determine the version dynamically. diff --git a/testrepository/arguments/__init__.py b/testrepository/arguments/__init__.py index 2dad338..d14aa11 100644 --- a/testrepository/arguments/__init__.py +++ b/testrepository/arguments/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -61,19 +61,18 @@ def __init__(self, name, min=1, max=1): def summary(self): """Get a regex-like summary of this argument.""" result = self.name - if (self.minimum_count == self.maximum_count and - self.minimum_count == 1): - return result + if self.minimum_count == self.maximum_count and self.minimum_count == 1: + return result minmax = (self.minimum_count, self.maximum_count) if minmax == (0, 1): - return result + '?' + return result + "?" if minmax == (1, None): - return result + '+' + return result + "+" if minmax == (0, None): - return result + '*' - if minmax[1] == None: - minmax = (minmax[0], '') - return result + '{%s,%s}' % minmax + return result + "*" + if minmax[1] is None: + minmax = (minmax[0], "") + return result + "{%s,%s}" % minmax def parse(self, argv): """Evaluate arguments in argv. 
@@ -87,7 +86,8 @@ def parse(self, argv): result = [] error = None while len(argv) > count and ( - self.maximum_count is None or count < self.maximum_count): + self.maximum_count is None or count < self.maximum_count + ): arg = argv[count] count += 1 try: @@ -100,13 +100,13 @@ def parse(self, argv): if count < self.minimum_count: if error is not None: raise error[1].with_traceback(error[2]) - raise ValueError('not enough arguments present/matched in %s' % argv) + raise ValueError("not enough arguments present/matched in %s" % argv) del argv[:count] return result def _parse_one(self, arg): """Parse a single argument. - + :param arg: An arg from an argv. :result: The parsed argument. :raises ValueError: If the arg cannot be parsed/validated. diff --git a/testrepository/arguments/command.py b/testrepository/arguments/command.py index 1996b0b..4a9f697 100644 --- a/testrepository/arguments/command.py +++ b/testrepository/arguments/command.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -19,10 +19,10 @@ class CustomError(ValueError): - def __str__(self): return self.args[0] + class CommandArgument(AbstractArgument): """An argument that looks up a command.""" diff --git a/testrepository/arguments/doubledash.py b/testrepository/arguments/doubledash.py index 5ab93ce..8f6ae7b 100644 --- a/testrepository/arguments/doubledash.py +++ b/testrepository/arguments/doubledash.py @@ -1,11 +1,11 @@ # # Copyright (c) 2012 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -21,9 +21,9 @@ class DoubledashArgument(AbstractArgument): """An argument that captures '--'.""" def __init__(self): - super(DoubledashArgument, self).__init__('doubledash', min=0) + super(DoubledashArgument, self).__init__("doubledash", min=0) def _parse_one(self, arg): - if arg != '--': - raise ValueError('not a doubledash %r' % (arg,)) + if arg != "--": + raise ValueError("not a doubledash %r" % (arg,)) return arg diff --git a/testrepository/arguments/path.py b/testrepository/arguments/path.py index 56a621c..b435f6c 100644 --- a/testrepository/arguments/path.py +++ b/testrepository/arguments/path.py @@ -1,11 +1,11 @@ # # Copyright (c) 2012 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. 
-# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -23,8 +23,8 @@ class ExistingPathArgument(AbstractArgument): """An argument that stores a string verbatim.""" def _parse_one(self, arg): - if arg == '--': - raise ValueError('-- is not a valid argument') + if arg == "--": + raise ValueError("-- is not a valid argument") if not os.path.exists(arg): - raise ValueError('No such path %r' % (arg,)) + raise ValueError("No such path %r" % (arg,)) return arg diff --git a/testrepository/arguments/string.py b/testrepository/arguments/string.py index b418fe8..0a01627 100644 --- a/testrepository/arguments/string.py +++ b/testrepository/arguments/string.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -21,6 +21,6 @@ class StringArgument(AbstractArgument): """An argument that stores a string verbatim.""" def _parse_one(self, arg): - if arg == '--': - raise ValueError('-- is not a valid argument') + if arg == "--": + raise ValueError("-- is not a valid argument") return arg diff --git a/testrepository/commands/__init__.py b/testrepository/commands/__init__.py index 0615b77..64535c9 100644 --- a/testrepository/commands/__init__.py +++ b/testrepository/commands/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -19,7 +19,7 @@ Actual commands can be found in testrepository.commands.$commandname. -For example, testrepository.commands.init is the init command name, and +For example, testrepository.commands.init is the init command name, and testrepository.command.show_stats would be the show-stats command (if one existed). The Command discovery logic looks for a class in the module with the same name - e.g. tesrepository.commands.init.init would be the class. 
@@ -37,13 +37,12 @@ import os import sys -import subunit - from testrepository.repository import file + def _find_command(cmd_name): orig_cmd_name = cmd_name - cmd_name = cmd_name.replace('-', '_') + cmd_name = cmd_name.replace("-", "_") classname = "%s" % cmd_name modname = "testrepository.commands.%s" % cmd_name try: @@ -54,8 +53,9 @@ def _find_command(cmd_name): if result is None: raise KeyError( "Malformed command module - no command class %s found in module %s." - % (classname, modname)) - if getattr(result, 'name', None) is None: + % (classname, modname) + ) + if getattr(result, "name", None) is None: # Store the name for the common case of name == lookup path. result.name = orig_cmd_name return result @@ -69,13 +69,13 @@ def iter_commands(): # For now, only support regular installs. TODO: support zip, eggs. for filename in os.listdir(path): base = os.path.basename(filename) - if base.startswith('.'): + if base.startswith("."): continue - name = base.split('.', 1)[0] - name = name.replace('_', '-') + name = base.split(".", 1)[0] + name = name.replace("_", "-") names.add(name) - names.discard('--init--') - names.discard('--pycache--') + names.discard("--init--") + names.discard("--pycache--") names = sorted(names) for name in names: yield _find_command(name) @@ -91,7 +91,7 @@ class Command(object): :ivar ui: a UI object which is responsible for brokering the command arguments, input and output. There is no default ui, it must be passed to the constructor. - + :ivar repository_factory: a repository factory which is used to create or open repositories. The default repository factory is suitable for use in the command line tool. @@ -129,7 +129,7 @@ def execute(self): This interrogates the UI to ensure that arguments and options are supplied, performs any validation for the same that the command needs and finally calls run() to perform the command. 
Most commands should - not need to override this method, and any user wanting to run a + not need to override this method, and any user wanting to run a command should call this method. This is a synchronous method, and basically just a helper. GUI's or @@ -150,7 +150,7 @@ def execute(self): @classmethod def get_summary(klass): - docs = klass.__doc__.split('\n') + docs = klass.__doc__.split("\n") return docs[0] def _init(self): @@ -173,15 +173,16 @@ def run_argv(argv, stdin, stdout, stderr): cmd_name = None cmd_args = argv[1:] for arg in argv[1:]: - if not arg.startswith('-'): + if not arg.startswith("-"): cmd_name = arg break if cmd_name is None: - cmd_name = 'help' - cmd_args = ['help'] + cmd_name = "help" + cmd_args = ["help"] cmd_args.remove(cmd_name) cmdclass = _find_command(cmd_name) from testrepository.ui import cli + ui = cli.UI(cmd_args, stdin, stdout, stderr) cmd = cmdclass(ui) result = cmd.execute() @@ -203,11 +204,11 @@ def get_command_parser(cmd): parser = OptionParser() for option in cmd.options: parser.add_option(option) - usage = '%%prog %(cmd)s [options] %(args)s\n\n%(help)s' % { - 'args': ' '.join(map(lambda x:x.summary(), cmd.args)), - 'cmd': getattr(cmd, 'name', cmd), - 'help': getdoc(cmd), - } + usage = "%%prog %(cmd)s [options] %(args)s\n\n%(help)s" % { + "args": " ".join(map(lambda x: x.summary(), cmd.args)), + "cmd": getattr(cmd, "name", cmd), + "help": getdoc(cmd), + } parser.set_usage(usage) return parser diff --git a/testrepository/commands/commands.py b/testrepository/commands/commands.py index ed263ba..9ad53fc 100644 --- a/testrepository/commands/commands.py +++ b/testrepository/commands/commands.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. 
You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -16,11 +16,12 @@ import testrepository.commands + class commands(testrepository.commands.Command): """List available commands.""" def run(self): - table = [('command', 'description')] + table = [("command", "description")] for command in testrepository.commands.iter_commands(): table.append((command.name, command.get_summary())) self.ui.output_table(table) diff --git a/testrepository/commands/failing.py b/testrepository/commands/failing.py index 78fe901..ba96ddd 100644 --- a/testrepository/commands/failing.py +++ b/testrepository/commands/failing.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -17,7 +17,6 @@ import optparse import testtools -from testtools import ExtendedToStreamDecorator, MultiTestResult from testrepository.commands import Command from testrepository.testcommand import TestCommand @@ -25,7 +24,7 @@ class failing(Command): """Show the current failures known by the repository. 
- + Today this is the failures from the most recent run, but once partial and full runs are understood it will be all the failures from the last full run combined with any failures in subsequent partial runs, minus any @@ -39,12 +38,18 @@ class failing(Command): options = [ optparse.Option( - "--subunit", action="store_true", - default=False, help="Show output as a subunit stream."), + "--subunit", + action="store_true", + default=False, + help="Show output as a subunit stream.", + ), optparse.Option( - "--list", action="store_true", - default=False, help="Show only a list of failing tests."), - ] + "--list", + action="store_true", + default=False, + help="Show only a list of failing tests.", + ), + ] # Can be assigned to to inject a custom command factory. command_factory = TestCommand @@ -80,7 +85,6 @@ def run(self): else: result = 0 if self.ui.options.list: - failing_tests = [ - test for test, _ in summary.errors + summary.failures] + failing_tests = [test for test, _ in summary.errors + summary.failures] self.ui.output_tests(failing_tests) return result diff --git a/testrepository/commands/help.py b/testrepository/commands/help.py index f12a67a..f9f55cd 100644 --- a/testrepository/commands/help.py +++ b/testrepository/commands/help.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -19,25 +19,29 @@ from testrepository.commands import ( Command, get_command_parser, - ) +) + class help(Command): """Get help on a command.""" - args = [command.CommandArgument('command_name', min=0)] + args = [command.CommandArgument("command_name", min=0)] def run(self): - if not self.ui.arguments['command_name']: - version = '.'.join(map(str, testrepository.__version__)) - help = """testr %s -- a free test repository + if not self.ui.arguments["command_name"]: + version = ".".join(map(str, testrepository.__version__)) + help = ( + """testr %s -- a free test repository https://launchpad.net/testrepository/ testr commands -- list commands testr quickstart -- starter documentation testr help [command] -- help system -""" % version +""" + % version + ) else: - cmd = self.ui.arguments['command_name'][0] + cmd = self.ui.arguments["command_name"][0] parser = get_command_parser(cmd) help = parser.format_help() self.ui.output_rest(help) diff --git a/testrepository/commands/init.py b/testrepository/commands/init.py index c3ad890..ab40d69 100644 --- a/testrepository/commands/init.py +++ b/testrepository/commands/init.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -16,6 +16,7 @@ from testrepository.commands import Command + class init(Command): """Create a new repository.""" diff --git a/testrepository/commands/last.py b/testrepository/commands/last.py index 4c73b04..f8e8d0d 100644 --- a/testrepository/commands/last.py +++ b/testrepository/commands/last.py @@ -1,6 +1,6 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in @@ -16,7 +16,6 @@ import optparse -import testtools from testrepository.commands import Command from testrepository.testcommand import TestCommand @@ -35,9 +34,12 @@ class last(Command): options = [ optparse.Option( - "--subunit", action="store_true", - default=False, help="Show output as a subunit stream."), - ] + "--subunit", + action="store_true", + default=False, + help="Show output as a subunit stream.", + ), + ] # Can be assigned to to inject a custom command factory. command_factory = TestCommand @@ -57,7 +59,8 @@ def run(self): previous_run = None failed = False result, summary = self.ui.make_result( - latest_run.get_id, testcommand, previous_run=previous_run) + latest_run.get_id, testcommand, previous_run=previous_run + ) result.startTestRun() try: case.run(result) diff --git a/testrepository/commands/list_tests.py b/testrepository/commands/list_tests.py index fed31af..a124e59 100644 --- a/testrepository/commands/list_tests.py +++ b/testrepository/commands/list_tests.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. 
-# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -16,7 +16,6 @@ from io import BytesIO -from testtools import TestResult from testrepository.arguments.doubledash import DoubledashArgument from testrepository.arguments.string import StringArgument @@ -25,11 +24,17 @@ class list_tests(Command): - __doc__ = """Lists the tests for a project. - """ + testrconf_help + __doc__ = ( + """Lists the tests for a project. + """ + + testrconf_help + ) - args = [StringArgument('testfilters', 0, None), DoubledashArgument(), - StringArgument('testargs', 0, None)] + args = [ + StringArgument("testfilters", 0, None), + DoubledashArgument(), + StringArgument("testargs", 0, None), + ] # Can be assigned to to inject a custom command factory. command_factory = TestCommand @@ -37,12 +42,13 @@ def run(self): testcommand = self.command_factory(self.ui, None) ids = None filters = None - if self.ui.arguments['testfilters']: - filters = self.ui.arguments['testfilters'] + if self.ui.arguments["testfilters"]: + filters = self.ui.arguments["testfilters"] testcommand.setUp() try: cmd = testcommand.get_run_command( - ids, self.ui.arguments['testargs'], test_filters=filters) + ids, self.ui.arguments["testargs"], test_filters=filters + ) cmd.setUp() try: # Ugh. 
@@ -53,7 +59,7 @@ def run(self): ids = cmd.test_ids stream = BytesIO() for id in ids: - stream.write(('%s\n' % id).encode('utf8')) + stream.write(("%s\n" % id).encode("utf8")) stream.seek(0) self.ui.output_stream(stream) return 0 diff --git a/testrepository/commands/load.py b/testrepository/commands/load.py index e17d8a6..fd3d7e8 100644 --- a/testrepository/commands/load.py +++ b/testrepository/commands/load.py @@ -15,7 +15,6 @@ """Load data into a repository.""" from functools import partial -from operator import methodcaller import optparse import threading @@ -27,6 +26,7 @@ from testrepository.repository import RepositoryNotFound from testrepository.testcommand import TestCommand + class InputToStreamResult(object): """Generate Stream events from stdin. @@ -44,8 +44,8 @@ def run(self, result): char = self.source.read(1) if not char: return - if char == b'a': - result.status(test_id='stdin', test_status='fail') + if char == b"a": + result.status(test_id="stdin", test_status="fail") class load(Command): @@ -57,22 +57,35 @@ class load(Command): Unless the stream is a partial stream, any existing failures are discarded. 
""" - input_streams = ['subunit+', 'interactive?'] + input_streams = ["subunit+", "interactive?"] - args = [ExistingPathArgument('streams', min=0, max=None)] + args = [ExistingPathArgument("streams", min=0, max=None)] options = [ - optparse.Option("--partial", action="store_true", - default=False, help="The stream being loaded was a partial run."), optparse.Option( - "--force-init", action="store_true", + "--partial", + action="store_true", default=False, - help="Initialise the repository if it does not exist already"), - optparse.Option("--subunit", action="store_true", - default=False, help="Display results in subunit format."), - optparse.Option("--full-results", action="store_true", + help="The stream being loaded was a partial run.", + ), + optparse.Option( + "--force-init", + action="store_true", default=False, - help="No-op - deprecated and kept only for backwards compat."), - ] + help="Initialise the repository if it does not exist already", + ), + optparse.Option( + "--subunit", + action="store_true", + default=False, + help="Display results in subunit format.", + ), + optparse.Option( + "--full-results", + action="store_true", + default=False, + help="No-op - deprecated and kept only for backwards compat.", + ), + ] # Can be assigned to to inject a custom command factory. command_factory = TestCommand @@ -92,21 +105,25 @@ def run(self): # XXX: Be nice if we could declare that the argument, which is a path, # is to be an input stream - and thus push this conditional down into # the UI object. 
- if self.ui.arguments.get('streams'): - opener = partial(open, mode='rb') - streams = map(opener, self.ui.arguments['streams']) + if self.ui.arguments.get("streams"): + opener = partial(open, mode="rb") + streams = map(opener, self.ui.arguments["streams"]) else: - streams = self.ui.iter_streams('subunit') - mktagger = lambda pos, result:testtools.StreamTagger( - [result], add=['worker-%d' % pos]) + streams = self.ui.iter_streams("subunit") + + def mktagger(pos, result): + return testtools.StreamTagger([result], add=["worker-%d" % pos]) + def make_tests(): for pos, stream in enumerate(streams): # Calls StreamResult API. case = subunit.ByteStreamToStreamResult( - stream, non_subunit_name='stdout') + stream, non_subunit_name="stdout" + ) decorate = partial(mktagger, pos) case = testtools.DecorateTestCaseResult(case, decorate) yield (case, str(pos)) + case = testtools.ConcurrentStreamTestSuite(make_tests) # One unmodified copy of the stream to repository storage inserter = repo.get_inserter(partial=self.ui.options.partial) @@ -117,17 +134,17 @@ def make_tests(): except KeyError: previous_run = None output_result, summary_result = self.ui.make_result( - inserter.get_id, testcommand, previous_run=previous_run) + inserter.get_id, testcommand, previous_run=previous_run + ) result = testtools.CopyStreamResult([inserter, output_result]) runner_thread = None result.startTestRun() try: # Convert user input into a stdin event stream - interactive_streams = list(self.ui.iter_streams('interactive')) + interactive_streams = list(self.ui.iter_streams("interactive")) if interactive_streams: case = InputToStreamResult(interactive_streams[0]) - runner_thread = threading.Thread( - target=case.run, args=(result,)) + runner_thread = threading.Thread(target=case.run, args=(result,)) runner_thread.daemon = True runner_thread.start() case.run(result) diff --git a/testrepository/commands/quickstart.py b/testrepository/commands/quickstart.py index 318ab9d..b942b2b 100644 --- 
a/testrepository/commands/quickstart.py +++ b/testrepository/commands/quickstart.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -16,6 +16,7 @@ from testrepository.commands import Command + class quickstart(Command): """Introductory documentation for testrepository.""" diff --git a/testrepository/commands/run.py b/testrepository/commands/run.py index 8d189af..ba0332c 100644 --- a/testrepository/commands/run.py +++ b/testrepository/commands/run.py @@ -17,13 +17,9 @@ from io import BytesIO from math import ceil import optparse -import re import subunit import testtools -from testtools import ( - TestByTestResult, - ) from testrepository.arguments.doubledash import DoubledashArgument from testrepository.arguments.string import StringArgument @@ -35,7 +31,7 @@ from testrepository.testlist import parse_list -LINEFEED = b'\n'[0] +LINEFEED = b"\n"[0] class ReturnCodeToSubunit(object): @@ -73,17 +69,21 @@ def _append_return_code_as_test(self): # line. V2 needs to start on any fresh utf8 character border # - which is not guaranteed in an arbitrary stream endpoint, so # injecting a \n gives us such a guarantee. 
- self.source.write(b'\n') + self.source.write(b"\n") stream = subunit.StreamResultToBytes(self.source) - stream.status(test_id='process-returncode', test_status='fail', - file_name='traceback', mime_type='text/plain;charset=utf8', - file_bytes=('returncode %d' % returncode).encode('utf8')) + stream.status( + test_id="process-returncode", + test_status="fail", + file_name="traceback", + mime_type="text/plain;charset=utf8", + file_bytes=("returncode %d" % returncode).encode("utf8"), + ) self.source.seek(0) self.done = True def read(self, count=-1): if count == 0: - return b'' + return b"" result = self.source.read(count) if result: self.lastoutput = result[-1] @@ -109,42 +109,83 @@ def readlines(self): class run(Command): - __doc__ = """Run the tests for a project and load them into testrepository. - """ + testrconf_help + __doc__ = ( + """Run the tests for a project and load them into testrepository. + """ + + testrconf_help + ) options = [ - optparse.Option("--failing", action="store_true", - default=False, help="Run only tests known to be failing."), - optparse.Option("--parallel", action="store_true", - default=False, help="Run tests in parallel processes."), - optparse.Option("--concurrency", action="store", type="int", default=0, - help="How many processes to use. The default (0) autodetects your CPU count."), - optparse.Option("--load-list", default=None, - help="Only run tests listed in the named file."), - optparse.Option("--partial", action="store_true", + optparse.Option( + "--failing", + action="store_true", default=False, - help="Only some tests will be run. 
Implied by --failing."), - optparse.Option("--subunit", action="store_true", - default=False, help="Display results in subunit format."), + help="Run only tests known to be failing.", + ), optparse.Option( - "--force-init", action="store_true", + "--parallel", + action="store_true", default=False, - help="Initialise the repository if it does not exist already"), - optparse.Option("--full-results", action="store_true", + help="Run tests in parallel processes.", + ), + optparse.Option( + "--concurrency", + action="store", + type="int", + default=0, + help="How many processes to use. The default (0) autodetects your CPU count.", + ), + optparse.Option( + "--load-list", default=None, help="Only run tests listed in the named file." + ), + optparse.Option( + "--partial", + action="store_true", default=False, - help="No-op - deprecated and kept only for backwards compat."), - optparse.Option("--until-failure", action="store_true", + help="Only some tests will be run. Implied by --failing.", + ), + optparse.Option( + "--subunit", + action="store_true", + default=False, + help="Display results in subunit format.", + ), + optparse.Option( + "--force-init", + action="store_true", default=False, - help="Repeat the run again and again until failure occurs."), - optparse.Option("--analyze-isolation", action="store_true", + help="Initialise the repository if it does not exist already", + ), + optparse.Option( + "--full-results", + action="store_true", default=False, - help="Search the last test run for 2-test test isolation interactions."), - optparse.Option("--isolated", action="store_true", + help="No-op - deprecated and kept only for backwards compat.", + ), + optparse.Option( + "--until-failure", + action="store_true", default=False, - help="Run each test id in a separate test runner."), - ] - args = [StringArgument('testfilters', 0, None), DoubledashArgument(), - StringArgument('testargs', 0, None)] + help="Repeat the run again and again until failure occurs.", + ), + 
optparse.Option( + "--analyze-isolation", + action="store_true", + default=False, + help="Search the last test run for 2-test test isolation interactions.", + ), + optparse.Option( + "--isolated", + action="store_true", + default=False, + help="Run each test id in a separate test runner.", + ), + ] + args = [ + StringArgument("testfilters", 0, None), + DoubledashArgument(), + StringArgument("testargs", 0, None), + ] # Can be assigned to to inject a custom command factory. command_factory = TestCommand @@ -152,9 +193,11 @@ def _find_failing(self, repo): run = repo.get_failing() case = run.get_test() ids = [] + def gather_errors(test_dict): - if test_dict['status'] == 'fail': - ids.append(test_dict['id']) + if test_dict["status"] == "fail": + ids.append(test_dict["id"]) + result = testtools.StreamToDict(gather_errors) result.startTestRun() try: @@ -178,7 +221,7 @@ def run(self): if self.ui.options.load_list: list_ids = set() # Should perhaps be text.. currently does its own decode. - with open(self.ui.options.load_list, 'rb') as list_file: + with open(self.ui.options.load_list, "rb") as list_file: list_ids = set(parse_list(list_file.read())) if ids is None: # Use the supplied list verbatim @@ -187,16 +230,17 @@ def run(self): # We have some already limited set of ids, just reduce to ids # that are both failing and listed. 
ids = list_ids.intersection(ids) - if self.ui.arguments['testfilters']: - filters = self.ui.arguments['testfilters'] + if self.ui.arguments["testfilters"]: + filters = self.ui.arguments["testfilters"] else: filters = None testcommand = self.command_factory(self.ui, repo) testcommand.setUp() try: if not self.ui.options.analyze_isolation: - cmd = testcommand.get_run_command(ids, self.ui.arguments['testargs'], - test_filters = filters) + cmd = testcommand.get_run_command( + ids, self.ui.arguments["testargs"], test_filters=filters + ) if self.ui.options.isolated: result = 0 cmd.setUp() @@ -205,8 +249,11 @@ def run(self): finally: cmd.cleanUp() for test_id in ids: - cmd = testcommand.get_run_command([test_id], - self.ui.arguments['testargs'], test_filters=filters) + cmd = testcommand.get_run_command( + [test_id], + self.ui.arguments["testargs"], + test_filters=filters, + ) run_result = self._run_tests(cmd) if run_result > result: result = run_result @@ -223,8 +270,9 @@ def run(self): # reduced by testfilters) to eliminate fails-on-own tests. spurious_failures = set() for test_id in ids: - cmd = testcommand.get_run_command([test_id], - self.ui.arguments['testargs'], test_filters = filters) + cmd = testcommand.get_run_command( + [test_id], self.ui.arguments["testargs"], test_filters=filters + ) if not self._run_tests(cmd): # If the test was filtered, it won't have been run. if test_id in repo.get_test_ids(repo.latest_id()): @@ -245,24 +293,26 @@ def run(self): # spurious-failure -> cause. 
test_conflicts = {} for spurious_failure in spurious_failures: - candidate_causes = self._prior_tests( - latest_run, spurious_failure) + candidate_causes = self._prior_tests(latest_run, spurious_failure) bottom = 0 top = len(candidate_causes) width = top - bottom while width: check_width = int(ceil(width / 2.0)) cmd = testcommand.get_run_command( - candidate_causes[bottom:bottom + check_width] + candidate_causes[bottom : bottom + check_width] + [spurious_failure], - self.ui.arguments['testargs']) + self.ui.arguments["testargs"], + ) self._run_tests(cmd) # check that the test we're probing still failed - still # awkward. found_fail = [] + def find_fail(test_dict): - if test_dict['id'] == spurious_failure: + if test_dict["id"] == spurious_failure: found_fail.append(True) + checker = testtools.StreamToDict(find_fail) checker.startTestRun() try: @@ -274,8 +324,9 @@ def find_fail(test_dict): top = bottom + check_width if width == 1: # found the cause - test_conflicts[ - spurious_failure] = candidate_causes[bottom] + test_conflicts[spurious_failure] = candidate_causes[ + bottom + ] width = 0 else: width = top - bottom @@ -290,9 +341,9 @@ def find_fail(test_dict): width = top - bottom if spurious_failure not in test_conflicts: # Could not determine cause - test_conflicts[spurious_failure] = 'unknown - no conflicts' + test_conflicts[spurious_failure] = "unknown - no conflicts" if test_conflicts: - table = [('failing test', 'caused by test')] + table = [("failing test", "caused by test")] for failure, causes in test_conflicts.items(): table.append((failure, causes)) self.ui.output_table(table) @@ -306,7 +357,7 @@ def _prior_tests(self, run, failing_id): Tests that ran in a different worker are not included in the result. """ - if not getattr(self, '_worker_to_test', False): + if not getattr(self, "_worker_to_test", False): # TODO: switch to route codes? 
case = run.get_test() # Use None if there is no worker-N tag @@ -315,18 +366,20 @@ def _prior_tests(self, run, failing_id): worker_to_test = {} # (testid -> [workerN, ...]) test_to_worker = {} + def map_test(test_dict): - tags = test_dict['tags'] - id = test_dict['id'] + tags = test_dict["tags"] + id = test_dict["id"] workers = [] for tag in tags: - if tag.startswith('worker-'): + if tag.startswith("worker-"): workers.append(tag) if not workers: workers = [None] for worker in workers: worker_to_test.setdefault(worker, []).append(id) test_to_worker.setdefault(id, []).extend(workers) + mapper = testtools.StreamToDict(map_test) mapper.startTestRun() try: @@ -339,23 +392,31 @@ def map_test(test_dict): prior_tests = [] for worker in failing_workers: worker_tests = self._worker_to_test[worker] - prior_tests.extend(worker_tests[:worker_tests.index(failing_id)]) + prior_tests.extend(worker_tests[: worker_tests.index(failing_id)]) return prior_tests def _run_tests(self, cmd): """Run the tests cmd was parameterised with.""" cmd.setUp() try: + def run_tests(): - run_procs = [('subunit', ReturnCodeToSubunit(proc)) for proc in cmd.run_tests()] + run_procs = [ + ("subunit", ReturnCodeToSubunit(proc)) for proc in cmd.run_tests() + ] options = {} - if (self.ui.options.failing or self.ui.options.analyze_isolation - or self.ui.options.isolated): - options['partial'] = True - load_ui = decorator.UI(input_streams=run_procs, options=options, - decorated=self.ui) + if ( + self.ui.options.failing + or self.ui.options.analyze_isolation + or self.ui.options.isolated + ): + options["partial"] = True + load_ui = decorator.UI( + input_streams=run_procs, options=options, decorated=self.ui + ) load_cmd = load(load_ui) return load_cmd.execute() + if not self.ui.options.until_failure: return run_tests() else: diff --git a/testrepository/commands/slowest.py b/testrepository/commands/slowest.py index a63f813..0c8ef73 100644 --- a/testrepository/commands/slowest.py +++ 
b/testrepository/commands/slowest.py @@ -29,26 +29,30 @@ class slowest(Command): """ DEFAULT_ROWS_SHOWN = 10 - TABLE_HEADER = ('Test id', 'Runtime (s)') + TABLE_HEADER = ("Test id", "Runtime (s)") options = [ optparse.Option( - "--all", action="store_true", - default=False, help="Show timing for all tests."), - ] + "--all", + action="store_true", + default=False, + help="Show timing for all tests.", + ), + ] @staticmethod def format_times(times): times = list(times) precision = 3 - digits_before_point = int( - math.log10(times[0][1])) + 1 + digits_before_point = int(math.log10(times[0][1])) + 1 min_length = digits_before_point + precision + 1 + def format_time(time): # Limit the number of digits after the decimal # place, and also enforce a minimum width # based on the longest duration return "%*.*f" % (min_length, precision, time) + times = [(name, format_time(time)) for name, time in times] return times @@ -60,11 +64,11 @@ def run(self): return 3 # what happens when there is no timing info? test_times = repo.get_test_times(repo.get_test_ids(latest_id)) - known_times =list( test_times['known'].items()) + known_times = list(test_times["known"].items()) known_times.sort(key=itemgetter(1), reverse=True) if len(known_times) > 0: if not self.ui.options.all: - known_times = known_times[:self.DEFAULT_ROWS_SHOWN] + known_times = known_times[: self.DEFAULT_ROWS_SHOWN] known_times = self.format_times(known_times) rows = [self.TABLE_HEADER] + known_times self.ui.output_table(rows) diff --git a/testrepository/commands/stats.py b/testrepository/commands/stats.py index 3c349d7..e738d58 100644 --- a/testrepository/commands/stats.py +++ b/testrepository/commands/stats.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. 
You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -14,17 +14,17 @@ """Report stats about a repository. Current vestigial.""" - from testrepository.commands import Command + class stats(Command): """Report stats about a repository. - + This is currently vestigial, but should grow to be the main entry point for getting summary information about the repository. """ def run(self): repo = self.repository_factory.open(self.ui.here) - self.ui.output_values([('runs', repo.count())]) + self.ui.output_values([("runs", repo.count())]) return 0 diff --git a/testrepository/repository/__init__.py b/testrepository/repository/__init__.py index b58c0a2..30ee805 100644 --- a/testrepository/repository/__init__.py +++ b/testrepository/repository/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -16,7 +16,7 @@ A Repository provides storage and indexing of results. -The AbstractRepository class defines the contract to which any Repository +The AbstractRepository class defines the contract to which any Repository implementation must adhere. The file submodule is the usual repository that code will use for local @@ -27,14 +27,14 @@ the initialize function in the appropriate repository module. 
""" -from testtools import StreamToDict, TestResult +from testtools import StreamToDict class AbstractRepositoryFactory(object): """Interface for making or opening repositories.""" def initialise(self, url): - """Create a repository at URL. + """Create a repository at URL. Call on the class of the repository you wish to create. """ @@ -56,7 +56,7 @@ class AbstractRepository(object): def count(self): """Return the number of test runs this repository has stored. - + :return count: The count of test runs stored in the repositor. """ raise NotImplementedError(self.count) @@ -83,10 +83,10 @@ def get_inserter(self, partial=False): stopTestRun methods in particular must be called. """ return self._get_inserter(partial) - + def _get_inserter(self): """Get an inserter for get_inserter. - + The result is decorated with an AutoTimingTestResultDecorator. """ raise NotImplementedError(self._get_inserter) @@ -143,8 +143,10 @@ def get_test_ids(self, run_id): """ run = self.get_test_run(run_id) ids = [] + def gather(test_dict): - ids.append(test_dict['id']) + ids.append(test_dict["id"]) + result = StreamToDict(gather) result.startTestRun() try: @@ -156,7 +158,7 @@ def gather(test_dict): class AbstractTestRun(object): """A test run that has been stored in a repository. - + Should implement the StreamResult protocol as well as the testrepository specific methods documented here. """ diff --git a/testrepository/repository/file.py b/testrepository/repository/file.py index c5c0621..c22e542 100644 --- a/testrepository/repository/file.py +++ b/testrepository/repository/file.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. 
-# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -15,6 +15,7 @@ """Persistent storage of test results.""" from io import BytesIO + try: import anydbm as dbm except ImportError: @@ -34,7 +35,7 @@ AbstractRepositoryFactory, AbstractTestRun, RepositoryNotFound, - ) +) from testrepository.utils import timedelta_to_seconds @@ -45,14 +46,13 @@ def atomicish_rename(source, target): class RepositoryFactory(AbstractRepositoryFactory): - def initialise(klass, url): """Create a repository at url/path.""" - base = os.path.join(os.path.expanduser(url), '.testrepository') + base = os.path.join(os.path.expanduser(url), ".testrepository") os.mkdir(base) - stream = open(os.path.join(base, 'format'), 'wt') + stream = open(os.path.join(base, "format"), "wt") try: - stream.write('1\n') + stream.write("1\n") finally: stream.close() result = Repository(base) @@ -61,25 +61,25 @@ def initialise(klass, url): def open(self, url): path = os.path.expanduser(url) - base = os.path.join(path, '.testrepository') + base = os.path.join(path, ".testrepository") try: - stream = open(os.path.join(base, 'format'), 'rt') + stream = open(os.path.join(base, "format"), "rt") except (IOError, OSError) as e: if e.errno == errno.ENOENT: raise RepositoryNotFound(url) raise with stream: - if '1\n' != stream.read(): + if "1\n" != stream.read(): raise ValueError(url) return Repository(base) class Repository(AbstractRepository): """Disk based storage of test results. - + This repository stores each stream it receives as a file in a directory. Indices are then built on top of this basic store. - + This particular disk layout is subject to change at any time, as its primarily a bootstrapping exercise at this point. Any changes made are likely to have an automatic upgrade process. 
@@ -91,7 +91,7 @@ def __init__(self, base): :param base: The path to the repository. """ self.base = base - + def _allocate(self): # XXX: lock the file. K?! value = self.count() @@ -99,7 +99,7 @@ def _allocate(self): return value def _next_stream(self): - with open(os.path.join(self.base, 'next-stream'), 'rt') as fp: + with open(os.path.join(self.base, "next-stream"), "rt") as fp: next_content = fp.read() try: return int(next_content) @@ -114,22 +114,22 @@ def latest_id(self): if result < 0: raise KeyError("No tests in repository") return result - + def get_failing(self): try: - with open( os.path.join(self.base, "failing"), 'rb') as fp: + with open(os.path.join(self.base, "failing"), "rb") as fp: run_subunit_content = fp.read() except IOError: err = sys.exc_info()[1] if err.errno == errno.ENOENT: - run_subunit_content = b'' + run_subunit_content = b"" else: raise return _DiskRun(None, run_subunit_content) def get_test_run(self, run_id): try: - with open( os.path.join(self.base, str(run_id)), 'rb') as fp: + with open(os.path.join(self.base, str(run_id)), "rb") as fp: run_subunit_content = fp.read() except IOError as e: if e.errno == errno.ENOENT: @@ -143,12 +143,12 @@ def _get_inserter(self, partial): def _get_test_times(self, test_ids): # May be too slow, but build and iterate. # 'c' because an existing repo may be missing a file. - db = dbm.open(self._path('times.dbm'), 'c') + db = dbm.open(self._path("times.dbm"), "c") try: result = {} for test_id in test_ids: - if type(test_id) != str: - test_id = test_id.encode('utf8') + if not isinstance(test_id, str): + test_id = test_id.encode("utf8") # gdbm does not support get(). try: duration = db[test_id] @@ -168,13 +168,13 @@ def _write_next_stream(self, value): # user, repo-per-working-tree model makes this acceptable in the short # term. Likewise we don't fsync - this data isn't valuable enough to # force disk IO. 
- prefix = self._path('next-stream') - stream = open(prefix + '.new', 'wt') + prefix = self._path("next-stream") + stream = open(prefix + ".new", "wt") try: - stream.write('%d\n' % value) + stream.write("%d\n" % value) finally: stream.close() - atomicish_rename(prefix + '.new', prefix) + atomicish_rename(prefix + ".new", prefix) class _DiskRun(AbstractTestRun): @@ -205,8 +205,9 @@ def get_subunit_stream(self): return output def get_test(self): - #case = subunit.ProtocolTestCase(self.get_subunit_stream()) + # case = subunit.ProtocolTestCase(self.get_subunit_stream()) case = subunit.ProtocolTestCase(BytesIO(self._content)) + def wrap_result(result): # Wrap in a router to mask out startTestRun/stopTestRun from the # ExtendedToStreamDecorator. @@ -214,37 +215,36 @@ def wrap_result(result): # Wrap that in ExtendedToStreamDecorator to convert v1 calls to # StreamResult. return testtools.ExtendedToStreamDecorator(result) + return testtools.DecorateTestCaseResult( - case, wrap_result, methodcaller('startTestRun'), - methodcaller('stopTestRun')) + case, wrap_result, methodcaller("startTestRun"), methodcaller("stopTestRun") + ) class _SafeInserter(object): - def __init__(self, repository, partial=False): # XXX: Perhaps should factor into a decorator and use an unaltered # TestProtocolClient. self._repository = repository fd, name = tempfile.mkstemp(dir=self._repository.base) self.fname = name - stream = os.fdopen(fd, 'wb') + stream = os.fdopen(fd, "wb") self.partial = partial # The time take by each test, flushed at the end. 
self._times = {} self._test_start = None self._time = None - subunit_client = testtools.StreamToExtendedDecorator( - TestProtocolClient(stream)) - self.hook = testtools.CopyStreamResult([ - subunit_client, - testtools.StreamToDict(self._handle_test)]) + subunit_client = testtools.StreamToExtendedDecorator(TestProtocolClient(stream)) + self.hook = testtools.CopyStreamResult( + [subunit_client, testtools.StreamToDict(self._handle_test)] + ) self._stream = stream def _handle_test(self, test_dict): - start, stop = test_dict['timestamps'] - if test_dict['status'] == 'exists' or None in (start, stop): + start, stop = test_dict["timestamps"] + if test_dict["status"] == "exists" or None in (start, stop): return - self._times[test_dict['id']] = str(timedelta_to_seconds(stop - start)) + self._times[test_dict["id"]] = str(timedelta_to_seconds(stop - start)) def startTestRun(self): self.hook.startTestRun() @@ -258,14 +258,14 @@ def stopTestRun(self): final_path = os.path.join(self._repository.base, str(run_id)) atomicish_rename(self.fname, final_path) # May be too slow, but build and iterate. - db = dbm.open(self._repository._path('times.dbm'), 'c') + db = dbm.open(self._repository._path("times.dbm"), "c") try: db_times = {} for key, value in self._times.items(): - if type(key) != str: - key = key.encode('utf8') + if not isinstance(key, str): + key = key.encode("utf8") db_times[key] = value - if getattr(db, 'update', None): + if getattr(db, "update", None): db.update(db_times) else: for key, value in db_times.items(): @@ -294,7 +294,6 @@ def _name(self): class _Inserter(_SafeInserter): - def _name(self): return self._repository._allocate() @@ -307,6 +306,7 @@ def stopTestRun(self): # Should just pull the failing items aside as they happen perhaps. # Or use a router and avoid using a memory object at all. 
from testrepository.repository import memory + repo = memory.Repository() if self.partial: # Seed with current failing @@ -315,7 +315,7 @@ def stopTestRun(self): failing = self._repository.get_failing() failing.get_test().run(inserter) inserter.stopTestRun() - inserter= testtools.ExtendedToStreamDecorator(repo.get_inserter(partial=True)) + inserter = testtools.ExtendedToStreamDecorator(repo.get_inserter(partial=True)) inserter.startTestRun() run = self._repository.get_test_run(self.get_id()) run.get_test().run(inserter) diff --git a/testrepository/repository/memory.py b/testrepository/repository/memory.py index 7fe159d..e67d792 100644 --- a/testrepository/repository/memory.py +++ b/testrepository/repository/memory.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -26,7 +26,7 @@ AbstractRepositoryFactory, AbstractTestRun, RepositoryNotFound, - ) +) class RepositoryFactory(AbstractRepositoryFactory): @@ -56,8 +56,8 @@ class Repository(AbstractRepository): def __init__(self): # Test runs: self._runs = [] - self._failing = OrderedDict() # id -> test - self._times = {} # id -> duration + self._failing = OrderedDict() # id -> test + self._times = {} # id -> duration def count(self): return len(self._runs) @@ -118,9 +118,10 @@ def wrap_result(result): # Wrap that in ExtendedToStreamDecorator to convert v1 calls to # StreamResult. 
return testtools.ExtendedToStreamDecorator(result) + return testtools.DecorateTestCaseResult( - self, wrap_result, methodcaller('startTestRun'), - methodcaller('stopTestRun')) + self, wrap_result, methodcaller("startTestRun"), methodcaller("stopTestRun") + ) def run(self, result): # Speaks original V1 protocol. @@ -141,21 +142,22 @@ def __init__(self, repository, partial): def startTestRun(self): self._subunit = BytesIO() serialiser = subunit.v2.StreamResultToBytes(self._subunit) - self._hook = testtools.CopyStreamResult([ - testtools.StreamToDict(self._handle_test), - serialiser]) + self._hook = testtools.CopyStreamResult( + [testtools.StreamToDict(self._handle_test), serialiser] + ) self._hook.startTestRun() def _handle_test(self, test_dict): self._tests.append(test_dict) - start, stop = test_dict['timestamps'] - if test_dict['status'] == 'exists' or None in (start, stop): + start, stop = test_dict["timestamps"] + if test_dict["status"] == "exists" or None in (start, stop): return duration_delta = stop - start - duration_seconds = ((duration_delta.microseconds + - (duration_delta.seconds + duration_delta.days * 24 * 3600) - * 10**6) / 10.0**6) - self._repository._times[test_dict['id']] = duration_seconds + duration_seconds = ( + duration_delta.microseconds + + (duration_delta.seconds + duration_delta.days * 24 * 3600) * 10**6 + ) / 10.0**6 + self._repository._times[test_dict["id"]] = duration_seconds def stopTestRun(self): self._hook.stopTestRun() @@ -164,8 +166,8 @@ def stopTestRun(self): if not self._partial: self._repository._failing = OrderedDict() for test_dict in self._tests: - test_id = test_dict['id'] - if test_dict['status'] == 'fail': + test_id = test_dict["id"] + if test_dict["status"] == "fail": case = testtools.testresult.real.test_dict_to_case(test_dict) self._repository._failing[test_id] = case else: @@ -190,9 +192,10 @@ def wrap_result(result): # Wrap that in ExtendedToStreamDecorator to convert v1 calls to # StreamResult. 
return testtools.ExtendedToStreamDecorator(result) + return testtools.DecorateTestCaseResult( - self, wrap_result, methodcaller('startTestRun'), - methodcaller('stopTestRun')) + self, wrap_result, methodcaller("startTestRun"), methodcaller("stopTestRun") + ) def run(self, result): # Speaks original. diff --git a/testrepository/results.py b/testrepository/results.py index 2babcb0..ec366f5 100644 --- a/testrepository/results.py +++ b/testrepository/results.py @@ -17,13 +17,12 @@ from testtools import ( StreamSummary, StreamResult, - ) +) from testrepository.utils import timedelta_to_seconds class SummarizingResult(StreamSummary): - def __init__(self): super(SummarizingResult, self).__init__() @@ -33,8 +32,8 @@ def startTestRun(self): self._last_time = None def status(self, *args, **kwargs): - if kwargs.get('timestamp') is not None: - timestamp = kwargs['timestamp'] + if kwargs.get("timestamp") is not None: + timestamp = kwargs["timestamp"] if self._last_time is None: self._first_time = timestamp self._last_time = timestamp @@ -53,21 +52,31 @@ def get_time_taken(self): return timedelta_to_seconds(self._last_time - self._first_time) -#XXX: Should be in testtools. +# XXX: Should be in testtools. 
class CatFiles(StreamResult): """Cat file attachments received to a stream.""" - + def __init__(self, byte_stream): self.stream = subunit.make_stream_binary(byte_stream) self.last_file = None - def status(self, test_id=None, test_status=None, test_tags=None, - runnable=True, file_name=None, file_bytes=None, eof=False, - mime_type=None, route_code=None, timestamp=None): + def status( + self, + test_id=None, + test_status=None, + test_tags=None, + runnable=True, + file_name=None, + file_bytes=None, + eof=False, + mime_type=None, + route_code=None, + timestamp=None, + ): if file_name is None: return if self.last_file != file_name: - self.stream.write(("--- %s ---\n" % file_name).encode('utf8')) + self.stream.write(("--- %s ---\n" % file_name).encode("utf8")) self.last_file = file_name self.stream.write(file_bytes) self.stream.flush() diff --git a/testrepository/setuptools_command.py b/testrepository/setuptools_command.py index 5fc830c..312abaf 100644 --- a/testrepository/setuptools_command.py +++ b/testrepository/setuptools_command.py @@ -39,24 +39,27 @@ class Testr(cmd.Command): - description = "Run unit tests using testr" user_options = [ - ('coverage', None, "Replace PYTHON with coverage and merge coverage " - "from each testr worker."), - ('testr-args=', 't', "Run 'testr' with these args"), - ('omit=', 'o', 'Files to omit from coverage calculations'), - ('coverage-package-name=', None, "Use this name for coverage package"), - ('slowest', None, "Show slowest test times after tests complete."), + ( + "coverage", + None, + "Replace PYTHON with coverage and merge coverage from each testr worker.", + ), + ("testr-args=", "t", "Run 'testr' with these args"), + ("omit=", "o", "Files to omit from coverage calculations"), + ("coverage-package-name=", None, "Use this name for coverage package"), + ("slowest", None, "Show slowest test times after tests complete."), ] - boolean_options = ['coverage', 'slowest'] + boolean_options = ["coverage", "slowest"] def _run_testr(self, 
*args): logger.info("_run_testr called") - return commands.run_argv([sys.argv[0]] + list(args), - sys.stdin, sys.stdout, sys.stderr) + return commands.run_argv( + [sys.argv[0]] + list(args), sys.stdin, sys.stdout, sys.stderr + ) def initialize_options(self): logger.info("initialize_options called") @@ -85,10 +88,9 @@ def run(self): self._coverage_before() testr_ret = self._run_testr("run", "--parallel", *self.testr_args) if testr_ret: - raise distutils.errors.DistutilsError( - "testr failed (%d)" % testr_ret) + raise distutils.errors.DistutilsError("testr failed (%d)" % testr_ret) if self.slowest: - print ("Slowest Tests") + print("Slowest Tests") self._run_testr("slowest") if self.coverage: self._coverage_after() @@ -96,15 +98,15 @@ def run(self): def _coverage_before(self): logger.info("_coverage_before called") package = self.distribution.get_name() - if package.startswith('python-'): + if package.startswith("python-"): package = package[7:] # Use this as coverage package name if self.coverage_package_name: package = self.coverage_package_name options = "--source %s --parallel-mode" % package - os.environ['PYTHON'] = ("coverage run %s" % options) - logger.info("os.environ['PYTHON'] = %r", os.environ['PYTHON']) + os.environ["PYTHON"] = "coverage run %s" % options + logger.info("os.environ['PYTHON'] = %r", os.environ["PYTHON"]) def _coverage_after(self): logger.info("_coverage_after called") diff --git a/testrepository/testcommand.py b/testrepository/testcommand.py index 103d5d9..50fbb19 100644 --- a/testrepository/testcommand.py +++ b/testrepository/testcommand.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. 
-# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -33,7 +33,7 @@ from testrepository.testlist import ( parse_enumeration, write_list, - ) +) testrconf_help = dedent(""" Configuring via .testr.conf: @@ -90,7 +90,7 @@ class CallWhenProcFinishes(object): """Convert a process object to trigger a callback when returncode is set. - + This just wraps the entire object and when the returncode attribute access finds a set value, calls the callback. """ @@ -129,14 +129,27 @@ def wait(self): return self._proc.wait() -compiled_re_type = type(re.compile('')) +compiled_re_type = type(re.compile("")) + class TestListingFixture(Fixture): """Write a temporary file to disk with test ids in it.""" - def __init__(self, test_ids, cmd_template, listopt, idoption, ui, - repository, parallel=True, listpath=None, parser=None, - test_filters=None, instance_source=None, group_callback=None): + def __init__( + self, + test_ids, + cmd_template, + listopt, + idoption, + ui, + repository, + parallel=True, + listpath=None, + parser=None, + test_filters=None, + instance_source=None, + group_callback=None, + ): """Create a TestListingFixture. :param test_ids: The test_ids to use. 
May be None indicating that @@ -189,25 +202,29 @@ def __init__(self, test_ids, cmd_template, listopt, idoption, ui, def setUp(self): super(TestListingFixture, self).setUp() - variable_regex = '\$(IDOPTION|IDFILE|IDLIST|LISTOPT)' + variable_regex = "\$(IDOPTION|IDFILE|IDLIST|LISTOPT)" variables = {} - list_variables = {'LISTOPT': self.listopt} + list_variables = {"LISTOPT": self.listopt} cmd = self.template try: - default_idstr = self._parser.get('DEFAULT', 'test_id_list_default') - list_variables['IDLIST'] = default_idstr + default_idstr = self._parser.get("DEFAULT", "test_id_list_default") + list_variables["IDLIST"] = default_idstr # In theory we should also support casting this into IDFILE etc - # needs this horrible class refactored. except configparser.NoOptionError as e: if e.message != "No option 'test_id_list_default' in section: 'DEFAULT'": raise default_idstr = None + def list_subst(match): - return list_variables.get(match.groups(1)[0], '') + return list_variables.get(match.groups(1)[0], "") + self.list_cmd = re.sub(variable_regex, list_subst, cmd) - nonparallel = (not self.parallel or not - getattr(self.ui, 'options', None) or not - getattr(self.ui.options, 'parallel', None)) + nonparallel = ( + not self.parallel + or not getattr(self.ui, "options", None) + or not getattr(self.ui.options, "parallel", None) + ) if nonparallel: self.concurrency = 1 else: @@ -229,22 +246,24 @@ def list_subst(match): if self.test_ids is None: # No test ids to supply to the program. 
self.list_file_name = None - name = '' - idlist = '' + name = "" + idlist = "" else: self.test_ids = self.filter_tests(self.test_ids) name = self.make_listfile() - variables['IDFILE'] = name - idlist = ' '.join(self.test_ids) - variables['IDLIST'] = idlist + variables["IDFILE"] = name + idlist = " ".join(self.test_ids) + variables["IDLIST"] = idlist + def subst(match): - return variables.get(match.groups(1)[0], '') + return variables.get(match.groups(1)[0], "") + if self.test_ids is None: # No test ids, no id option. - idoption = '' + idoption = "" else: idoption = re.sub(variable_regex, subst, self.idoption) - variables['IDOPTION'] = idoption + variables["IDOPTION"] = idoption self.cmd = re.sub(variable_regex, subst, cmd) def make_listfile(self): @@ -252,10 +271,10 @@ def make_listfile(self): try: if self._listpath: name = self._listpath - stream = open(name, 'wb') + stream = open(name, "wb") else: fd, name = tempfile.mkstemp() - stream = os.fdopen(fd, 'wb') + stream = os.fdopen(fd, "wb") self.list_file_name = name write_list(stream, self.test_ids) stream.close() @@ -268,16 +287,18 @@ def make_listfile(self): def filter_tests(self, test_ids): """Filter test_ids by the test_filters. - + :return: A list of test ids. """ if self.test_filters is None: return test_ids filters = list(map(re.compile, self.test_filters)) + def include(test_id): for pred in filters: if pred.search(test_id): return True + return list(filter(include, test_ids)) def list_tests(self): @@ -285,24 +306,26 @@ def list_tests(self): :return: A list of test ids. 
""" - if '$LISTOPT' not in self.template: + if "$LISTOPT" not in self.template: raise ValueError("LISTOPT not configured in .testr.conf") instance, list_cmd = self._per_instance_command(self.list_cmd) try: - self.ui.output_values([('running', list_cmd)]) - run_proc = self.ui.subprocess_Popen(list_cmd, shell=True, - stdout=subprocess.PIPE, stdin=subprocess.PIPE) + self.ui.output_values([("running", list_cmd)]) + run_proc = self.ui.subprocess_Popen( + list_cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE + ) out, err = run_proc.communicate() if run_proc.returncode != 0: new_out = io.BytesIO() - ByteStreamToStreamResult(io.BytesIO(out), 'stdout').run( - results.CatFiles(new_out)) + ByteStreamToStreamResult(io.BytesIO(out), "stdout").run( + results.CatFiles(new_out) + ) out = new_out.getvalue() self.ui.output_stream(io.BytesIO(out)) self.ui.output_stream(io.BytesIO(err)) raise ValueError( - "Non-zero exit code (%d) from test listing." - % (run_proc.returncode)) + "Non-zero exit code (%d) from test listing." % (run_proc.returncode) + ) ids = parse_enumeration(out) return ids finally: @@ -311,7 +334,7 @@ def list_tests(self): def _per_instance_command(self, cmd): """Customise cmd to with an instance-id. - + :param concurrency: The number of instances to ask for (used to avoid death-by-1000 cuts of latency. """ @@ -320,17 +343,18 @@ def _per_instance_command(self, cmd): instance = self._instance_source.obtain_instance(self.concurrency) if instance is not None: try: - instance_prefix = self._parser.get( - 'DEFAULT', 'instance_execute') + instance_prefix = self._parser.get("DEFAULT", "instance_execute") variables = { - 'INSTANCE_ID': instance.decode('utf8'), - 'COMMAND': cmd, + "INSTANCE_ID": instance.decode("utf8"), + "COMMAND": cmd, # --list-tests cannot use FILES, so handle it being unset. 
- 'FILES': getattr(self, 'list_file_name', None) or '', + "FILES": getattr(self, "list_file_name", None) or "", } - variable_regex = '\$(INSTANCE_ID|COMMAND|FILES)' + variable_regex = r"\$(INSTANCE_ID|COMMAND|FILES)" + def subst(match): - return variables.get(match.groups(1)[0], '') + return variables.get(match.groups(1)[0], "") + cmd = re.sub(variable_regex, subst, instance_prefix) except configparser.NoOptionError: # Per-instance execution environment not configured. @@ -348,16 +372,21 @@ def run_tests(self): # Have to customise cmd here, as instances are allocated # just-in-time. XXX: Indicates this whole region needs refactoring. instance, cmd = self._per_instance_command(self.cmd) - self.ui.output_values([('running', cmd)]) - run_proc = self.ui.subprocess_Popen(cmd, shell=True, - stdout=subprocess.PIPE, stdin=subprocess.PIPE) + self.ui.output_values([("running", cmd)]) + run_proc = self.ui.subprocess_Popen( + cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE + ) # Prevent processes stalling if they read from stdin; we could # pass this through in future, but there is no point doing that # until we have a working can-run-debugger-inline story. 
run_proc.stdin.close() if instance: - return [CallWhenProcFinishes(run_proc, - lambda:self._instance_source.release_instance(instance))] + return [ + CallWhenProcFinishes( + run_proc, + lambda: self._instance_source.release_instance(instance), + ) + ] else: return [run_proc] test_id_groups = self.partition_tests(test_ids, self.concurrency) @@ -365,10 +394,19 @@ def run_tests(self): if not test_ids: # No tests in this partition continue - fixture = self.useFixture(TestListingFixture(test_ids, - self.template, self.listopt, self.idoption, self.ui, - self.repository, parallel=False, parser=self._parser, - instance_source=self._instance_source)) + fixture = self.useFixture( + TestListingFixture( + test_ids, + self.template, + self.listopt, + self.idoption, + self.ui, + self.repository, + parallel=False, + parser=self._parser, + instance_source=self._instance_source, + ) + ) result.extend(fixture.run_tests()) return result @@ -377,7 +415,7 @@ def partition_tests(self, test_ids, concurrency): Test durations from the repository are used to get partitions which have roughly the same expected runtime. New tests - those with no - recorded duration - are allocated in round-robin fashion to the + recorded duration - are allocated in round-robin fashion to the partitions created using test durations. :return: A list where each element is a distinct subset of test_ids, @@ -386,12 +424,14 @@ def partition_tests(self, test_ids, concurrency): partitions = [list() for i in range(concurrency)] timed_partitions = [[0.0, partition] for partition in partitions] time_data = self.repository.get_test_times(test_ids) - timed_tests = time_data['known'] - unknown_tests = time_data['unknown'] + timed_tests = time_data["known"] + unknown_tests = time_data["unknown"] # Group tests: generate group_id -> test_ids. 
group_ids = defaultdict(list) if self._group_callback is None: - group_callback = lambda _:None + + def group_callback(_): + return None else: group_callback = self._group_callback for test_id in test_ids: @@ -408,14 +448,19 @@ def partition_tests(self, test_ids, concurrency): unknown = [] for group_id, group_tests in group_ids.items(): untimed_ids = unknown_tests.intersection(group_tests) - group_time = sum([timed_tests[test_id] - for test_id in untimed_ids.symmetric_difference(group_tests)]) + group_time = sum( + [ + timed_tests[test_id] + for test_id in untimed_ids.symmetric_difference(group_tests) + ] + ) if not untimed_ids: timed[group_id] = group_time elif group_time: partial[group_id] = group_time else: unknown.append(group_id) + # Scheduling is NP complete in general, so we avoid aiming for # perfection. A quick approximation that is sufficient for our general # needs: @@ -423,16 +468,16 @@ def partition_tests(self, test_ids, concurrency): # allocate to partitions by putting each group in to the partition with # the current (lowest time, shortest length[in tests]) def consume_queue(groups): - queue = sorted( - groups.items(), key=operator.itemgetter(1), reverse=True) + queue = sorted(groups.items(), key=operator.itemgetter(1), reverse=True) for group_id, duration in queue: timed_partitions[0][0] = timed_partitions[0][0] + duration timed_partitions[0][1].extend(group_ids[group_id]) - timed_partitions.sort(key=lambda item:(item[0], len(item[1]))) + timed_partitions.sort(key=lambda item: (item[0], len(item[1]))) + consume_queue(timed) consume_queue(partial) # Assign groups with entirely unknown times in round robin fashion to - # the partitions. + # the partitions. 
for partition, group_id in zip(itertools.cycle(partitions), unknown): partition.extend(group_ids[group_id]) return partitions @@ -440,17 +485,18 @@ def consume_queue(groups): def callout_concurrency(self): """Callout for user defined concurrency.""" try: - concurrency_cmd = self._parser.get( - 'DEFAULT', 'test_run_concurrency') + concurrency_cmd = self._parser.get("DEFAULT", "test_run_concurrency") except configparser.NoOptionError: return None - run_proc = self.ui.subprocess_Popen(concurrency_cmd, shell=True, - stdout=subprocess.PIPE, stdin=subprocess.PIPE) + run_proc = self.ui.subprocess_Popen( + concurrency_cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE + ) out, err = run_proc.communicate() if run_proc.returncode: raise ValueError( - "test_run_concurrency failed: exit code %d, stderr='%s'" % ( - run_proc.returncode, err.decode('utf8', 'replace'))) + "test_run_concurrency failed: exit code %d, stderr='%s'" + % (run_proc.returncode, err.decode("utf8", "replace")) + ) return int(out.strip()) def local_concurrency(self): @@ -463,7 +509,7 @@ def local_concurrency(self): class TestCommand(Fixture): """Represents the test command defined in .testr.conf. - + :ivar run_factory: The fixture to use to execute a command. :ivar oldschool: Use failing.list rather than a unique file path. @@ -473,7 +519,7 @@ class TestCommand(Fixture): happens. This is not done per-run-command, because test bisection (amongst other things) uses multiple get_run_command configurations. 
"""
- + run_factory = TestListingFixture oldschool = False @@ -504,68 +550,73 @@ def _dispose_instances(self): self._instances = None self._allocated_instances = None try: - dispose_cmd = self.get_parser().get('DEFAULT', 'instance_dispose') + dispose_cmd = self.get_parser().get("DEFAULT", "instance_dispose") except (ValueError, configparser.NoOptionError): return - variable_regex = '\$INSTANCE_IDS' - dispose_cmd = re.sub(variable_regex, ' '.join(sorted(instance.decode('utf') for instance in instances)), - dispose_cmd) - self.ui.output_values([('running', dispose_cmd)]) + variable_regex = r"\$INSTANCE_IDS" + dispose_cmd = re.sub( + variable_regex, + " ".join(sorted(instance.decode("utf") for instance in instances)), + dispose_cmd, + ) + self.ui.output_values([("running", dispose_cmd)]) run_proc = self.ui.subprocess_Popen(dispose_cmd, shell=True) run_proc.communicate() if run_proc.returncode: - raise ValueError('Disposing of instances failed, return %d' % - run_proc.returncode) + raise ValueError( + "Disposing of instances failed, return %d" % run_proc.returncode + ) def get_parser(self): """Get a parser with the .testr.conf in it.""" parser = configparser.ConfigParser() # This possibly should push down into UI. - if self.ui.here == 'memory:': + if self.ui.here == "memory:": return parser - if not parser.read(os.path.join(self.ui.here, '.testr.conf')): + if not parser.read(os.path.join(self.ui.here, ".testr.conf")): raise ValueError("No .testr.conf config file") return parser def get_run_command(self, test_ids=None, testargs=(), test_filters=None): """Get the command that would be run to run tests. - + See TestListingFixture for the definition of test_ids and test_filters. 
""" if self._instances is None: - raise TypeError('TestCommand not setUp') + raise TypeError("TestCommand not setUp") parser = self.get_parser() try: - command = parser.get('DEFAULT', 'test_command') + command = parser.get("DEFAULT", "test_command") except configparser.NoOptionError as e: if e.message != "No option 'test_command' in section: 'DEFAULT'": raise raise ValueError("No test_command option present in .testr.conf") elements = [command] + list(testargs) - cmd = ' '.join(elements) - idoption = '' - if '$IDOPTION' in command: + cmd = " ".join(elements) + idoption = "" + if "$IDOPTION" in command: # IDOPTION is used, we must have it configured. try: - idoption = parser.get('DEFAULT', 'test_id_option') + idoption = parser.get("DEFAULT", "test_id_option") except configparser.NoOptionError as e: if e.message != "No option 'test_id_option' in section: 'DEFAULT'": raise raise ValueError("No test_id_option option present in .testr.conf") - listopt = '' - if '$LISTOPT' in command: + listopt = "" + if "$LISTOPT" in command: # LISTOPT is used, test_list_option must be configured. 
try: - listopt = parser.get('DEFAULT', 'test_list_option') + listopt = parser.get("DEFAULT", "test_list_option") except configparser.NoOptionError as e: if e.message != "No option 'test_list_option' in section: 'DEFAULT'": raise raise ValueError("No test_list_option option present in .testr.conf") try: - group_regex = parser.get('DEFAULT', 'group_regex') + group_regex = parser.get("DEFAULT", "group_regex") except configparser.NoOptionError: group_regex = None if group_regex: + def group_callback(test_id, regex=re.compile(group_regex)): match = regex.match(test_id) if match: @@ -573,22 +624,39 @@ def group_callback(test_id, regex=re.compile(group_regex)): else: group_callback = None if self.oldschool: - listpath = os.path.join(self.ui.here, 'failing.list') - result = self.run_factory(test_ids, cmd, listopt, idoption, - self.ui, self.repository, listpath=listpath, parser=parser, - test_filters=test_filters, instance_source=self, - group_callback=group_callback) + listpath = os.path.join(self.ui.here, "failing.list") + result = self.run_factory( + test_ids, + cmd, + listopt, + idoption, + self.ui, + self.repository, + listpath=listpath, + parser=parser, + test_filters=test_filters, + instance_source=self, + group_callback=group_callback, + ) else: - result = self.run_factory(test_ids, cmd, listopt, idoption, - self.ui, self.repository, parser=parser, - test_filters=test_filters, instance_source=self, - group_callback=group_callback) + result = self.run_factory( + test_ids, + cmd, + listopt, + idoption, + self.ui, + self.repository, + parser=parser, + test_filters=test_filters, + instance_source=self, + group_callback=group_callback, + ) return result def get_filter_tags(self): parser = self.get_parser() try: - tags = parser.get('DEFAULT', 'filter_tags') + tags = parser.get("DEFAULT", "filter_tags") except configparser.NoOptionError as e: if e.message != "No option 'filter_tags' in section: 'DEFAULT'": raise @@ -597,26 +665,25 @@ def get_filter_tags(self): def 
obtain_instance(self, concurrency): """If possible, get one or more test run environment instance ids. - + Note this is not threadsafe: calling it from multiple threads would likely result in shared results. """ while len(self._instances) < concurrency: try: - cmd = self.get_parser().get('DEFAULT', 'instance_provision') + cmd = self.get_parser().get("DEFAULT", "instance_provision") except configparser.NoOptionError: # Instance allocation not configured return None - variable_regex = '\$INSTANCE_COUNT' - cmd = re.sub(variable_regex, - str(concurrency - len(self._instances)), cmd) - self.ui.output_values([('running', cmd)]) - proc = self.ui.subprocess_Popen( - cmd, shell=True, stdout=subprocess.PIPE) + variable_regex = r"\$INSTANCE_COUNT" + cmd = re.sub(variable_regex, str(concurrency - len(self._instances)), cmd) + self.ui.output_values([("running", cmd)]) + proc = self.ui.subprocess_Popen(cmd, shell=True, stdout=subprocess.PIPE) out, _ = proc.communicate() if proc.returncode: - raise ValueError('Provisioning instances failed, return %d' % - proc.returncode) + raise ValueError( + "Provisioning instances failed, return %d" % proc.returncode + ) new_instances = set([item.strip() for item in out.split()]) self._instances.update(new_instances) # Cached first. diff --git a/testrepository/testlist.py b/testrepository/testlist.py index 18044cc..973d5b1 100644 --- a/testrepository/testlist.py +++ b/testrepository/testlist.py @@ -1,11 +1,11 @@ # # Copyright (c) 2012 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. 
-# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -26,19 +26,19 @@ def write_list(stream, test_ids): :param stream: A file-like object. :param test_ids: An iterable of test ids. """ - stream.write(('\n'.join(list(test_ids) + [''])).encode('utf8')) + stream.write(("\n".join(list(test_ids) + [""])).encode("utf8")) def parse_list(list_bytes): """Parse list_bytes into a list of test ids.""" - return [id.strip() for id in list_bytes.decode('utf8').split('\n') - if id.strip()] + return [id.strip() for id in list_bytes.decode("utf8").split("\n") if id.strip()] def parse_enumeration(enumeration_bytes): """Parse enumeration_bytes into a list of test_ids.""" - parser = ByteStreamToStreamResult(BytesIO(enumeration_bytes), - non_subunit_name='stdout') + parser = ByteStreamToStreamResult( + BytesIO(enumeration_bytes), non_subunit_name="stdout" + ) result = StreamResult() parser.run(result) - return [event[1] for event in result._events if event[2]=='exists'] + return [event[1] for event in result._events if event[2] == "exists"] diff --git a/testrepository/tests/__init__.py b/testrepository/tests/__init__.py index d6f38dc..d874ee9 100644 --- a/testrepository/tests/__init__.py +++ b/testrepository/tests/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -29,7 +29,7 @@ class _Wildcard(object): """Object that is equal to everything.""" def __repr__(self): - return '*' + return "*" def __eq__(self, other): return True @@ -42,7 +42,6 @@ def __ne__(self, other): class StubTestCommand: - def __init__(self, filter_tags=None): self.results = [] self.filter_tags = filter_tags or set() @@ -56,31 +55,32 @@ def get_filter_tags(self): def test_suite(): packages = [ - 'arguments', - 'commands', - 'repository', - 'ui', - ] + "arguments", + "commands", + "repository", + "ui", + ] names = [ - 'arguments', - 'commands', - 'matchers', - 'monkeypatch', - 'repository', - 'results', - 'setup', - 'stubpackage', - 'testcommand', - 'testr', - 'ui', - ] - module_names = ['testrepository.tests.test_' + name for name in names] + "arguments", + "commands", + "matchers", + "monkeypatch", + "repository", + "results", + "setup", + "stubpackage", + "testcommand", + "testr", + "ui", + ] + module_names = ["testrepository.tests.test_" + name for name in names] loader = unittest.TestLoader() suite = loader.loadTestsFromNames(module_names) result = testresources.OptimisingTestSuite() result.addTests(generate_scenarios(suite)) for pkgname in packages: - pkg = __import__('testrepository.tests.' + pkgname, globals(), - locals(), ['test_suite']) + pkg = __import__( + "testrepository.tests." + pkgname, globals(), locals(), ["test_suite"] + ) result.addTests(generate_scenarios(pkg.test_suite())) return result diff --git a/testrepository/tests/arguments/__init__.py b/testrepository/tests/arguments/__init__.py index 479c9ca..8414189 100644 --- a/testrepository/tests/arguments/__init__.py +++ b/testrepository/tests/arguments/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. 
You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -16,14 +16,14 @@ import unittest + def test_suite(): names = [ - 'command', - 'doubledash', - 'path', - 'string', - ] - module_names = ['testrepository.tests.arguments.test_' + name for name in - names] + "command", + "doubledash", + "path", + "string", + ] + module_names = ["testrepository.tests.arguments.test_" + name for name in names] loader = unittest.TestLoader() return loader.loadTestsFromNames(module_names) diff --git a/testrepository/tests/arguments/test_command.py b/testrepository/tests/arguments/test_command.py index 805e754..1024da7 100644 --- a/testrepository/tests/arguments/test_command.py +++ b/testrepository/tests/arguments/test_command.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -22,14 +22,14 @@ class TestArgument(ResourcedTestCase): - def test_looks_up_command(self): - arg = command.CommandArgument('name') - result = arg.parse(['load']) + arg = command.CommandArgument("name") + result = arg.parse(["load"]) self.assertEqual([load.load], result) def test_no_command(self): - arg = command.CommandArgument('name') - self.assertThat(lambda: arg.parse(['one']), - raises(ValueError("Could not find command 'one'."))) - + arg = command.CommandArgument("name") + self.assertThat( + lambda: arg.parse(["one"]), + raises(ValueError("Could not find command 'one'.")), + ) diff --git a/testrepository/tests/arguments/test_doubledash.py b/testrepository/tests/arguments/test_doubledash.py index d0787f7..fb2c100 100644 --- a/testrepository/tests/arguments/test_doubledash.py +++ b/testrepository/tests/arguments/test_doubledash.py @@ -1,11 +1,11 @@ # # Copyright (c) 2012 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -19,15 +19,14 @@ class TestArgument(ResourcedTestCase): - def test_parses_as_string(self): arg = doubledash.DoubledashArgument() - result = arg.parse(['--']) - self.assertEqual(['--'], result) + result = arg.parse(["--"]) + self.assertEqual(["--"], result) def test_fixed_name(self): arg = doubledash.DoubledashArgument() - self.assertEqual('doubledash', arg.name) + self.assertEqual("doubledash", arg.name) def test_fixed_min_max(self): arg = doubledash.DoubledashArgument() @@ -36,8 +35,7 @@ def test_fixed_min_max(self): def test_parses_non_dash_dash_as_nothing(self): arg = doubledash.DoubledashArgument() - args = ['foo', '--'] + args = ["foo", "--"] result = arg.parse(args) self.assertEqual([], result) - self.assertEqual(['foo', '--'], args) - + self.assertEqual(["foo", "--"], args) diff --git a/testrepository/tests/arguments/test_path.py b/testrepository/tests/arguments/test_path.py index e159836..a70c128 100644 --- a/testrepository/tests/arguments/test_path.py +++ b/testrepository/tests/arguments/test_path.py @@ -1,11 +1,11 @@ # # Copyright (c) 2012 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -26,24 +26,23 @@ class TestArgument(ResourcedTestCase): - def test_parses_as_string(self): existingfile = tempfile.NamedTemporaryFile() self.addCleanup(existingfile.close) - arg = path.ExistingPathArgument('path') + arg = path.ExistingPathArgument("path") result = arg.parse([existingfile.name]) self.assertEqual([existingfile.name], result) def test_rejects_doubledash(self): base = self.useFixture(TempDir()).path - arg = path.ExistingPathArgument('path') + arg = path.ExistingPathArgument("path") self.addCleanup(os.chdir, os.getcwd()) os.chdir(base) - with open('--', 'wt') as f:pass - self.assertThat(lambda: arg.parse(['--']), raises(ValueError)) + with open("--", "wt"): + pass + self.assertThat(lambda: arg.parse(["--"]), raises(ValueError)) def test_rejects_missing_file(self): base = self.useFixture(TempDir()).path - arg = path.ExistingPathArgument('path') - self.assertThat(lambda: arg.parse([join(base, 'foo')]), - raises(ValueError)) + arg = path.ExistingPathArgument("path") + self.assertThat(lambda: arg.parse([join(base, "foo")]), raises(ValueError)) diff --git a/testrepository/tests/arguments/test_string.py b/testrepository/tests/arguments/test_string.py index f2758b5..c54ebc8 100644 --- a/testrepository/tests/arguments/test_string.py +++ b/testrepository/tests/arguments/test_string.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -21,12 +21,11 @@ class TestArgument(ResourcedTestCase): - def test_parses_as_string(self): - arg = string.StringArgument('name') - result = arg.parse(['load']) - self.assertEqual(['load'], result) + arg = string.StringArgument("name") + result = arg.parse(["load"]) + self.assertEqual(["load"], result) def test_rejects_doubledash(self): - arg = string.StringArgument('name') - self.assertThat(lambda: arg.parse(['--']), raises(ValueError)) + arg = string.StringArgument("name") + self.assertThat(lambda: arg.parse(["--"]), raises(ValueError)) diff --git a/testrepository/tests/commands/__init__.py b/testrepository/tests/commands/__init__.py index e69418a..62dc15c 100644 --- a/testrepository/tests/commands/__init__.py +++ b/testrepository/tests/commands/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -16,21 +16,21 @@ import unittest + def test_suite(): names = [ - 'commands', - 'failing', - 'help', - 'init', - 'last', - 'list_tests', - 'load', - 'quickstart', - 'run', - 'slowest', - 'stats', - ] - module_names = ['testrepository.tests.commands.test_' + name for name in - names] + "commands", + "failing", + "help", + "init", + "last", + "list_tests", + "load", + "quickstart", + "run", + "slowest", + "stats", + ] + module_names = ["testrepository.tests.commands.test_" + name for name in names] loader = unittest.TestLoader() return loader.loadTestsFromNames(module_names) diff --git a/testrepository/tests/commands/test_commands.py b/testrepository/tests/commands/test_commands.py index e3ab1ce..b3d3331 100644 --- a/testrepository/tests/commands/test_commands.py +++ b/testrepository/tests/commands/test_commands.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -20,7 +20,6 @@ class TestCommandCommands(ResourcedTestCase): - def get_test_ui_and_cmd(self): ui = UI() cmd = commands.commands(ui) @@ -31,10 +30,9 @@ def test_shows_a_table_of_commands(self): ui, cmd = self.get_test_ui_and_cmd() cmd.execute() self.assertEqual(1, len(ui.outputs)) - self.assertEqual('table', ui.outputs[0][0]) - self.assertEqual(('command', 'description'), ui.outputs[0][1][0]) + self.assertEqual("table", ui.outputs[0][0]) + self.assertEqual(("command", "description"), ui.outputs[0][1][0]) command_names = [row[0] for row in ui.outputs[0][1]] summaries = [row[1] for row in ui.outputs[0][1]] - self.assertTrue('load' in command_names) - self.assertTrue( - 'Load a subunit stream into a repository.' in summaries) + self.assertTrue("load" in command_names) + self.assertTrue("Load a subunit stream into a repository." in summaries) diff --git a/testrepository/tests/commands/test_failing.py b/testrepository/tests/commands/test_failing.py index 78f4cef..b1deac4 100644 --- a/testrepository/tests/commands/test_failing.py +++ b/testrepository/tests/commands/test_failing.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -14,15 +14,13 @@ """Tests for the failing command.""" -import doctest from io import BytesIO from subunit.v2 import ByteStreamToStreamResult import testtools from testtools.matchers import ( - DocTestMatches, Equals, - ) +) from testtools.testresult.doubles import StreamResult from testrepository.commands import failing @@ -30,13 +28,11 @@ from testrepository.repository import memory from testrepository.tests import ( ResourcedTestCase, - StubTestCommand, Wildcard, - ) +) class TestCommand(ResourcedTestCase): - def get_test_ui_and_cmd(self, options=(), args=()): ui = UI(options=options, args=args) cmd = failing.failing(ui) @@ -49,15 +45,26 @@ def test_shows_failures_from_last_run(self): repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='failing', test_status='fail') - inserter.status(test_id='ok', test_status='success') + inserter.status(test_id="failing", test_status="fail") + inserter.status(test_id="ok", test_status="success") inserter.stopTestRun() self.assertEqual(1, cmd.execute()) # We should have seen test outputs (of the failure) and summary data. 
- self.assertEqual([ - ('results', Wildcard), - ('summary', False, 1, None, Wildcard, None, [('id', 0, None), ('failures', 1, None)])], - ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ( + "summary", + False, + 1, + None, + Wildcard, + None, + [("id", 0, None), ("failures", 1, None)], + ), + ], + ui.outputs, + ) suite = ui.outputs[0][1] result = testtools.StreamSummary() result.startTestRun() @@ -69,17 +76,17 @@ def test_shows_failures_from_last_run(self): self.assertEqual(1, len(result.errors)) def test_with_subunit_shows_subunit_stream(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("subunit", True)]) cmd.repository_factory = memory.RepositoryFactory() repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='failing', test_status='fail') - inserter.status(test_id='ok', test_status='success') + inserter.status(test_id="failing", test_status="fail") + inserter.status(test_id="ok", test_status="success") inserter.stopTestRun() self.assertEqual(0, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('stream', ui.outputs[0][0]) + self.assertEqual("stream", ui.outputs[0][0]) as_subunit = BytesIO(ui.outputs[0][1]) stream = ByteStreamToStreamResult(as_subunit) log = StreamResult() @@ -89,63 +96,91 @@ def test_with_subunit_shows_subunit_stream(self): finally: log.stopTestRun() self.assertEqual( - [tuple(ev) for ev in log._events], [ - ('startTestRun',), - ('status', 'failing', 'inprogress', None, True, None, None, False, - None, None, Wildcard), - ('status', 'failing', 'fail', None, True, None, None, False, None, - None, Wildcard), - ('stopTestRun',) - ]) + [tuple(ev) for ev in log._events], + [ + ("startTestRun",), + ( + "status", + "failing", + "inprogress", + None, + True, + None, + None, + False, + None, + None, + Wildcard, + ), + ( + "status", + "failing", + "fail", + None, + True, + None, 
+ None, + False, + None, + None, + Wildcard, + ), + ("stopTestRun",), + ], + ) def test_with_subunit_no_failures_exit_0(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("subunit", True)]) cmd.repository_factory = memory.RepositoryFactory() repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='ok', test_status='success') + inserter.status(test_id="ok", test_status="success") inserter.stopTestRun() self.assertEqual(0, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('stream', ui.outputs[0][0]) - self.assertThat(ui.outputs[0][1], Equals(b'')) + self.assertEqual("stream", ui.outputs[0][0]) + self.assertThat(ui.outputs[0][1], Equals(b"")) def test_with_list_shows_list_of_tests(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('list', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("list", True)]) cmd.repository_factory = memory.RepositoryFactory() repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='failing1', test_status='fail') - inserter.status(test_id='ok', test_status='success') - inserter.status(test_id='failing2', test_status='fail') + inserter.status(test_id="failing1", test_status="fail") + inserter.status(test_id="ok", test_status="success") + inserter.status(test_id="failing2", test_status="fail") inserter.stopTestRun() self.assertEqual(1, cmd.execute(), ui.outputs) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('tests', ui.outputs[0][0]) + self.assertEqual("tests", ui.outputs[0][0]) self.assertEqual( - set(['failing1', 'failing2']), - set([test.id() for test in ui.outputs[0][1]])) + set(["failing1", "failing2"]), set([test.id() for test in ui.outputs[0][1]]) + ) def test_uses_get_failing(self): ui, cmd = self.get_test_ui_and_cmd() cmd.repository_factory = memory.RepositoryFactory() calls = [] 
open = cmd.repository_factory.open + def decorate_open_with_get_failing(url): repo = open(url) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='failing', test_status='fail') - inserter.status(test_id='ok', test_status='success') + inserter.status(test_id="failing", test_status="fail") + inserter.status(test_id="ok", test_status="success") inserter.stopTestRun() orig = repo.get_failing + def get_failing(): calls.append(True) return orig() + repo.get_failing = get_failing return repo + cmd.repository_factory.open = decorate_open_with_get_failing cmd.repository_factory.initialise(ui.here) self.assertEqual(1, cmd.execute()) diff --git a/testrepository/tests/commands/test_help.py b/testrepository/tests/commands/test_help.py index fc9b469..7386d1d 100644 --- a/testrepository/tests/commands/test_help.py +++ b/testrepository/tests/commands/test_help.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -24,26 +24,25 @@ class TestCommand(ResourcedTestCase): - - def get_test_ui_and_cmd(self,args=()): + def get_test_ui_and_cmd(self, args=()): ui = UI(args=args) cmd = help.help(ui) ui.set_command(cmd) return ui, cmd def test_shows_rest_of__doc__(self): - ui, cmd = self.get_test_ui_and_cmd(args=['load']) + ui, cmd = self.get_test_ui_and_cmd(args=["load"]) cmd.execute() expected_doc = getdoc(load.load) self.assertThat(ui.outputs[-1][1], Contains(expected_doc)) def test_shows_cmd_arguments(self): - ui, cmd = self.get_test_ui_and_cmd(args=['load']) + ui, cmd = self.get_test_ui_and_cmd(args=["load"]) cmd.execute() self.assertThat(ui.outputs[-1][1], Contains("streams*")) def test_shows_cmd_partial(self): - ui, cmd = self.get_test_ui_and_cmd(args=['load']) + ui, cmd = self.get_test_ui_and_cmd(args=["load"]) cmd.execute() self.assertThat(ui.outputs[-1][1], Contains("--partial")) @@ -51,4 +50,4 @@ def test_shows_general_help_with_no_args(self): ui, cmd = self.get_test_ui_and_cmd() self.assertEqual(0, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('rest', ui.outputs[0][0]) + self.assertEqual("rest", ui.outputs[0][0]) diff --git a/testrepository/tests/commands/test_init.py b/testrepository/tests/commands/test_init.py index 0331b37..f512f50 100644 --- a/testrepository/tests/commands/test_init.py +++ b/testrepository/tests/commands/test_init.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -22,12 +22,12 @@ class TestCommandInit(ResourcedTestCase): - def test_init_no_args_no_questions_no_output(self): ui = UI() cmd = init.init(ui) calls = [] - cmd.repository_factory = RecordingRepositoryFactory(calls, - memory.RepositoryFactory()) + cmd.repository_factory = RecordingRepositoryFactory( + calls, memory.RepositoryFactory() + ) cmd.execute() - self.assertEqual([('initialise', ui.here)], calls) + self.assertEqual([("initialise", ui.here)], calls) diff --git a/testrepository/tests/commands/test_last.py b/testrepository/tests/commands/test_last.py index 80571b9..125b62d 100644 --- a/testrepository/tests/commands/test_last.py +++ b/testrepository/tests/commands/test_last.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -18,7 +18,6 @@ from subunit.v2 import ByteStreamToStreamResult import testtools -from testtools.matchers import Equals from testtools.testresult.doubles import StreamResult from testrepository.commands import last @@ -26,13 +25,11 @@ from testrepository.repository import memory from testrepository.tests import ( ResourcedTestCase, - StubTestCommand, Wildcard, - ) +) class TestCommand(ResourcedTestCase): - def get_test_ui_and_cmd(self, args=(), options=()): ui = UI(args=args, options=options) cmd = last.last(ui) @@ -45,17 +42,27 @@ def test_shows_last_run_first_run(self): repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='failing', test_status='fail') - inserter.status(test_id='ok', test_status='success') + inserter.status(test_id="failing", test_status="fail") + inserter.status(test_id="ok", test_status="success") inserter.stopTestRun() id = inserter.get_id() self.assertEqual(1, cmd.execute()) # We should have seen test outputs (of the failure) and summary data. 
- self.assertEqual([ - ('results', Wildcard), - ('summary', False, 2, None, Wildcard, Wildcard, - [('id', id, None), ('failures', 1, None)])], - ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ( + "summary", + False, + 2, + None, + Wildcard, + Wildcard, + [("id", id, None), ("failures", 1, None)], + ), + ], + ui.outputs, + ) suite = ui.outputs[0][1] result = testtools.StreamSummary() result.startTestRun() @@ -69,8 +76,8 @@ def test_shows_last_run_first_run(self): def _add_run(self, repo): inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='failing', test_status='fail') - inserter.status(test_id='ok', test_status='success') + inserter.status(test_id="failing", test_status="fail") + inserter.status(test_id="ok", test_status="success") inserter.stopTestRun() return inserter.get_id() @@ -82,11 +89,21 @@ def test_shows_last_run(self): id = self._add_run(repo) self.assertEqual(1, cmd.execute()) # We should have seen test outputs (of the failure) and summary data. - self.assertEqual([ - ('results', Wildcard), - ('summary', False, 2, 0, Wildcard, Wildcard, - [('id', id, None), ('failures', 1, 0)])], - ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ( + "summary", + False, + 2, + 0, + Wildcard, + Wildcard, + [("id", id, None), ("failures", 1, 0)], + ), + ], + ui.outputs, + ) suite = ui.outputs[0][1] result = testtools.StreamSummary() result.startTestRun() @@ -98,15 +115,18 @@ def test_shows_last_run(self): self.assertEqual(2, result.testsRun) def test_shows_subunit_stream(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("subunit", True)]) cmd.repository_factory = memory.RepositoryFactory() repo = cmd.repository_factory.initialise(ui.here) self._add_run(repo) self.assertEqual(0, cmd.execute()) # We should have seen test outputs (of the failure) and summary data. 
- self.assertEqual([ - ('stream', Wildcard), - ], ui.outputs) + self.assertEqual( + [ + ("stream", Wildcard), + ], + ui.outputs, + ) as_subunit = BytesIO(ui.outputs[0][1]) stream = ByteStreamToStreamResult(as_subunit) log = StreamResult() @@ -116,11 +136,35 @@ def test_shows_subunit_stream(self): finally: log.stopTestRun() self.assertEqual( - log._events, [ - ('startTestRun',), - ('status', 'failing', 'fail', None, True, None, None, False, - None, None, None), - ('status', 'ok', 'success', None, True, None, None, False, None, - None, None), - ('stopTestRun',) - ]) + log._events, + [ + ("startTestRun",), + ( + "status", + "failing", + "fail", + None, + True, + None, + None, + False, + None, + None, + None, + ), + ( + "status", + "ok", + "success", + None, + True, + None, + None, + False, + None, + None, + None, + ), + ("stopTestRun",), + ], + ) diff --git a/testrepository/tests/commands/test_list_tests.py b/testrepository/tests/commands/test_list_tests.py index af662b5..d26b415 100644 --- a/testrepository/tests/commands/test_list_tests.py +++ b/testrepository/tests/commands/test_list_tests.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -24,15 +24,12 @@ from testrepository.commands import list_tests from testrepository.ui.model import UI from testrepository.repository import memory -from testrepository.tests import ResourcedTestCase, Wildcard +from testrepository.tests import ResourcedTestCase from testrepository.tests.stubpackage import TempDirResource -from testrepository.tests.test_repository import make_test -from testrepository.tests.test_testcommand import FakeTestCommand class TestCommand(ResourcedTestCase): - - resources = [('tempdir', TempDirResource())] + resources = [("tempdir", TempDirResource())] def get_test_ui_and_cmd(self, options=(), args=()): self.dirty() @@ -44,77 +41,91 @@ def get_test_ui_and_cmd(self, options=(), args=()): def dirty(self): # Ugly: TODO - improve testresources to make this go away. - dict(self.resources)['tempdir']._dirty = True + dict(self.resources)["tempdir"]._dirty = True def config_path(self): - return os.path.join(self.tempdir, '.testr.conf') + return os.path.join(self.tempdir, ".testr.conf") def set_config(self, text): - with open(self.config_path(), 'wt') as stream: + with open(self.config_path(), "wt") as stream: stream.write(text) def setup_repo(self, cmd, ui): repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='passing', test_status='success') - inserter.status(test_id='failing', test_status='fail') + inserter.status(test_id="passing", test_status="success") + inserter.status(test_id="failing", test_status="fail") inserter.stopTestRun() def test_no_config_file_errors(self): ui, cmd = self.get_test_ui_and_cmd() self.assertEqual(3, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('error', ui.outputs[0][0]) - self.assertThat(ui.outputs[0][1], - MatchesException(ValueError('No .testr.conf config file'))) + self.assertEqual("error", ui.outputs[0][0]) + self.assertThat( + ui.outputs[0][1], MatchesException(ValueError("No .testr.conf config 
file")) + ) def test_calls_list_tests(self): - ui, cmd = self.get_test_ui_and_cmd(args=('--', 'bar', 'quux')) + ui, cmd = self.get_test_ui_and_cmd(args=("--", "bar", "quux")) cmd.repository_factory = memory.RepositoryFactory() buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='returned', test_status='exists') - stream.status(test_id='values', test_status='exists') + stream.status(test_id="returned", test_status="exists") + stream.status(test_id="values", test_status="exists") subunit_bytes = buffer.getvalue() ui.proc_outputs = [subunit_bytes] self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDOPTION\n' - 'test_id_option=--load-list $IDFILE\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $LISTOPT $IDOPTION\n" + "test_id_option=--load-list $IDFILE\n" + "test_list_option=--list\n" + ) self.assertEqual(0, cmd.execute()) - expected_cmd = 'foo --list bar quux' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdout': PIPE, 'stdin': PIPE}), - ('communicate',), - ('stream', b'returned\nvalues\n'), - ], ui.outputs) + expected_cmd = "foo --list bar quux" + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdout": PIPE, "stdin": PIPE}, + ), + ("communicate",), + ("stream", b"returned\nvalues\n"), + ], + ui.outputs, + ) def test_filters_use_filtered_list(self): - ui, cmd = self.get_test_ui_and_cmd( - args=('returned', '--', 'bar', 'quux')) + ui, cmd = self.get_test_ui_and_cmd(args=("returned", "--", "bar", "quux")) cmd.repository_factory = memory.RepositoryFactory() buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='returned', test_status='exists') - stream.status(test_id='values', test_status='exists') + stream.status(test_id="returned", test_status="exists") + stream.status(test_id="values", test_status="exists") 
subunit_bytes = buffer.getvalue() ui.proc_outputs = [subunit_bytes] self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDOPTION\n' - 'test_id_option=--load-list $IDFILE\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $LISTOPT $IDOPTION\n" + "test_id_option=--load-list $IDFILE\n" + "test_list_option=--list\n" + ) retcode = cmd.execute() - expected_cmd = 'foo --list bar quux' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdout': PIPE, 'stdin': PIPE}), - ('communicate',), - ('stream', b'returned\n'), - ], ui.outputs) + expected_cmd = "foo --list bar quux" + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdout": PIPE, "stdin": PIPE}, + ), + ("communicate",), + ("stream", b"returned\n"), + ], + ui.outputs, + ) self.assertEqual(0, retcode) diff --git a/testrepository/tests/commands/test_load.py b/testrepository/tests/commands/test_load.py index 5b3a3e8..6e06b53 100644 --- a/testrepository/tests/commands/test_load.py +++ b/testrepository/tests/commands/test_load.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2012 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -22,89 +22,89 @@ import iso8601 import testtools -from testtools.content import text_content from testtools.matchers import MatchesException -from testtools.tests.helpers import LoggingResult from testrepository.commands import load from testrepository.ui.model import UI from testrepository.tests import ( ResourcedTestCase, - StubTestCommand, Wildcard, - ) +) from testrepository.tests.test_repository import RecordingRepositoryFactory -from testrepository.tests.repository.test_file import HomeDirTempDir from testrepository.repository import memory, RepositoryNotFound class TestCommandLoad(ResourcedTestCase): - def test_load_loads_subunit_stream_to_default_repository(self): - ui = UI([('subunit', b'')]) + ui = UI([("subunit", b"")]) cmd = load.load(ui) ui.set_command(cmd) calls = [] - cmd.repository_factory = RecordingRepositoryFactory(calls, - memory.RepositoryFactory()) + cmd.repository_factory = RecordingRepositoryFactory( + calls, memory.RepositoryFactory() + ) repo = cmd.repository_factory.initialise(ui.here) del calls[:] cmd.execute() # Right repo - self.assertEqual([('open', ui.here)], calls) + self.assertEqual([("open", ui.here)], calls) # Stream consumed - self.assertFalse('subunit' in ui.input_streams) + self.assertFalse("subunit" in ui.input_streams) # Results loaded self.assertEqual(1, repo.count()) def test_load_loads_named_file_if_given(self): datafile = NamedTemporaryFile() self.addCleanup(datafile.close) - ui = UI([('subunit', b'')], args=[datafile.name]) + ui = UI([("subunit", b"")], args=[datafile.name]) cmd = load.load(ui) ui.set_command(cmd) calls = [] - cmd.repository_factory = RecordingRepositoryFactory(calls, - memory.RepositoryFactory()) + cmd.repository_factory = RecordingRepositoryFactory( + calls, memory.RepositoryFactory() + ) repo = cmd.repository_factory.initialise(ui.here) del calls[:] self.assertEqual(0, cmd.execute()) # Right repo - self.assertEqual([('open', ui.here)], calls) + self.assertEqual([("open", ui.here)], calls) 
# Stream not consumed - otherwise CLI would block when someone runs # 'testr load foo'. XXX: Be nice if we could declare that the argument, # which is a path, is to be an input stream. - self.assertTrue('subunit' in ui.input_streams) + self.assertTrue("subunit" in ui.input_streams) # Results loaded self.assertEqual(1, repo.count()) def test_load_initialises_repo_if_doesnt_exist_and_init_forced(self): - ui = UI([('subunit', b'')], options=[('force_init', True)]) + ui = UI([("subunit", b"")], options=[("force_init", True)]) cmd = load.load(ui) ui.set_command(cmd) calls = [] - cmd.repository_factory = RecordingRepositoryFactory(calls, - memory.RepositoryFactory()) + cmd.repository_factory = RecordingRepositoryFactory( + calls, memory.RepositoryFactory() + ) del calls[:] cmd.execute() - self.assertEqual([('open', ui.here), ('initialise', ui.here)], calls) + self.assertEqual([("open", ui.here), ("initialise", ui.here)], calls) def test_load_errors_if_repo_doesnt_exist(self): - ui = UI([('subunit', b'')]) + ui = UI([("subunit", b"")]) cmd = load.load(ui) ui.set_command(cmd) calls = [] - cmd.repository_factory = RecordingRepositoryFactory(calls, - memory.RepositoryFactory()) + cmd.repository_factory = RecordingRepositoryFactory( + calls, memory.RepositoryFactory() + ) del calls[:] cmd.execute() - self.assertEqual([('open', ui.here)], calls) - self.assertEqual([('error', Wildcard)], ui.outputs) + self.assertEqual([("open", ui.here)], calls) + self.assertEqual([("error", Wildcard)], ui.outputs) self.assertThat( - ui.outputs[0][1], MatchesException(RepositoryNotFound('memory:'))) + ui.outputs[0][1], MatchesException(RepositoryNotFound("memory:")) + ) def test_load_returns_0_normally(self): - ui = UI([('subunit', b'')]) + ui = UI([("subunit", b"")]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -114,10 +114,10 @@ def test_load_returns_0_normally(self): def test_load_returns_1_on_failed_stream(self): buffer = BytesIO() stream = 
subunit.StreamResultToBytes(buffer) - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='fail') + stream.status(test_id="foo", test_status="inprogress") + stream.status(test_id="foo", test_status="fail") subunit_bytes = buffer.getvalue() - ui = UI([('subunit', subunit_bytes)]) + ui = UI([("subunit", subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -127,40 +127,64 @@ def test_load_returns_1_on_failed_stream(self): def test_load_new_shows_test_failures(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='fail') + stream.status(test_id="foo", test_status="inprogress") + stream.status(test_id="foo", test_status="fail") subunit_bytes = buffer.getvalue() - ui = UI([('subunit', subunit_bytes)]) + ui = UI([("subunit", subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() cmd.repository_factory.initialise(ui.here) self.assertEqual(1, cmd.execute()) self.assertEqual( - [('summary', False, 1, None, Wildcard, None, - [('id', 0, None), ('failures', 1, None)])], - ui.outputs[1:]) + [ + ( + "summary", + False, + 1, + None, + Wildcard, + None, + [("id", 0, None), ("failures", 1, None)], + ) + ], + ui.outputs[1:], + ) def test_load_new_shows_test_failure_details(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='fail', - file_name="traceback", mime_type='text/plain;charset=utf8', - file_bytes=b'arg\n') + stream.status(test_id="foo", test_status="inprogress") + stream.status( + test_id="foo", + test_status="fail", + file_name="traceback", + mime_type="text/plain;charset=utf8", + file_bytes=b"arg\n", + ) subunit_bytes = buffer.getvalue() - ui = UI([('subunit', subunit_bytes)]) + ui = 
UI([("subunit", subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() cmd.repository_factory.initialise(ui.here) self.assertEqual(1, cmd.execute()) suite = ui.outputs[0][1] - self.assertEqual([ - ('results', Wildcard), - ('summary', False, 1, None, Wildcard, None, - [('id', 0, None), ('failures', 1, None)])], - ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ( + "summary", + False, + 1, + None, + Wildcard, + None, + [("id", 0, None), ("failures", 1, None)], + ), + ], + ui.outputs, + ) result = testtools.StreamSummary() result.startTestRun() try: @@ -173,35 +197,48 @@ def test_load_new_shows_test_failure_details(self): def test_load_new_shows_test_skips(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='skip') + stream.status(test_id="foo", test_status="inprogress") + stream.status(test_id="foo", test_status="skip") subunit_bytes = buffer.getvalue() - ui = UI([('subunit', subunit_bytes)]) + ui = UI([("subunit", subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() cmd.repository_factory.initialise(ui.here) self.assertEqual(0, cmd.execute()) self.assertEqual( - [('results', Wildcard), - ('summary', True, 1, None, Wildcard, None, - [('id', 0, None), ('skips', 1, None)])], - ui.outputs) + [ + ("results", Wildcard), + ( + "summary", + True, + 1, + None, + Wildcard, + None, + [("id", 0, None), ("skips", 1, None)], + ), + ], + ui.outputs, + ) def test_load_new_shows_test_summary_no_tests(self): - ui = UI([('subunit', b'')]) + ui = UI([("subunit", b"")]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() cmd.repository_factory.initialise(ui.here) self.assertEqual(0, cmd.execute()) self.assertEqual( - [('results', Wildcard), - ('summary', True, 0, None, None, None, [('id', 0, None)])], - 
ui.outputs) + [ + ("results", Wildcard), + ("summary", True, 0, None, None, None, [("id", 0, None)]), + ], + ui.outputs, + ) def test_load_quiet_shows_nothing(self): - ui = UI([('subunit', b'')], [('quiet', True)]) + ui = UI([("subunit", b"")], [("quiet", True)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -210,7 +247,7 @@ def test_load_quiet_shows_nothing(self): self.assertEqual([], ui.outputs) def test_load_abort_over_interactive_stream(self): - ui = UI([('subunit', b''), ('interactive', b'a\n')]) + ui = UI([("subunit", b""), ("interactive", b"a\n")]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -218,13 +255,23 @@ def test_load_abort_over_interactive_stream(self): ret = cmd.execute() self.assertEqual( ui.outputs, - [('results', Wildcard), - ('summary', False, 1, None, None, None, - [('id', 0, None), ('failures', 1, None)])]) + [ + ("results", Wildcard), + ( + "summary", + False, + 1, + None, + None, + None, + [("id", 0, None), ("failures", 1, None)], + ), + ], + ) self.assertEqual(1, ret) def test_partial_passed_to_repo(self): - ui = UI([('subunit', b'')], [('quiet', True), ('partial', True)]) + ui = UI([("subunit", b"")], [("quiet", True), ("partial", True)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -232,19 +279,20 @@ def test_partial_passed_to_repo(self): retcode = cmd.execute() self.assertEqual([], ui.outputs) self.assertEqual(0, retcode) - self.assertEqual(True, - cmd.repository_factory.repos[ui.here].get_test_run(0)._partial) + self.assertEqual( + True, cmd.repository_factory.repos[ui.here].get_test_run(0)._partial + ) def test_load_timed_run(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) time = datetime(2011, 1, 1, 0, 0, 1, tzinfo=iso8601.UTC) - stream.status(test_id='foo', test_status='inprogress', timestamp=time) - stream.status(test_id='foo', test_status='success', - 
timestamp=time+timedelta(seconds=2)) + stream.status(test_id="foo", test_status="inprogress", timestamp=time) + stream.status( + test_id="foo", test_status="success", timestamp=time + timedelta(seconds=2) + ) timed_bytes = buffer.getvalue() - ui = UI( - [('subunit', timed_bytes)]) + ui = UI([("subunit", timed_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -255,8 +303,8 @@ def test_load_timed_run(self): # ThreadsafeForwardingResult (via ConcurrentTestSuite) that suppresses # time information not involved in the start or stop of a test. self.assertEqual( - [('summary', True, 1, None, 2.0, None, [('id', 0, None)])], - ui.outputs[1:]) + [("summary", True, 1, None, 2.0, None, [("id", 0, None)])], ui.outputs[1:] + ) def test_load_second_run(self): # If there's a previous run in the database, then show information @@ -265,16 +313,20 @@ def test_load_second_run(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) time = datetime(2011, 1, 2, 0, 0, 1, tzinfo=iso8601.UTC) - stream.status(test_id='foo', test_status='inprogress', timestamp=time) - stream.status(test_id='foo', test_status='fail', - timestamp=time+timedelta(seconds=2)) - stream.status(test_id='bar', test_status='inprogress', - timestamp=time+timedelta(seconds=4)) - stream.status(test_id='bar', test_status='fail', - timestamp=time+timedelta(seconds=6)) + stream.status(test_id="foo", test_status="inprogress", timestamp=time) + stream.status( + test_id="foo", test_status="fail", timestamp=time + timedelta(seconds=2) + ) + stream.status( + test_id="bar", + test_status="inprogress", + timestamp=time + timedelta(seconds=4), + ) + stream.status( + test_id="bar", test_status="fail", timestamp=time + timedelta(seconds=6) + ) timed_bytes = buffer.getvalue() - ui = UI( - [('subunit', timed_bytes)]) + ui = UI([("subunit", timed_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -285,13 +337,29 @@ 
def test_load_second_run(self): inserter = repo._get_inserter(False) # Insert a run with different results. inserter.startTestRun() - inserter.status(test_id=self.id(), test_status='inprogress', - timestamp=datetime(2011, 1, 1, 0, 0, 1, tzinfo=iso8601.UTC)) - inserter.status(test_id=self.id(), test_status='fail', - timestamp=datetime(2011, 1, 1, 0, 0, 10, tzinfo=iso8601.UTC)) + inserter.status( + test_id=self.id(), + test_status="inprogress", + timestamp=datetime(2011, 1, 1, 0, 0, 1, tzinfo=iso8601.UTC), + ) + inserter.status( + test_id=self.id(), + test_status="fail", + timestamp=datetime(2011, 1, 1, 0, 0, 10, tzinfo=iso8601.UTC), + ) inserter.stopTestRun() self.assertEqual(1, cmd.execute()) self.assertEqual( - [('summary', False, 2, 1, 6.0, -3.0, - [('id', 1, None), ('failures', 2, 1)])], - ui.outputs[1:]) + [ + ( + "summary", + False, + 2, + 1, + 6.0, + -3.0, + [("id", 1, None), ("failures", 2, 1)], + ) + ], + ui.outputs[1:], + ) diff --git a/testrepository/tests/commands/test_quickstart.py b/testrepository/tests/commands/test_quickstart.py index 756a8cf..f0fe3f1 100644 --- a/testrepository/tests/commands/test_quickstart.py +++ b/testrepository/tests/commands/test_quickstart.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -20,8 +20,7 @@ class TestCommand(ResourcedTestCase): - - def get_test_ui_and_cmd(self,args=()): + def get_test_ui_and_cmd(self, args=()): ui = UI(args=args) cmd = quickstart.quickstart(ui) ui.set_command(cmd) @@ -31,5 +30,5 @@ def test_shows_some_rest(self): ui, cmd = self.get_test_ui_and_cmd() self.assertEqual(0, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('rest', ui.outputs[0][0]) - self.assertTrue('Overview' in ui.outputs[0][1]) + self.assertEqual("rest", ui.outputs[0][0]) + self.assertTrue("Overview" in ui.outputs[0][1]) diff --git a/testrepository/tests/commands/test_run.py b/testrepository/tests/commands/test_run.py index 096cc40..381087a 100644 --- a/testrepository/tests/commands/test_run.py +++ b/testrepository/tests/commands/test_run.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -22,16 +22,15 @@ from fixtures import ( Fixture, MonkeyPatch, - ) +) import subunit -from subunit import RemotedTestCase from testscenarios.scenarios import multiply_scenarios from testtools.matchers import ( Equals, HasLength, MatchesException, MatchesListwise, - ) +) from testrepository.commands import run from testrepository.ui.model import UI, ProcessModel @@ -40,18 +39,21 @@ from testrepository.tests import ResourcedTestCase, Wildcard from testrepository.tests.stubpackage import TempDirResource from testrepository.tests.test_testcommand import FakeTestCommand -from testrepository.tests.test_repository import make_test class TestCommand(ResourcedTestCase): + resources = [("tempdir", TempDirResource())] - resources = [('tempdir', TempDirResource())] - - def get_test_ui_and_cmd(self, options=(), args=(), proc_outputs=(), - proc_results=()): + def get_test_ui_and_cmd( + self, options=(), args=(), proc_outputs=(), proc_results=() + ): self.dirty() - ui = UI(options=options, args=args, proc_outputs=proc_outputs, - proc_results=proc_results) + ui = UI( + options=options, + args=args, + proc_outputs=proc_outputs, + proc_results=proc_results, + ) ui.here = self.tempdir cmd = run.run(ui) ui.set_command(cmd) @@ -59,23 +61,23 @@ def get_test_ui_and_cmd(self, options=(), args=(), proc_outputs=(), def dirty(self): # Ugly: TODO - improve testresources to make this go away. 
- dict(self.resources)['tempdir']._dirty = True + dict(self.resources)["tempdir"]._dirty = True def config_path(self): - return os.path.join(self.tempdir, '.testr.conf') + return os.path.join(self.tempdir, ".testr.conf") def set_config(self, text): - with open(self.config_path(), 'wt') as stream: + with open(self.config_path(), "wt") as stream: stream.write(text) def setup_repo(self, cmd, ui, failures=True): repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - inserter.status(test_id='passing', test_status='success') + inserter.status(test_id="passing", test_status="success") if failures: - inserter.status(test_id='failing1', test_status='fail') - inserter.status(test_id='failing2', test_status='fail') + inserter.status(test_id="failing1", test_status="fail") + inserter.status(test_id="failing2", test_status="fail") inserter.stopTestRun() def test_no_config_file_errors(self): @@ -83,228 +85,300 @@ def test_no_config_file_errors(self): cmd.repository_factory.initialise(ui.here) self.assertEqual(3, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('error', ui.outputs[0][0]) - self.assertThat(ui.outputs[0][1], - MatchesException(ValueError('No .testr.conf config file'))) + self.assertEqual("error", ui.outputs[0][0]) + self.assertThat( + ui.outputs[0][1], MatchesException(ValueError("No .testr.conf config file")) + ) def test_no_config_settings_errors(self): ui, cmd = self.get_test_ui_and_cmd() cmd.repository_factory.initialise(ui.here) - self.set_config('') + self.set_config("") self.assertEqual(3, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('error', ui.outputs[0][0]) - self.assertThat(ui.outputs[0][1], MatchesException(ValueError( - 'No test_command option present in .testr.conf'))) + self.assertEqual("error", ui.outputs[0][0]) + self.assertThat( + ui.outputs[0][1], + MatchesException( + ValueError("No test_command option present in .testr.conf") + ), + ) def 
test_IDFILE_failures(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('failing', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("failing", True)]) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) cmd.command_factory = FakeTestCommand result = cmd.execute() - listfile = os.path.join(ui.here, 'failing.list') - expected_cmd = 'foo --load-list %s' % listfile - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + listfile = os.path.join(ui.here, "failing.list") + expected_cmd = "foo --load-list %s" % listfile + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) # TODO: check the list file is written, and deleted. 
self.assertEqual(0, result) def test_IDLIST_failures(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('failing', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("failing", True)]) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) - self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n') + self.set_config("[DEFAULT]\ntest_command=foo $IDLIST\n") self.assertEqual(0, cmd.execute()) - expected_cmd = 'foo failing1 failing2' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]), - ], ui.outputs) + expected_cmd = "foo failing1 failing2" + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) # Failing causes partial runs to be used. 
- self.assertEqual(True, - cmd.repository_factory.repos[ui.here].get_test_run(1)._partial) + self.assertEqual( + True, cmd.repository_factory.repos[ui.here].get_test_run(1)._partial + ) def test_IDLIST_default_is_empty(self): ui, cmd = self.get_test_ui_and_cmd() cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) - self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n') + self.set_config("[DEFAULT]\ntest_command=foo $IDLIST\n") self.assertEqual(0, cmd.execute()) - expected_cmd = 'foo ' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + expected_cmd = "foo " + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) def test_IDLIST_default_passed_normally(self): ui, cmd = self.get_test_ui_and_cmd() cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\ntest_id_list_default=whoo yea\n') + "[DEFAULT]\ntest_command=foo $IDLIST\ntest_id_list_default=whoo yea\n" + ) self.assertEqual(0, cmd.execute()) - expected_cmd = 'foo whoo yea' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + expected_cmd = "foo whoo yea" + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) def 
test_IDFILE_not_passed_normally(self): ui, cmd = self.get_test_ui_and_cmd() cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) self.assertEqual(0, cmd.execute()) - expected_cmd = 'foo ' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]), - ], ui.outputs) + expected_cmd = "foo " + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) def capture_ids(self, list_result=None): params = [] + def capture_ids(self, ids, args, test_filters=None): params.append([self, ids, args, test_filters]) result = Fixture() - result.run_tests = lambda:[] + result.run_tests = lambda: [] if list_result is not None: - result.list_tests = lambda:list(list_result) + result.list_tests = lambda: list(list_result) return result + return params, capture_ids def test_load_list_failing_takes_id_intersection(self): list_file = tempfile.NamedTemporaryFile() self.addCleanup(list_file.close) - write_list(list_file, ['foo', 'quux', 'failing1']) + write_list(list_file, ["foo", "quux", "failing1"]) # The extra tests - foo, quux - won't match known failures, and the # unlisted failure failing2 won't match the list. 
- expected_ids = set(['failing1']) + expected_ids = set(["failing1"]) list_file.flush() ui, cmd = self.get_test_ui_and_cmd( - options=[('load_list', list_file.name), ('failing', True)]) + options=[("load_list", list_file.name), ("failing", True)] + ) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) params, capture_ids = self.capture_ids() - self.useFixture(MonkeyPatch( - 'testrepository.testcommand.TestCommand.get_run_command', - capture_ids)) + self.useFixture( + MonkeyPatch( + "testrepository.testcommand.TestCommand.get_run_command", capture_ids + ) + ) cmd_result = cmd.execute() - self.assertEqual([ - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) self.assertEqual(0, cmd_result) self.assertEqual([[Wildcard, expected_ids, [], None]], params) def test_load_list_passes_ids(self): list_file = tempfile.NamedTemporaryFile() self.addCleanup(list_file.close) - expected_ids = set(['foo', 'quux', 'bar']) + expected_ids = set(["foo", "quux", "bar"]) write_list(list_file, expected_ids) list_file.flush() - ui, cmd = self.get_test_ui_and_cmd( - options=[('load_list', list_file.name)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("load_list", list_file.name)]) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) params, capture_ids = self.capture_ids() - self.useFixture(MonkeyPatch( - 'testrepository.testcommand.TestCommand.get_run_command', - capture_ids)) + self.useFixture( + 
MonkeyPatch( + "testrepository.testcommand.TestCommand.get_run_command", capture_ids + ) + ) cmd_result = cmd.execute() - self.assertEqual([ - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) self.assertEqual(0, cmd_result) self.assertEqual([[Wildcard, expected_ids, [], None]], params) def test_extra_options_passed_in(self): - ui, cmd = self.get_test_ui_and_cmd(args=('--', 'bar', 'quux')) + ui, cmd = self.get_test_ui_and_cmd(args=("--", "bar", "quux")) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) self.assertEqual(0, cmd.execute()) - expected_cmd = 'foo bar quux' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + expected_cmd = "foo bar quux" + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) def test_quiet_passed_down(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('quiet', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("quiet", True)]) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) - self.set_config( - '[DEFAULT]\ntest_command=foo\n') + self.set_config("[DEFAULT]\ntest_command=foo\n") result = cmd.execute() - expected_cmd = 'foo' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': 
True, 'stdin': PIPE, 'stdout': PIPE}), - ], ui.outputs) + expected_cmd = "foo" + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ], + ui.outputs, + ) self.assertEqual(0, result) def test_partial_passed_to_repo(self): - ui, cmd = self.get_test_ui_and_cmd( - options=[('quiet', True), ('partial', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("quiet", True), ("partial", True)]) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) - self.set_config( - '[DEFAULT]\ntest_command=foo\n') + self.set_config("[DEFAULT]\ntest_command=foo\n") result = cmd.execute() - expected_cmd = 'foo' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ], ui.outputs) + expected_cmd = "foo" + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ], + ui.outputs, + ) self.assertEqual(0, result) - self.assertEqual(True, - cmd.repository_factory.repos[ui.here].get_test_run(1)._partial) + self.assertEqual( + True, cmd.repository_factory.repos[ui.here].get_test_run(1)._partial + ) def test_load_failure_exposed(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='fail') + stream.status(test_id="foo", test_status="inprogress") + stream.status(test_id="foo", test_status="fail") subunit_bytes = buffer.getvalue() - ui, cmd = self.get_test_ui_and_cmd(options=[('quiet', True),], - proc_outputs=[subunit_bytes]) + ui, cmd = self.get_test_ui_and_cmd( + options=[ + ("quiet", True), + ], + proc_outputs=[subunit_bytes], + ) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) - self.set_config('[DEFAULT]\ntest_command=foo\n') + 
self.set_config("[DEFAULT]\ntest_command=foo\n") result = cmd.execute() cmd.repository_factory.repos[ui.here].get_test_run(1) self.assertEqual(1, result) @@ -312,179 +386,224 @@ def test_load_failure_exposed(self): def test_process_exit_code_nonzero_causes_synthetic_error_test(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='success') + stream.status(test_id="foo", test_status="inprogress") + stream.status(test_id="foo", test_status="success") subunit_bytes = buffer.getvalue() - ui, cmd = self.get_test_ui_and_cmd(options=[('quiet', True),], + ui, cmd = self.get_test_ui_and_cmd( + options=[ + ("quiet", True), + ], proc_outputs=[subunit_bytes], - proc_results=[2]) - # 2 is non-zero, and non-zero triggers the behaviour of exiting - # with 1 - but we want to see that it doesn't pass-through the - # value literally. + proc_results=[2], + ) + # 2 is non-zero, and non-zero triggers the behaviour of exiting + # with 1 - but we want to see that it doesn't pass-through the + # value literally. 
cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) - self.set_config('[DEFAULT]\ntest_command=foo\n') + self.set_config("[DEFAULT]\ntest_command=foo\n") result = cmd.execute() self.assertEqual(1, result) run = cmd.repository_factory.repos[ui.here].get_test_run(1) - self.assertEqual([Wildcard, 'fail'], - [test['status'] for test in run._tests]) + self.assertEqual([Wildcard, "fail"], [test["status"] for test in run._tests]) def test_regex_test_filter(self): - ui, cmd = self.get_test_ui_and_cmd(args=('ab.*cd', '--', 'bar', 'quux')) + ui, cmd = self.get_test_ui_and_cmd(args=("ab.*cd", "--", "bar", "quux")) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' - 'test_id_option=--load-list $IDFILE\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n" + "test_id_option=--load-list $IDFILE\n" + "test_list_option=--list\n" + ) params, capture_ids = self.capture_ids() - self.useFixture(MonkeyPatch( - 'testrepository.testcommand.TestCommand.get_run_command', - capture_ids)) + self.useFixture( + MonkeyPatch( + "testrepository.testcommand.TestCommand.get_run_command", capture_ids + ) + ) cmd_result = cmd.execute() - self.assertEqual([ - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) self.assertEqual(0, cmd_result) self.assertThat(params[0][1], Equals(None)) - self.assertThat( - params[0][2], MatchesListwise([Equals('bar'), Equals('quux')])) - self.assertThat(params[0][3], MatchesListwise([Equals('ab.*cd')])) + self.assertThat(params[0][2], MatchesListwise([Equals("bar"), Equals("quux")])) + self.assertThat(params[0][3], MatchesListwise([Equals("ab.*cd")])) self.assertThat(params, HasLength(1)) def test_regex_test_filter_with_explicit_ids(self): ui, cmd 
= self.get_test_ui_and_cmd( - args=('g1', '--', 'bar', 'quux'),options=[('failing', True)]) + args=("g1", "--", "bar", "quux"), options=[("failing", True)] + ) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' - 'test_id_option=--load-list $IDFILE\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n" + "test_id_option=--load-list $IDFILE\n" + "test_list_option=--list\n" + ) params, capture_ids = self.capture_ids() - self.useFixture(MonkeyPatch( - 'testrepository.testcommand.TestCommand.get_run_command', - capture_ids)) + self.useFixture( + MonkeyPatch( + "testrepository.testcommand.TestCommand.get_run_command", capture_ids + ) + ) cmd_result = cmd.execute() - self.assertEqual([ - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]) - ], ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) self.assertEqual(0, cmd_result) - self.assertThat(params[0][1], Equals(['failing1', 'failing2'])) - self.assertThat( - params[0][2], MatchesListwise([Equals('bar'), Equals('quux')])) - self.assertThat(params[0][3], MatchesListwise([Equals('g1')])) + self.assertThat(params[0][1], Equals(["failing1", "failing2"])) + self.assertThat(params[0][2], MatchesListwise([Equals("bar"), Equals("quux")])) + self.assertThat(params[0][3], MatchesListwise([Equals("g1")])) self.assertThat(params, HasLength(1)) def test_until_failure(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('until_failure', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("until_failure", True)]) buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='success') + stream.status(test_id="foo", test_status="inprogress") + stream.status(test_id="foo", 
test_status="success") subunit_bytes1 = buffer.getvalue() buffer.seek(0) buffer.truncate() - stream.status(test_id='foo', test_status='inprogress') - stream.status(test_id='foo', test_status='fail') + stream.status(test_id="foo", test_status="inprogress") + stream.status(test_id="foo", test_status="fail") subunit_bytes2 = buffer.getvalue() ui.proc_outputs = [ - subunit_bytes1, # stream one, works - subunit_bytes2, # stream two, fails - ] + subunit_bytes1, # stream one, works + subunit_bytes2, # stream two, fails + ] ui.require_proc_stdout = True cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' - 'test_id_option=--load-list $IDFILE\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n" + "test_id_option=--load-list $IDFILE\n" + "test_list_option=--list\n" + ) cmd_result = cmd.execute() - expected_cmd = 'foo ' - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', True, 1, -2, Wildcard, Wildcard, [('id', 1, None)]), - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - ('summary', False, 1, 0, Wildcard, Wildcard, - [('id', 2, None), ('failures', 1, 1)]) - ], ui.outputs) + expected_cmd = "foo " + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ("summary", True, 1, -2, Wildcard, Wildcard, [("id", 1, None)]), + ("values", [("running", expected_cmd)]), + ( + "popen", + (expected_cmd,), + {"shell": True, "stdin": PIPE, "stdout": PIPE}, + ), + ("results", Wildcard), + ( + "summary", + False, + 1, + 0, + Wildcard, + Wildcard, + [("id", 2, None), ("failures", 1, 1)], + ), + ], + ui.outputs, + ) 
self.assertEqual(1, cmd_result) def test_failure_no_tests_run_when_no_failures_failures(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('failing', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("failing", True)]) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui, failures=False) self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) cmd.command_factory = FakeTestCommand result = cmd.execute() - self.assertEqual([ - ('results', Wildcard), - ('summary', True, 0, -1, None, None, [('id', 1, None)]) - ], ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ("summary", True, 0, -1, None, None, [("id", 1, None)]), + ], + ui.outputs, + ) self.assertEqual(0, result) def test_isolated_runs_multiple_processes(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('isolated', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("isolated", True)]) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' - 'test_id_option=--load-list $IDFILE\n' - 'test_list_option=--list\n') - params, capture_ids = self.capture_ids(list_result=['ab', 'cd', 'ef']) - self.useFixture(MonkeyPatch( - 'testrepository.testcommand.TestCommand.get_run_command', - capture_ids)) + "[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n" + "test_id_option=--load-list $IDFILE\n" + "test_list_option=--list\n" + ) + params, capture_ids = self.capture_ids(list_result=["ab", "cd", "ef"]) + self.useFixture( + MonkeyPatch( + "testrepository.testcommand.TestCommand.get_run_command", capture_ids + ) + ) cmd_result = cmd.execute() - self.assertEqual([ - ('results', Wildcard), - ('summary', True, 0, -3, None, None, [('id', 1, None)]), - ('results', Wildcard), - ('summary', True, 0, 0, None, None, [('id', 2, None)]), - ('results', Wildcard), - ('summary', True, 0, 
0, None, None, [('id', 3, None)]), - ], ui.outputs) + self.assertEqual( + [ + ("results", Wildcard), + ("summary", True, 0, -3, None, None, [("id", 1, None)]), + ("results", Wildcard), + ("summary", True, 0, 0, None, None, [("id", 2, None)]), + ("results", Wildcard), + ("summary", True, 0, 0, None, None, [("id", 3, None)]), + ], + ui.outputs, + ) self.assertEqual(0, cmd_result) # once to list, then 3 each executing one test. - self.assertThat(params, HasLength(4)) + self.assertThat(params, HasLength(4)) self.assertThat(params[0][1], Equals(None)) - self.assertThat(params[1][1], Equals(['ab'])) - self.assertThat(params[2][1], Equals(['cd'])) - self.assertThat(params[3][1], Equals(['ef'])) + self.assertThat(params[1][1], Equals(["ab"])) + self.assertThat(params[2][1], Equals(["cd"])) + self.assertThat(params[3][1], Equals(["ef"])) def test_fails_if_repo_doesnt_exist(self): ui, cmd = self.get_test_ui_and_cmd(args=()) cmd.repository_factory = memory.RepositoryFactory() self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) self.assertEqual(3, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('error', ui.outputs[0][0]) + self.assertEqual("error", ui.outputs[0][0]) self.assertThat(ui.outputs[0][1], MatchesException(RepositoryNotFound)) def test_force_init(self): - ui, cmd = self.get_test_ui_and_cmd(options=[('force_init', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("force_init", True)]) cmd.repository_factory = memory.RepositoryFactory() self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) self.assertEqual(0, cmd.execute()) - self.assertEqual([ - ('values', [('running', 'foo ')]), - ('popen', ('foo ',), - {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), - ('results', Wildcard), - 
('summary', True, 0, None, Wildcard, Wildcard, [('id', 0, None)]), - ], ui.outputs) + self.assertEqual( + [ + ("values", [("running", "foo ")]), + ("popen", ("foo ",), {"shell": True, "stdin": PIPE, "stdout": PIPE}), + ("results", Wildcard), + ("summary", True, 0, None, Wildcard, Wildcard, [("id", 0, None)]), + ], + ui.outputs, + ) def read_all(stream): @@ -500,7 +619,7 @@ def readline(stream): def readlines(stream): - return b''.join(stream.readlines()) + return b"".join(stream.readlines()) def accumulate(stream, reader): @@ -509,19 +628,22 @@ def accumulate(stream, reader): while content: accumulator.append(content) content = reader(stream) - return b''.join(accumulator) + return b"".join(accumulator) class TestReturnCodeToSubunit(ResourcedTestCase): - scenarios = multiply_scenarios( - [('readdefault', dict(reader=read_all)), - ('readsingle', dict(reader=read_single)), - ('readline', dict(reader=readline)), - ('readlines', dict(reader=readlines)), - ], - [('noeol', dict(stdout=b'foo\nbar')), - ('trailingeol', dict(stdout=b'foo\nbar\n'))]) + [ + ("readdefault", dict(reader=read_all)), + ("readsingle", dict(reader=read_single)), + ("readline", dict(reader=readline)), + ("readlines", dict(reader=readlines)), + ], + [ + ("noeol", dict(stdout=b"foo\nbar")), + ("trailingeol", dict(stdout=b"foo\nbar\n")), + ], + ) def test_returncode_0_no_change(self): proc = ProcessModel(None) @@ -539,10 +661,14 @@ def test_returncode_nonzero_fail_appended_to_content(self): stream = run.ReturnCodeToSubunit(proc) content = accumulate(stream, self.reader) buffer = BytesIO() - buffer.write(b'foo\nbar\n') + buffer.write(b"foo\nbar\n") stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='process-returncode', test_status='fail', - file_name='traceback', mime_type='text/plain;charset=utf8', - file_bytes=b'returncode 1') + stream.status( + test_id="process-returncode", + test_status="fail", + file_name="traceback", + mime_type="text/plain;charset=utf8", + 
file_bytes=b"returncode 1", + ) expected_content = buffer.getvalue() self.assertEqual(expected_content, content) diff --git a/testrepository/tests/commands/test_slowest.py b/testrepository/tests/commands/test_slowest.py index a791d89..fd3103f 100644 --- a/testrepository/tests/commands/test_slowest.py +++ b/testrepository/tests/commands/test_slowest.py @@ -20,7 +20,6 @@ timezone, ) -from testtools import PlaceHolder from testrepository.commands import slowest from testrepository.ui.model import UI @@ -29,7 +28,6 @@ class TestCommand(ResourcedTestCase): - def get_test_ui_and_cmd(self, options=(), args=()): ui = UI(options=options, args=args) cmd = slowest.slowest(ui) @@ -40,7 +38,7 @@ def test_shows_nothing_for_no_tests(self): """Having no tests leads to an error and no output.""" ui, cmd = self.get_test_ui_and_cmd() cmd.repository_factory = memory.RepositoryFactory() - repo = cmd.repository_factory.initialise(ui.here) + cmd.repository_factory.initialise(ui.here) self.assertEqual(3, cmd.execute()) self.assertEqual([], ui.outputs) @@ -55,10 +53,12 @@ def insert_one_test_with_runtime(self, inserter, runtime): """ test_id = self.getUniqueString() start_time = datetime.now(timezone.utc) - inserter.status(test_id=test_id, test_status='inprogress', - timestamp=start_time) - inserter.status(test_id=test_id, test_status='success', - timestamp=start_time + timedelta(seconds=runtime)) + inserter.status(test_id=test_id, test_status="inprogress", timestamp=start_time) + inserter.status( + test_id=test_id, + test_status="success", + timestamp=start_time + timedelta(seconds=runtime), + ) return test_id def test_shows_one_test_when_one_test(self): @@ -69,15 +69,19 @@ def test_shows_one_test_when_one_test(self): inserter = repo.get_inserter() inserter.startTestRun() runtime = 0.1 - test_id = self.insert_one_test_with_runtime( - inserter, runtime) + test_id = self.insert_one_test_with_runtime(inserter, runtime) inserter.stopTestRun() retcode = cmd.execute() self.assertEqual( - 
[('table', - [slowest.slowest.TABLE_HEADER] - + slowest.slowest.format_times([(test_id, runtime)]))], - ui.outputs) + [ + ( + "table", + [slowest.slowest.TABLE_HEADER] + + slowest.slowest.format_times([(test_id, runtime)]), + ) + ], + ui.outputs, + ) self.assertEqual(0, retcode) def test_orders_tests_based_on_runtime(self): @@ -88,30 +92,23 @@ def test_orders_tests_based_on_runtime(self): inserter = repo.get_inserter() inserter.startTestRun() runtime1 = 1.1 - test_id1 = self.insert_one_test_with_runtime( - inserter, runtime1) + test_id1 = self.insert_one_test_with_runtime(inserter, runtime1) runtime2 = 0.1 - test_id2 = self.insert_one_test_with_runtime( - inserter, runtime2) + test_id2 = self.insert_one_test_with_runtime(inserter, runtime2) inserter.stopTestRun() retcode = cmd.execute() - rows = [(test_id1, runtime1), - (test_id2, runtime2)] + rows = [(test_id1, runtime1), (test_id2, runtime2)] rows = slowest.slowest.format_times(rows) self.assertEqual(0, retcode) - self.assertEqual( - [('table', - [slowest.slowest.TABLE_HEADER] + rows)], - ui.outputs) + self.assertEqual([("table", [slowest.slowest.TABLE_HEADER] + rows)], ui.outputs) def insert_lots_of_tests_with_timing(self, repo): inserter = repo.get_inserter() inserter.startTestRun() runtimes = [float(r) for r in range(slowest.slowest.DEFAULT_ROWS_SHOWN + 1)] test_ids = [ - self.insert_one_test_with_runtime( - inserter, runtime) - for runtime in runtimes] + self.insert_one_test_with_runtime(inserter, runtime) for runtime in runtimes + ] inserter.stopTestRun() return test_ids, runtimes @@ -122,18 +119,16 @@ def test_limits_output_by_default(self): repo = cmd.repository_factory.initialise(ui.here) test_ids, runtimes = self.insert_lots_of_tests_with_timing(repo) retcode = cmd.execute() - rows = list(zip(reversed(test_ids), reversed(runtimes)) - )[:slowest.slowest.DEFAULT_ROWS_SHOWN] + rows = list(zip(reversed(test_ids), reversed(runtimes)))[ + : slowest.slowest.DEFAULT_ROWS_SHOWN + ] rows = 
slowest.slowest.format_times(rows) self.assertEqual(0, retcode) - self.assertEqual( - [('table', - [slowest.slowest.TABLE_HEADER] + rows)], - ui.outputs) + self.assertEqual([("table", [slowest.slowest.TABLE_HEADER] + rows)], ui.outputs) def test_option_to_show_all_rows_does_so(self): """When the all option is given all rows are shown.""" - ui, cmd = self.get_test_ui_and_cmd(options=[('all', True)]) + ui, cmd = self.get_test_ui_and_cmd(options=[("all", True)]) cmd.repository_factory = memory.RepositoryFactory() repo = cmd.repository_factory.initialise(ui.here) test_ids, runtimes = self.insert_lots_of_tests_with_timing(repo) @@ -141,7 +136,4 @@ def test_option_to_show_all_rows_does_so(self): rows = zip(reversed(test_ids), reversed(runtimes)) rows = slowest.slowest.format_times(rows) self.assertEqual(0, retcode) - self.assertEqual( - [('table', - [slowest.slowest.TABLE_HEADER] + rows)], - ui.outputs) + self.assertEqual([("table", [slowest.slowest.TABLE_HEADER] + rows)], ui.outputs) diff --git a/testrepository/tests/commands/test_stats.py b/testrepository/tests/commands/test_stats.py index 434d3ee..dc914d5 100644 --- a/testrepository/tests/commands/test_stats.py +++ b/testrepository/tests/commands/test_stats.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -21,8 +21,7 @@ class TestCommand(ResourcedTestCase): - - def get_test_ui_and_cmd(self,args=()): + def get_test_ui_and_cmd(self, args=()): ui = UI(args=args) cmd = stats.stats(ui) ui.set_command(cmd) @@ -39,4 +38,4 @@ def test_shows_number_of_runs(self): inserter.startTestRun() inserter.stopTestRun() self.assertEqual(0, cmd.execute()) - self.assertEqual([('values', [('runs', 2)])], ui.outputs) + self.assertEqual([("values", [("runs", 2)])], ui.outputs) diff --git a/testrepository/tests/monkeypatch.py b/testrepository/tests/monkeypatch.py index bd788c3..7d48ff9 100644 --- a/testrepository/tests/monkeypatch.py +++ b/testrepository/tests/monkeypatch.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -17,24 +17,27 @@ This has been moved to fixtures, and should be removed from here. """ + def monkeypatch(name, new_value): """Replace name with new_value. :return: A callable which will restore the original value. """ - location, attribute = name.rsplit('.', 1) + location, attribute = name.rsplit(".", 1) # Import, swallowing all errors as any element of location may be # a class or some such thing. 
try: __import__(location, {}, {}) except ImportError: pass - components = location.split('.') + components = location.split(".") current = __import__(components[0], {}, {}) for component in components[1:]: current = getattr(current, component) old_value = getattr(current, attribute) setattr(current, attribute, new_value) + def restore(): setattr(current, attribute, old_value) + return restore diff --git a/testrepository/tests/repository/__init__.py b/testrepository/tests/repository/__init__.py index 8d9ba38..f63bcc4 100644 --- a/testrepository/tests/repository/__init__.py +++ b/testrepository/tests/repository/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -16,11 +16,11 @@ import unittest + def test_suite(): names = [ - 'file', - ] - module_names = ['testrepository.tests.repository.test_' + name for name in - names] + "file", + ] + module_names = ["testrepository.tests.repository.test_" + name for name in names] loader = unittest.TestLoader() return loader.loadTestsFromNames(module_names) diff --git a/testrepository/tests/repository/test_file.py b/testrepository/tests/repository/test_file.py index 2409e53..459011a 100644 --- a/testrepository/tests/repository/test_file.py +++ b/testrepository/tests/repository/test_file.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. 
A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -27,7 +27,6 @@ class FileRepositoryFixture(Fixture): - def __init__(self, case): self.tempdir = case.tempdir self.resource = case.resources[0][1] @@ -43,26 +42,25 @@ class HomeDirTempDir(Fixture): def setUp(self): super(HomeDirTempDir, self).setUp() - home_dir = os.path.expanduser('~') + home_dir = os.path.expanduser("~") self.temp_dir = tempfile.mkdtemp(dir=home_dir) self.addCleanup(shutil.rmtree, self.temp_dir) - self.short_path = os.path.join('~', os.path.basename(self.temp_dir)) + self.short_path = os.path.join("~", os.path.basename(self.temp_dir)) class TestFileRepository(ResourcedTestCase): - - resources = [('tempdir', TempDirResource())] + resources = [("tempdir", TempDirResource())] def test_initialise(self): self.useFixture(FileRepositoryFixture(self)) - base = os.path.join(self.tempdir, '.testrepository') - stream = open(os.path.join(base, 'format'), 'rt') + base = os.path.join(self.tempdir, ".testrepository") + stream = open(os.path.join(base, "format"), "rt") try: contents = stream.read() finally: stream.close() self.assertEqual("1\n", contents) - stream = open(os.path.join(base, 'next-stream'), 'rt') + stream = open(os.path.join(base, "next-stream"), "rt") try: contents = stream.read() finally: @@ -79,7 +77,7 @@ def test_inserter_output_path(self): inserter = repo.get_inserter() inserter.startTestRun() inserter.stopTestRun() - self.assertTrue(os.path.exists(os.path.join(repo.base, '0'))) + self.assertTrue(os.path.exists(os.path.join(repo.base, "0"))) def test_inserting_creates_id(self): # When inserting a stream, an id is returned from stopTestRun. 
@@ -97,14 +95,16 @@ def test_open_expands_user_directory(self): def test_next_stream_corruption_error(self): repo = self.useFixture(FileRepositoryFixture(self)).repo - open(os.path.join(repo.base, 'next-stream'), 'wb').close() - self.assertThat(repo.count, Raises( - MatchesException(ValueError("Corrupt next-stream file: ''")))) + open(os.path.join(repo.base, "next-stream"), "wb").close() + self.assertThat( + repo.count, + Raises(MatchesException(ValueError("Corrupt next-stream file: ''"))), + ) def test_get_test_run_unexpected_ioerror_errno(self): repo = self.useFixture(FileRepositoryFixture(self)).repo inserter = repo.get_inserter() inserter.startTestRun() inserter.stopTestRun() - os.chmod(os.path.join(repo.base, '0'), 0000) - self.assertRaises(IOError, repo.get_test_run, '0') + os.chmod(os.path.join(repo.base, "0"), 0000) + self.assertRaises(IOError, repo.get_test_run, "0") diff --git a/testrepository/tests/stubpackage.py b/testrepository/tests/stubpackage.py index 8a8c076..c159744 100644 --- a/testrepository/tests/stubpackage.py +++ b/testrepository/tests/stubpackage.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -20,8 +20,9 @@ from testresources import TestResource + class TempDirResource(TestResource): - """A temporary directory resource. + """A temporary directory resource. This resource is never considered dirty. """ @@ -35,34 +36,33 @@ def clean(self, resource): class StubPackage(object): """A temporary package for tests. 
- + :ivar base: The directory containing the package dir. """ class StubPackageResource(TestResource): - def __init__(self, packagename, modulelist, init=True): super(StubPackageResource, self).__init__() self.packagename = packagename self.modulelist = modulelist self.init = init - self.resources = [('base', TempDirResource())] + self.resources = [("base", TempDirResource())] def make(self, dependency_resources): result = StubPackage() - base = dependency_resources['base'] + base = dependency_resources["base"] root = os.path.join(base, self.packagename) os.mkdir(root) init_seen = not self.init for modulename, contents in self.modulelist: - stream = open(os.path.join(root, modulename), 'wt') + stream = open(os.path.join(root, modulename), "wt") try: stream.write(contents) finally: stream.close() - if modulename == '__init__.py': + if modulename == "__init__.py": init_seen = True if not init_seen: - open(os.path.join(root, '__init__.py'), 'wt').close() + open(os.path.join(root, "__init__.py"), "wt").close() return result diff --git a/testrepository/tests/test_arguments.py b/testrepository/tests/test_arguments.py index a10175b..3735138 100644 --- a/testrepository/tests/test_arguments.py +++ b/testrepository/tests/test_arguments.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -17,66 +17,67 @@ from testtools.matchers import ( Equals, raises, - ) +) from testrepository import arguments from testrepository.tests import ResourcedTestCase class TestAbstractArgument(ResourcedTestCase): - def test_init_base(self): - arg = arguments.AbstractArgument('name') - self.assertEqual('name', arg.name) - self.assertEqual('name', arg.summary()) + arg = arguments.AbstractArgument("name") + self.assertEqual("name", arg.name) + self.assertEqual("name", arg.summary()) def test_init_optional(self): - arg = arguments.AbstractArgument('name', min=0) + arg = arguments.AbstractArgument("name", min=0) self.assertEqual(0, arg.minimum_count) - self.assertEqual('name?', arg.summary()) + self.assertEqual("name?", arg.summary()) def test_init_repeating(self): - arg = arguments.AbstractArgument('name', max=None) + arg = arguments.AbstractArgument("name", max=None) self.assertEqual(None, arg.maximum_count) - self.assertEqual('name+', arg.summary()) + self.assertEqual("name+", arg.summary()) def test_init_optional_repeating(self): - arg = arguments.AbstractArgument('name', min=0, max=None) + arg = arguments.AbstractArgument("name", min=0, max=None) self.assertEqual(None, arg.maximum_count) - self.assertEqual('name*', arg.summary()) + self.assertEqual("name*", arg.summary()) def test_init_arbitrary(self): - arg = arguments.AbstractArgument('name', max=2) - self.assertEqual('name{1,2}', arg.summary()) + arg = arguments.AbstractArgument("name", max=2) + self.assertEqual("name{1,2}", arg.summary()) def test_init_arbitrary_infinite(self): - arg = arguments.AbstractArgument('name', min=2, max=None) - self.assertEqual('name{2,}', arg.summary()) + arg = arguments.AbstractArgument("name", min=2, max=None) + self.assertEqual("name{2,}", arg.summary()) def test_parsing_calls__parse_one(self): calls = [] + class AnArgument(arguments.AbstractArgument): def _parse_one(self, arg): calls.append(arg) - return ('1', arg) - argument = AnArgument('foo', max=2) - args = ['thing', 
'other', 'stranger'] + return ("1", arg) + + argument = AnArgument("foo", max=2) + args = ["thing", "other", "stranger"] # results are returned - self.assertEqual([('1', 'thing'), ('1', 'other')], - argument.parse(args)) + self.assertEqual([("1", "thing"), ("1", "other")], argument.parse(args)) # used args are removed - self.assertEqual(['stranger'], args) + self.assertEqual(["stranger"], args) # parse function was used - self.assertEqual(['thing', 'other'], calls) + self.assertEqual(["thing", "other"], calls) def test_parsing_unlimited(self): class AnArgument(arguments.AbstractArgument): def _parse_one(self, arg): return arg - argument = AnArgument('foo', max=None) - args = ['thing', 'other'] + + argument = AnArgument("foo", max=None) + args = ["thing", "other"] # results are returned - self.assertEqual(['thing', 'other'], argument.parse(args)) + self.assertEqual(["thing", "other"], argument.parse(args)) # used args are removed self.assertEqual([], args) @@ -84,17 +85,19 @@ def test_parsing_too_few(self): class AnArgument(arguments.AbstractArgument): def _parse_one(self, arg): return arg - argument = AnArgument('foo') + + argument = AnArgument("foo") self.assertThat(lambda: argument.parse([]), raises(ValueError)) def test_parsing_optional_not_matching(self): class AnArgument(arguments.AbstractArgument): def _parse_one(self, arg): - raise ValueError('not an argument') - argument = AnArgument('foo', min=0) - args = ['a', 'b'] + raise ValueError("not an argument") + + argument = AnArgument("foo", min=0) + args = ["a", "b"] self.assertThat(argument.parse(args), Equals([])) - self.assertThat(args, Equals(['a', 'b'])) + self.assertThat(args, Equals(["a", "b"])) # No interface tests for now, because the interface we expect is really just diff --git a/testrepository/tests/test_commands.py b/testrepository/tests/test_commands.py index f2248ab..c48525f 100644 --- a/testrepository/tests/test_commands.py +++ b/testrepository/tests/test_commands.py @@ -1,11 +1,11 @@ # # 
Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -23,7 +23,7 @@ IsInstance, MatchesException, raises, - ) +) from testrepository import commands from testrepository.repository import file @@ -31,7 +31,7 @@ from testrepository.tests.monkeypatch import monkeypatch from testrepository.tests.stubpackage import ( StubPackageResource, - ) +) from testrepository.ui import cli, model @@ -40,112 +40,122 @@ class TemporaryCommand(object): class TemporaryCommandResource(TestResource): - def __init__(self, cmd_name): TestResource.__init__(self) - cmd_name = cmd_name.replace('-', '_') - self.resources.append(('pkg', - StubPackageResource('commands', - [('%s.py' % cmd_name, - """from testrepository.commands import Command + cmd_name = cmd_name.replace("-", "_") + self.resources.append( + ( + "pkg", + StubPackageResource( + "commands", + [ + ( + "%s.py" % cmd_name, + """from testrepository.commands import Command class %s(Command): def run(self): pass -""" % cmd_name)], init=False))) +""" + % cmd_name, + ) + ], + init=False, + ), + ) + ) self.cmd_name = cmd_name def make(self, dependency_resources): - pkg = dependency_resources['pkg'] + pkg = dependency_resources["pkg"] result = TemporaryCommand() - result.path = os.path.join(pkg.base, 'commands') + result.path = os.path.join(pkg.base, "commands") commands.__path__.append(result.path) return result def clean(self, resource): commands.__path__.remove(resource.path) - name = 'testrepository.commands.%s' % self.cmd_name + name 
= "testrepository.commands.%s" % self.cmd_name if name in sys.modules: del sys.modules[name] class TestFindCommand(ResourcedTestCase): - - resources = [('cmd', TemporaryCommandResource('foo'))] + resources = [("cmd", TemporaryCommandResource("foo"))] def test_looksupcommand(self): - cmd = commands._find_command('foo') + cmd = commands._find_command("foo") self.assertIsInstance(cmd(None), commands.Command) def test_missing_command(self): - self.assertThat(lambda: commands._find_command('bar'), - raises(KeyError)) + self.assertThat(lambda: commands._find_command("bar"), raises(KeyError)) def test_sets_name(self): - cmd = commands._find_command('foo') - self.assertEqual('foo', cmd.name) + cmd = commands._find_command("foo") + self.assertEqual("foo", cmd.name) class TestNameMangling(ResourcedTestCase): - - resources = [('cmd', TemporaryCommandResource('foo-bar'))] + resources = [("cmd", TemporaryCommandResource("foo-bar"))] def test_looksupcommand(self): - cmd = commands._find_command('foo-bar') + cmd = commands._find_command("foo-bar") self.assertIsInstance(cmd(None), commands.Command) def test_sets_name(self): - cmd = commands._find_command('foo-bar') + cmd = commands._find_command("foo-bar") # The name is preserved, so that 'testr commands' shows something # sensible. 
- self.assertEqual('foo-bar', cmd.name) + self.assertEqual("foo-bar", cmd.name) class TestIterCommands(ResourcedTestCase): - resources = [ - ('cmd1', TemporaryCommandResource('one')), - ('cmd2', TemporaryCommandResource('two')), - ] + ("cmd1", TemporaryCommandResource("one")), + ("cmd2", TemporaryCommandResource("two")), + ] def test_iter_commands(self): cmds = list(commands.iter_commands()) cmds = [cmd(None).name for cmd in cmds] # We don't care about all the built in commands - cmds = [cmd for cmd in cmds if cmd in ('one', 'two')] - self.assertEqual(['one', 'two'], cmds) + cmds = [cmd for cmd in cmds if cmd in ("one", "two")] + self.assertEqual(["one", "two"], cmds) class TestRunArgv(ResourcedTestCase): - def stub__find_command(self, cmd_run): self.calls = [] - self.addCleanup(monkeypatch('testrepository.commands._find_command', - self._find_command)) + self.addCleanup( + monkeypatch("testrepository.commands._find_command", self._find_command) + ) self.cmd_run = cmd_run def _find_command(self, cmd_name): self.calls.append(cmd_name) real_run = self.cmd_run + class SampleCommand(commands.Command): """A command that is used for testing.""" + def execute(self): return real_run(self) + return SampleCommand def test_looks_up_cmd(self): - self.stub__find_command(lambda x:0) - commands.run_argv(['testr', 'foo'], 'in', 'out', 'err') - self.assertEqual(['foo'], self.calls) + self.stub__find_command(lambda x: 0) + commands.run_argv(["testr", "foo"], "in", "out", "err") + self.assertEqual(["foo"], self.calls) def test_looks_up_cmd_skips_options(self): - self.stub__find_command(lambda x:0) - commands.run_argv(['testr', '--version', 'foo'], 'in', 'out', 'err') - self.assertEqual(['foo'], self.calls) + self.stub__find_command(lambda x: 0) + commands.run_argv(["testr", "--version", "foo"], "in", "out", "err") + self.assertEqual(["foo"], self.calls) def test_no_cmd_issues_help(self): - self.stub__find_command(lambda x:0) - commands.run_argv(['testr', '--version'], 'in', 'out', 
'err') - self.assertEqual(['help'], self.calls) + self.stub__find_command(lambda x: 0) + commands.run_argv(["testr", "--version"], "in", "out", "err") + self.assertEqual(["help"], self.calls) def capture_ui(self, cmd): self.ui = cmd.ui @@ -153,23 +163,20 @@ def capture_ui(self, cmd): def test_runs_cmd_with_CLI_UI(self): self.stub__find_command(self.capture_ui) - commands.run_argv(['testr', '--version', 'foo'], 'in', 'out', 'err') - self.assertEqual(['foo'], self.calls) + commands.run_argv(["testr", "--version", "foo"], "in", "out", "err") + self.assertEqual(["foo"], self.calls) self.assertIsInstance(self.ui, cli.UI) def test_returns_0_when_None_returned_from_execute(self): - self.stub__find_command(lambda x:None) - self.assertEqual(0, commands.run_argv(['testr', 'foo'], 'in', 'out', - 'err')) + self.stub__find_command(lambda x: None) + self.assertEqual(0, commands.run_argv(["testr", "foo"], "in", "out", "err")) def test_returns_execute_result(self): - self.stub__find_command(lambda x:1) - self.assertEqual(1, commands.run_argv(['testr', 'foo'], 'in', 'out', - 'err')) + self.stub__find_command(lambda x: 1) + self.assertEqual(1, commands.run_argv(["testr", "foo"], "in", "out", "err")) class TestGetCommandParser(ResourcedTestCase): - def test_trivial(self): cmd = InstrumentedCommand(model.UI()) parser = commands.get_command_parser(cmd) @@ -178,7 +185,7 @@ def test_trivial(self): class InstrumentedCommand(commands.Command): """A command which records methods called on it. - + The first line is the summary. 
""" @@ -186,19 +193,18 @@ def _init(self): self.calls = [] def execute(self): - self.calls.append('execute') + self.calls.append("execute") return commands.Command.execute(self) def run(self): - self.calls.append('run') + self.calls.append("run") class TestAbstractCommand(ResourcedTestCase): - def test_execute_calls_run(self): cmd = InstrumentedCommand(model.UI()) self.assertEqual(0, cmd.execute()) - self.assertEqual(['execute', 'run'], cmd.calls) + self.assertEqual(["execute", "run"], cmd.calls) def test_execute_calls_set_command(self): ui = model.UI() @@ -210,6 +216,7 @@ def test_execute_does_not_run_if_set_command_errors(self): class FailUI(object): def set_command(self, ui): return False + cmd = InstrumentedCommand(FailUI()) self.assertEqual(1, cmd.execute()) @@ -217,12 +224,13 @@ def test_shows_errors_from_execute_returns_3(self): class FailCommand(commands.Command): def run(self): raise Exception("foo") + ui = model.UI() cmd = FailCommand(ui) self.assertEqual(3, cmd.execute()) self.assertEqual(1, len(ui.outputs)) - self.assertEqual('error', ui.outputs[0][0]) - self.assertThat(ui.outputs[0][1], MatchesException(Exception('foo'))) + self.assertEqual("error", ui.outputs[0][0]) + self.assertThat(ui.outputs[0][1], MatchesException(Exception("foo"))) def test_default_repository_factory(self): cmd = commands.Command(model.UI()) @@ -230,5 +238,6 @@ def test_default_repository_factory(self): def test_get_summary(self): cmd = InstrumentedCommand - self.assertEqual('A command which records methods called on it.', - cmd.get_summary()) + self.assertEqual( + "A command which records methods called on it.", cmd.get_summary() + ) diff --git a/testrepository/tests/test_matchers.py b/testrepository/tests/test_matchers.py index b97b50c..937f8b7 100644 --- a/testrepository/tests/test_matchers.py +++ b/testrepository/tests/test_matchers.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 
3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -14,21 +14,20 @@ """Tests for matchers used by or for testing testrepository.""" -import sys from testtools import TestCase class TestWildcard(TestCase): - def test_wildcard_equals_everything(self): from testrepository.tests import Wildcard + self.assertTrue(Wildcard == 5) - self.assertTrue(Wildcard == 'orange') - self.assertTrue('orange' == Wildcard) + self.assertTrue(Wildcard == "orange") + self.assertTrue("orange" == Wildcard) self.assertTrue(5 == Wildcard) def test_wildcard_not_equals_nothing(self): from testrepository.tests import Wildcard - self.assertFalse(Wildcard != 5) - self.assertFalse(Wildcard != 'orange') + self.assertFalse(Wildcard != 5) + self.assertFalse(Wildcard != "orange") diff --git a/testrepository/tests/test_monkeypatch.py b/testrepository/tests/test_monkeypatch.py index e0c5a0e..9f59c83 100644 --- a/testrepository/tests/test_monkeypatch.py +++ b/testrepository/tests/test_monkeypatch.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -19,11 +19,10 @@ reference = 23 -class TestMonkeyPatch(ResourcedTestCase): +class TestMonkeyPatch(ResourcedTestCase): def test_patch_and_restore(self): - cleanup = monkeypatch( - 'testrepository.tests.test_monkeypatch.reference', 45) + cleanup = monkeypatch("testrepository.tests.test_monkeypatch.reference", 45) self.assertEqual(45, reference) cleanup() self.assertEqual(23, reference) diff --git a/testrepository/tests/test_repository.py b/testrepository/tests/test_repository.py index cf3b733..33cda0b 100644 --- a/testrepository/tests/test_repository.py +++ b/testrepository/tests/test_repository.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -17,36 +17,35 @@ from datetime import ( datetime, timedelta, - ) -import doctest +) import iso8601 from subunit import ( v2, - ) +) from testresources import TestResource from testtools import ( clone_test_with_new_id, content, PlaceHolder, - ) +) import testtools from testtools.testresult.doubles import ( ExtendedTestResult, StreamResult, - ) -from testtools.matchers import DocTestMatches, raises +) +from testtools.matchers import raises from testrepository import repository from testrepository.repository import file, memory from testrepository.tests import ( ResourcedTestCase, Wildcard, - ) +) from testrepository.tests.stubpackage import ( TempDirResource, - ) +) class RecordingRepositoryFactory(object): @@ -57,16 +56,15 @@ def __init__(self, calls, decorated): self.factory = decorated def initialise(self, url): - self.calls.append(('initialise', url)) + self.calls.append(("initialise", url)) return self.factory.initialise(url) def open(self, url): - self.calls.append(('open', url)) + self.calls.append(("open", url)) return self.factory.open(url) class DirtyTempDirResource(TempDirResource): - def __init__(self): TempDirResource.__init__(self) self._dirty = True @@ -81,20 +79,27 @@ def _setResource(self, new_resource): class MemoryRepositoryFactoryResource(TestResource): - def make(self, dependency_resources): return memory.RepositoryFactory() # what repository implementations do we need to test? 
repo_implementations = [ - ('file', {'repo_impl': file.RepositoryFactory(), - 'resources': [('sample_url', DirtyTempDirResource())] - }), - ('memory', { - 'resources': [('repo_impl', MemoryRepositoryFactoryResource())], - 'sample_url': 'memory:'}), - ] + ( + "file", + { + "repo_impl": file.RepositoryFactory(), + "resources": [("sample_url", DirtyTempDirResource())], + }, + ), + ( + "memory", + { + "resources": [("repo_impl", MemoryRepositoryFactoryResource())], + "sample_url": "memory:", + }, + ), +] class Case(ResourcedTestCase): @@ -108,11 +113,9 @@ def unexpected_success(self): class FailingCase: - def run(self, result): result.startTest(self) - result.addError( - self, None, details={'traceback': content.text_content("tb")}) + result.addError(self, None, details={"traceback": content.text_content("tb")}) result.stopTest(self) @@ -127,30 +130,32 @@ def make_test(id, should_pass): def run_timed(id, duration, result, enumeration=False): """Make and run a test taking duration seconds. - + :param enumeration: If True, don't run, just enumerate. """ start = datetime.now(tz=iso8601.UTC) if enumeration: - result.status(test_id=id, test_status='exists', timestamp=start) + result.status(test_id=id, test_status="exists", timestamp=start) else: - result.status(test_id=id, test_status='inprogress', timestamp=start) - result.status(test_id=id, test_status='success', - timestamp=start + timedelta(seconds=duration)) + result.status(test_id=id, test_status="inprogress", timestamp=start) + result.status( + test_id=id, + test_status="success", + timestamp=start + timedelta(seconds=duration), + ) class TestRepositoryErrors(ResourcedTestCase): - def test_not_found(self): - url = 'doesntexistatall' + url = "doesntexistatall" error = repository.RepositoryNotFound(url) self.assertEqual( - 'No repository found in %s. Create one by running "testr init".' - % url, str(error)) + 'No repository found in %s. Create one by running "testr init".' 
% url, + str(error), + ) class TestRepositoryContract(ResourcedTestCase): - scenarios = repo_implementations def get_failing(self, repo): @@ -187,10 +192,12 @@ def test_can_get_inserter(self): def test_insert_stream_smoke(self): # We can insert some data into the repository. repo = self.repo_impl.initialise(self.sample_url) + class Case(ResourcedTestCase): def method(self): pass - case = Case('method') + + case = Case("method") result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() @@ -204,9 +211,10 @@ def test_open(self): self.repo_impl.open(self.sample_url) def test_open_non_existent(self): - url = 'doesntexistatall' - self.assertThat(lambda: self.repo_impl.open(url), - raises(repository.RepositoryNotFound(url))) + url = "doesntexistatall" + self.assertThat( + lambda: self.repo_impl.open(url), raises(repository.RepositoryNotFound(url)) + ) def test_inserting_creates_id(self): # When inserting a stream, an id is returned from stopTestRun. 
@@ -230,8 +238,7 @@ def test_count(self): def test_latest_id_empty(self): repo = self.repo_impl.initialise(self.sample_url) - self.assertThat(repo.latest_id, - raises(KeyError("No tests in repository"))) + self.assertThat(repo.latest_id, raises(KeyError("No tests in repository"))) def test_latest_id_nonempty(self): repo = self.repo_impl.initialise(self.sample_url) @@ -253,13 +260,13 @@ def test_get_failing_one_run(self): result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('passing', True).run(legacy_result) - make_test('failing', False).run(legacy_result) + make_test("passing", True).run(legacy_result) + make_test("failing", False).run(legacy_result) legacy_result.stopTestRun() analyzed = self.get_failing(repo) self.assertEqual(1, analyzed.testsRun) self.assertEqual(1, len(analyzed.errors)) - self.assertEqual('failing', analyzed.errors[0][0].id()) + self.assertEqual("failing", analyzed.errors[0][0].id()) def test_unexpected_success(self): # Unexpected successes get forwarded too. (Test added because of a @@ -268,13 +275,13 @@ def test_unexpected_success(self): result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - test = clone_test_with_new_id(Case('unexpected_success'), 'unexpected_success') + test = clone_test_with_new_id(Case("unexpected_success"), "unexpected_success") test.run(legacy_result) legacy_result.stopTestRun() analyzed = self.get_last_run(repo) self.assertEqual(1, analyzed.testsRun) self.assertEqual(1, len(analyzed.unexpectedSuccesses)) - self.assertEqual('unexpected_success', analyzed.unexpectedSuccesses[0].id()) + self.assertEqual("unexpected_success", analyzed.unexpectedSuccesses[0].id()) def test_get_failing_complete_runs_delete_missing_failures(self): # failures from complete runs replace all failures. 
@@ -282,20 +289,20 @@ def test_get_failing_complete_runs_delete_missing_failures(self): result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('passing', True).run(legacy_result) - make_test('failing', False).run(legacy_result) - make_test('missing', False).run(legacy_result) + make_test("passing", True).run(legacy_result) + make_test("failing", False).run(legacy_result) + make_test("missing", False).run(legacy_result) legacy_result.stopTestRun() result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('passing', False).run(legacy_result) - make_test('failing', True).run(legacy_result) + make_test("passing", False).run(legacy_result) + make_test("failing", True).run(legacy_result) legacy_result.stopTestRun() analyzed = self.get_failing(repo) self.assertEqual(1, analyzed.testsRun) self.assertEqual(1, len(analyzed.errors)) - self.assertEqual('passing', analyzed.errors[0][0].id()) + self.assertEqual("passing", analyzed.errors[0][0].id()) def test_get_failing_partial_runs_preserve_missing_failures(self): # failures from two runs add to existing failures, and successes remove @@ -304,21 +311,22 @@ def test_get_failing_partial_runs_preserve_missing_failures(self): result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('passing', True).run(legacy_result) - make_test('failing', False).run(legacy_result) - make_test('missing', False).run(legacy_result) + make_test("passing", True).run(legacy_result) + make_test("failing", False).run(legacy_result) + make_test("missing", False).run(legacy_result) legacy_result.stopTestRun() result = repo.get_inserter(partial=True) legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('passing', False).run(legacy_result) - make_test('failing', True).run(legacy_result) + 
make_test("passing", False).run(legacy_result) + make_test("failing", True).run(legacy_result) legacy_result.stopTestRun() analyzed = self.get_failing(repo) self.assertEqual(2, analyzed.testsRun) self.assertEqual(2, len(analyzed.errors)) - self.assertEqual(set(['passing', 'missing']), - set([test[0].id() for test in analyzed.errors])) + self.assertEqual( + set(["passing", "missing"]), set([test[0].id() for test in analyzed.errors]) + ) def test_get_test_run_missing_keyerror(self): repo = self.repo_impl.initialise(self.sample_url) @@ -326,8 +334,7 @@ def test_get_test_run_missing_keyerror(self): result.startTestRun() result.stopTestRun() inserted = result.get_id() - self.assertThat(lambda:repo.get_test_run(inserted - 1), - raises(KeyError)) + self.assertThat(lambda: repo.get_test_run(inserted - 1), raises(KeyError)) def test_get_test_run(self): repo = self.repo_impl.initialise(self.sample_url) @@ -358,31 +365,35 @@ def test_get_test_run_get_id(self): self.assertEqual(inserted, run.get_id()) def test_get_test_run_preserves_time(self): - self.skip('Fix me before releasing.') + self.skip("Fix me before releasing.") # The test run outputs the time events that it received. 
now = datetime(2001, 1, 1, 0, 0, 0, tzinfo=iso8601.UTC) second = timedelta(seconds=1) repo = self.repo_impl.initialise(self.sample_url) test_id = self.getUniqueString() - test = make_test(test_id, True) + make_test(test_id, True) result = repo.get_inserter() result.startTestRun() - result.status(timestamp=now, test_id=test_id, test_status='inprogress') - result.status(timestamp=(now + 1 * second), test_id=test_id, test_status='success') + result.status(timestamp=now, test_id=test_id, test_status="inprogress") + result.status( + timestamp=(now + 1 * second), test_id=test_id, test_status="success" + ) inserted = result.stopTestRun() run = repo.get_test_run(inserted) result = ExtendedTestResult() run.get_test().run(result) self.assertEqual( - [('time', now), - ('tags', set(), set()), - ('startTest', Wildcard), - ('time', now + 1 * second), - ('addSuccess', Wildcard), - ('stopTest', Wildcard), - ('tags', set(), set()), - ], - result._events) + [ + ("time", now), + ("tags", set(), set()), + ("startTest", Wildcard), + ("time", now + 1 * second), + ("addSuccess", Wildcard), + ("stopTest", Wildcard), + ("tags", set(), set()), + ], + result._events, + ) def test_get_failing_get_id(self): repo = self.repo_impl.initialise(self.sample_url) @@ -397,7 +408,9 @@ def test_get_failing_get_subunit_stream(self): result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('testrepository.tests.test_repository.Case.method', False).run(legacy_result) + make_test("testrepository.tests.test_repository.Case.method", False).run( + legacy_result + ) legacy_result.stopTestRun() run = repo.get_failing() as_subunit = run.get_subunit_stream() @@ -409,50 +422,60 @@ def test_get_failing_get_subunit_stream(self): finally: log.stopTestRun() self.assertEqual( - [tuple(ev) for ev in log._events], [ - ('startTestRun',), - ('status', - 'testrepository.tests.test_repository.Case.method', - 'inprogress', - None, - True, - None, - None, 
- False, - None, - None, - Wildcard), - ('status', - 'testrepository.tests.test_repository.Case.method', - None, - None, - True, - 'traceback', - Wildcard, - True, - Wildcard, - None, - Wildcard), - ('status', - 'testrepository.tests.test_repository.Case.method', - 'fail', - None, - True, - None, - None, - False, - None, - None, - Wildcard), - ('stopTestRun',) - ]) + [tuple(ev) for ev in log._events], + [ + ("startTestRun",), + ( + "status", + "testrepository.tests.test_repository.Case.method", + "inprogress", + None, + True, + None, + None, + False, + None, + None, + Wildcard, + ), + ( + "status", + "testrepository.tests.test_repository.Case.method", + None, + None, + True, + "traceback", + Wildcard, + True, + Wildcard, + None, + Wildcard, + ), + ( + "status", + "testrepository.tests.test_repository.Case.method", + "fail", + None, + True, + None, + None, + False, + None, + None, + Wildcard, + ), + ("stopTestRun",), + ], + ) def test_get_subunit_from_test_run(self): repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('testrepository.tests.test_repository.Case.method', True).run(legacy_result) + make_test("testrepository.tests.test_repository.Case.method", True).run( + legacy_result + ) legacy_result.stopTestRun() inserted = result.get_id() run = repo.get_test_run(inserted) @@ -467,38 +490,45 @@ def test_get_subunit_from_test_run(self): self.assertEqual( [tuple(ev) for ev in log._events], [ - ('startTestRun',), - ('status', - 'testrepository.tests.test_repository.Case.method', - 'inprogress', - None, - True, - None, - None, - False, - None, - None, - Wildcard), - ('status', - 'testrepository.tests.test_repository.Case.method', - 'success', - None, - True, - None, - None, - False, - None, - None, - Wildcard), - ('stopTestRun',) - ]) + ("startTestRun",), + ( + "status", + "testrepository.tests.test_repository.Case.method", + "inprogress", + 
None, + True, + None, + None, + False, + None, + None, + Wildcard, + ), + ( + "status", + "testrepository.tests.test_repository.Case.method", + "success", + None, + True, + None, + None, + False, + None, + None, + Wildcard, + ), + ("stopTestRun",), + ], + ) def test_get_test_from_test_run(self): repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - make_test('testrepository.tests.test_repository.Case.method', True).run(legacy_result) + make_test("testrepository.tests.test_repository.Case.method", True).run( + legacy_result + ) legacy_result.stopTestRun() inserted = result.get_id() run = repo.get_test_run(inserted) @@ -513,35 +543,33 @@ def test_get_test_from_test_run(self): def test_get_times_unknown_tests_are_unknown(self): repo = self.repo_impl.initialise(self.sample_url) - test_ids = set(['foo', 'bar']) - self.assertEqual(test_ids, repo.get_test_times(test_ids)['unknown']) + test_ids = set(["foo", "bar"]) + self.assertEqual(test_ids, repo.get_test_times(test_ids)["unknown"]) def test_inserted_test_times_known(self): repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - test_name = 'testrepository.tests.test_repository.Case.method' + test_name = "testrepository.tests.test_repository.Case.method" run_timed(test_name, 0.1, legacy_result) legacy_result.stopTestRun() - self.assertEqual({test_name: 0.1}, - repo.get_test_times([test_name])['known']) + self.assertEqual({test_name: 0.1}, repo.get_test_times([test_name])["known"]) def test_inserted_exists_no_impact_on_test_times(self): repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() legacy_result = testtools.ExtendedToStreamDecorator(result) legacy_result.startTestRun() - test_name = 'testrepository.tests.test_repository.Case.method' + test_name = 
"testrepository.tests.test_repository.Case.method" run_timed(test_name, 0.1, legacy_result) legacy_result.stopTestRun() result = repo.get_inserter() result.startTestRun() - test_name = 'testrepository.tests.test_repository.Case.method' + test_name = "testrepository.tests.test_repository.Case.method" run_timed(test_name, 0.2, result, True) result.stopTestRun() - self.assertEqual({test_name: 0.1}, - repo.get_test_times([test_name])['known']) + self.assertEqual({test_name: 0.1}, repo.get_test_times([test_name])["known"]) def test_get_test_ids(self): repo = self.repo_impl.initialise(self.sample_url) diff --git a/testrepository/tests/test_results.py b/testrepository/tests/test_results.py index a92f8f1..e13d572 100644 --- a/testrepository/tests/test_results.py +++ b/testrepository/tests/test_results.py @@ -15,7 +15,7 @@ from datetime import ( datetime, timedelta, - ) +) import sys from testtools import TestCase @@ -24,7 +24,6 @@ class TestSummarizingResult(TestCase): - def test_empty(self): result = SummarizingResult() result.startTestRun() @@ -46,11 +45,11 @@ def test_num_failures(self): result = SummarizingResult() result.startTestRun() try: - 1/0 + 1 / 0 except ZeroDivisionError: - error = sys.exc_info() - result.status(test_id='foo', test_status='fail') - result.status(test_id='foo', test_status='fail') + sys.exc_info() + result.status(test_id="foo", test_status="fail") + result.status(test_id="foo", test_status="fail") result.stopTestRun() self.assertEqual(2, result.get_num_failures()) @@ -58,6 +57,6 @@ def test_tests_run(self): result = SummarizingResult() result.startTestRun() for i in range(5): - result.status(test_id='foo', test_status='success') + result.status(test_id="foo", test_status="success") result.stopTestRun() self.assertEqual(5, result.testsRun) diff --git a/testrepository/tests/test_setup.py b/testrepository/tests/test_setup.py index ecefb8e..b252574 100644 --- a/testrepository/tests/test_setup.py +++ b/testrepository/tests/test_setup.py @@ -1,11 
+1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -21,32 +21,46 @@ from testtools import ( TestCase, - ) +) from testtools.matchers import ( DocTestMatches, MatchesAny, - ) +) -class TestCanSetup(TestCase): +class TestCanSetup(TestCase): def test_bdist(self): # Single smoke test to make sure we can build a package. - path = os.path.join(os.path.dirname(__file__), '..', '..', 'setup.py') - proc = subprocess.Popen([sys.executable, path, 'bdist'], - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, universal_newlines=True) + path = os.path.join(os.path.dirname(__file__), "..", "..", "setup.py") + proc = subprocess.Popen( + [sys.executable, path, "bdist"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) output, err = proc.communicate() - self.assertThat(output, MatchesAny( - # win32 - DocTestMatches("""... + self.assertThat( + output, + MatchesAny( + # win32 + DocTestMatches( + """... running install_scripts ... Installing testr script... -...""", doctest.ELLIPSIS), - # unixen - DocTestMatches("""... +...""", + doctest.ELLIPSIS, + ), + # unixen + DocTestMatches( + """... 
Installing testr script to build/.../bin -...""", doctest.ELLIPSIS) - )) - self.assertEqual(0, proc.returncode, - "Setup failed out=%r err=%r" % (output, err)) +...""", + doctest.ELLIPSIS, + ), + ), + ) + self.assertEqual( + 0, proc.returncode, "Setup failed out=%r err=%r" % (output, err) + ) diff --git a/testrepository/tests/test_stubpackage.py b/testrepository/tests/test_stubpackage.py index 7374465..bae41e8 100644 --- a/testrepository/tests/test_stubpackage.py +++ b/testrepository/tests/test_stubpackage.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -20,31 +20,27 @@ from testrepository.tests.stubpackage import ( StubPackageResource, TempDirResource, - ) +) class TestStubPackageResource(ResourcedTestCase): - def test_has_tempdir(self): - resource = StubPackageResource('foo', []) + resource = StubPackageResource("foo", []) self.assertEqual(1, len(resource.resources)) self.assertIsInstance(resource.resources[0][1], TempDirResource) def test_writes_package(self): - resource = StubPackageResource('foo', [('bar.py', 'woo')]) + resource = StubPackageResource("foo", [("bar.py", "woo")]) pkg = resource.getResource() self.addCleanup(resource.finishedWith, pkg) - self.assertEqual('', open(os.path.join(pkg.base, 'foo', - '__init__.py')).read()) - self.assertEqual('woo', open(os.path.join(pkg.base, 'foo', - 'bar.py')).read()) + self.assertEqual("", open(os.path.join(pkg.base, "foo", "__init__.py")).read()) + self.assertEqual("woo", open(os.path.join(pkg.base, "foo", "bar.py")).read()) def test_no__init__(self): - resource = StubPackageResource('foo', [('bar.py', 'woo')], init=False) + resource = StubPackageResource("foo", [("bar.py", "woo")], init=False) pkg = resource.getResource() self.addCleanup(resource.finishedWith, pkg) - self.assertFalse(os.path.exists(os.path.join(pkg.base, 'foo', - '__init__.py'))) + self.assertFalse(os.path.exists(os.path.join(pkg.base, "foo", "__init__.py"))) class TestTempDirResource(ResourcedTestCase): diff --git a/testrepository/tests/test_testcommand.py b/testrepository/tests/test_testcommand.py index 050e6cd..3362b49 100644 --- a/testrepository/tests/test_testcommand.py +++ b/testrepository/tests/test_testcommand.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. 
-# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -23,30 +23,26 @@ from testtools.matchers import ( Equals, MatchesAny, - MatchesException, raises, - ) -from testtools.testresult.doubles import ExtendedTestResult +) from testrepository.commands import run from testrepository.ui.model import UI from testrepository.repository import memory from testrepository.testcommand import TestCommand -from testrepository.tests import ResourcedTestCase, Wildcard +from testrepository.tests import ResourcedTestCase from testrepository.tests.stubpackage import TempDirResource from testrepository.tests.test_repository import run_timed class FakeTestCommand(TestCommand): - def __init__(self, ui, repo): TestCommand.__init__(self, ui, repo) self.oldschool = True class TestTestCommand(ResourcedTestCase): - - resources = [('tempdir', TempDirResource())] + resources = [("tempdir", TempDirResource())] def get_test_ui_and_cmd(self, options=(), args=(), repository=None): self.dirty() @@ -64,13 +60,13 @@ def get_test_ui_and_cmd2(self, options=(), args=()): def dirty(self): # Ugly: TODO - improve testresources to make this go away. 
- dict(self.resources)['tempdir']._dirty = True + dict(self.resources)["tempdir"]._dirty = True def config_path(self): - return os.path.join(self.tempdir, '.testr.conf') + return os.path.join(self.tempdir, ".testr.conf") def set_config(self, text): - stream = open(self.config_path(), 'wt') + stream = open(self.config_path(), "wt") try: stream.write(text) finally: @@ -94,7 +90,7 @@ def test_TestCommand_get_run_command_outside_setUp_fails(self): ui = UI() ui.here = self.tempdir command = TestCommand(ui, None) - self.set_config('[DEFAULT]\ntest_command=foo\n') + self.set_config("[DEFAULT]\ntest_command=foo\n") self.assertThat(command.get_run_command, raises(TypeError)) command.setUp() command.cleanUp() @@ -103,48 +99,57 @@ def test_TestCommand_get_run_command_outside_setUp_fails(self): def test_TestCommand_cleanUp_disposes_instances(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo\n' - 'instance_dispose=bar $INSTANCE_IDS\n') - command._instances.update([b'baz', b'quux']) + "[DEFAULT]\ntest_command=foo\ninstance_dispose=bar $INSTANCE_IDS\n" + ) + command._instances.update([b"baz", b"quux"]) command.cleanUp() command.setUp() - self.assertEqual([ - ('values', [('running', 'bar baz quux')]), - ('popen', ('bar baz quux',), {'shell': True}), - ('communicate',)], ui.outputs) + self.assertEqual( + [ + ("values", [("running", "bar baz quux")]), + ("popen", ("bar baz quux",), {"shell": True}), + ("communicate",), + ], + ui.outputs, + ) def test_TestCommand_cleanUp_disposes_instances_fail_raises(self): ui, command = self.get_test_ui_and_cmd() ui.proc_results = [1] self.set_config( - '[DEFAULT]\ntest_command=foo\n' - 'instance_dispose=bar $INSTANCE_IDS\n') - command._instances.update([b'baz', b'quux']) - self.assertThat(command.cleanUp, - raises(ValueError('Disposing of instances failed, return 1'))) + "[DEFAULT]\ntest_command=foo\ninstance_dispose=bar $INSTANCE_IDS\n" + ) + command._instances.update([b"baz", b"quux"]) + 
self.assertThat( + command.cleanUp, + raises(ValueError("Disposing of instances failed, return 1")), + ) command.setUp() def test_get_run_command_no_config_file_errors(self): ui, command = self.get_test_ui_and_cmd() - self.assertThat(command.get_run_command, - raises(ValueError('No .testr.conf config file'))) + self.assertThat( + command.get_run_command, raises(ValueError("No .testr.conf config file")) + ) def test_get_run_command_no_config_settings_errors(self): ui, command = self.get_test_ui_and_cmd() - self.set_config('') - self.assertThat(command.get_run_command, - raises(ValueError( - 'No test_command option present in .testr.conf'))) + self.set_config("") + self.assertThat( + command.get_run_command, + raises(ValueError("No test_command option present in .testr.conf")), + ) def test_get_run_command_returns_fixture_makes_IDFILE(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') - fixture = command.get_run_command(['failing', 'alsofailing']) + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) + fixture = command.get_run_command(["failing", "alsofailing"]) try: fixture.setUp() list_file_path = fixture.list_file_name - source = open(list_file_path, 'rt') + source = open(list_file_path, "rt") try: list_file_content = source.read() finally: @@ -157,81 +162,81 @@ def test_get_run_command_returns_fixture_makes_IDFILE(self): def test_get_run_command_IDFILE_variable_setting(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') - fixture = self.useFixture( - command.get_run_command(['failing', 'alsofailing'])) - expected_cmd = 'foo --load-list %s' % fixture.list_file_name + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) + fixture = self.useFixture(command.get_run_command(["failing", "alsofailing"])) + expected_cmd = "foo 
--load-list %s" % fixture.list_file_name self.assertEqual(expected_cmd, fixture.cmd) def test_get_run_command_IDLIST_variable_setting(self): ui, command = self.get_test_ui_and_cmd() - self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n') - fixture = self.useFixture( - command.get_run_command(['failing', 'alsofailing'])) - expected_cmd = 'foo failing alsofailing' + self.set_config("[DEFAULT]\ntest_command=foo $IDLIST\n") + fixture = self.useFixture(command.get_run_command(["failing", "alsofailing"])) + expected_cmd = "foo failing alsofailing" self.assertEqual(expected_cmd, fixture.cmd) def test_get_run_command_IDLIST_default_is_empty(self): ui, command = self.get_test_ui_and_cmd() - self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n') + self.set_config("[DEFAULT]\ntest_command=foo $IDLIST\n") fixture = self.useFixture(command.get_run_command()) - expected_cmd = 'foo ' + expected_cmd = "foo " self.assertEqual(expected_cmd, fixture.cmd) def test_get_run_command_default_and_list_expands(self): ui, command = self.get_test_ui_and_cmd() buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='returned', test_status='exists') - stream.status(test_id='ids', test_status='exists') + stream.status(test_id="returned", test_status="exists") + stream.status(test_id="ids", test_status="exists") subunit_bytes = buffer.getvalue() ui.proc_outputs = [subunit_bytes] ui.options = optparse.Values() ui.options.parallel = True ui.options.concurrency = 2 self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' - 'test_id_list_default=whoo yea\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n" + "test_id_list_default=whoo yea\n" + "test_list_option=--list\n" + ) fixture = self.useFixture(command.get_run_command()) - expected_cmd = 'foo returned ids ' + expected_cmd = "foo returned ids " self.assertEqual(expected_cmd, fixture.cmd) def test_get_run_command_IDLIST_default_passed_normally(self): ui, command 
= self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\ntest_id_list_default=whoo yea\n') + "[DEFAULT]\ntest_command=foo $IDLIST\ntest_id_list_default=whoo yea\n" + ) fixture = self.useFixture(command.get_run_command()) - expected_cmd = 'foo whoo yea' + expected_cmd = "foo whoo yea" self.assertEqual(expected_cmd, fixture.cmd) def test_IDOPTION_evalutes_empty_string_no_ids(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) fixture = self.useFixture(command.get_run_command()) - expected_cmd = 'foo ' + expected_cmd = "foo " self.assertEqual(expected_cmd, fixture.cmd) def test_group_regex_option(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\n' - 'test_id_option=--load-list $IDFILE\n' - 'group_regex=([^\\.]+\\.)+\n') + "[DEFAULT]\ntest_command=foo $IDOPTION\n" + "test_id_option=--load-list $IDFILE\n" + "group_regex=([^\\.]+\\.)+\n" + ) fixture = self.useFixture(command.get_run_command()) - self.assertEqual( - 'pkg.class.', fixture._group_callback('pkg.class.test_method')) + self.assertEqual("pkg.class.", fixture._group_callback("pkg.class.test_method")) def test_extra_args_passed_in(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') - fixture = self.useFixture(command.get_run_command( - testargs=('bar', 'quux'))) - expected_cmd = 'foo bar quux' + "[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n" + ) + fixture = self.useFixture(command.get_run_command(testargs=("bar", "quux"))) + expected_cmd = "foo bar quux" self.assertEqual(expected_cmd, fixture.cmd) def test_list_tests_requests_concurrency_instances(self): @@ -241,88 +246,124 @@ def 
test_list_tests_requests_concurrency_instances(self): # This covers the case for non-listing runs as well, as the code path # is common. self.dirty() - ui = UI(options= [('concurrency', 2), ('parallel', True)]) + ui = UI(options=[("concurrency", 2), ("parallel", True)]) ui.here = self.tempdir cmd = run.run(ui) ui.set_command(cmd) - ui.proc_outputs = [b'returned\ninstances\n'] + ui.proc_outputs = [b"returned\ninstances\n"] command = self.useFixture(TestCommand(ui, None)) self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n' - 'instance_provision=provision -c $INSTANCE_COUNT\n' - 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') - fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + "instance_provision=provision -c $INSTANCE_COUNT\n" + "instance_execute=quux $INSTANCE_ID -- $COMMAND\n" + ) + fixture = self.useFixture(command.get_run_command(test_ids=["1"])) fixture.list_tests() - self.assertEqual(set([b'returned', b'instances']), command._instances) + self.assertEqual(set([b"returned", b"instances"]), command._instances) self.assertEqual(set([]), command._allocated_instances) - self.assertThat(ui.outputs, MatchesAny(Equals([ - ('values', [('running', 'provision -c 2')]), - ('popen', ('provision -c 2',), {'shell': True, 'stdout': -1}), - ('communicate',), - ('values', [('running', 'quux instances -- foo --list whoo yea')]), - ('popen',('quux instances -- foo --list whoo yea',), - {'shell': True, 'stdin': -1, 'stdout': -1}), - ('communicate',)]), Equals([ - ('values', [('running', 'provision -c 2')]), - ('popen', ('provision -c 2',), {'shell': True, 'stdout': -1}), - ('communicate',), - ('values', [('running', 'quux returned -- foo --list whoo yea')]), - ('popen',('quux returned -- foo --list whoo yea',), - {'shell': True, 'stdin': -1, 'stdout': -1}), - 
('communicate',)]))) + self.assertThat( + ui.outputs, + MatchesAny( + Equals( + [ + ("values", [("running", "provision -c 2")]), + ("popen", ("provision -c 2",), {"shell": True, "stdout": -1}), + ("communicate",), + ( + "values", + [("running", "quux instances -- foo --list whoo yea")], + ), + ( + "popen", + ("quux instances -- foo --list whoo yea",), + {"shell": True, "stdin": -1, "stdout": -1}, + ), + ("communicate",), + ] + ), + Equals( + [ + ("values", [("running", "provision -c 2")]), + ("popen", ("provision -c 2",), {"shell": True, "stdout": -1}), + ("communicate",), + ( + "values", + [("running", "quux returned -- foo --list whoo yea")], + ), + ( + "popen", + ("quux returned -- foo --list whoo yea",), + {"shell": True, "stdin": -1, "stdout": -1}, + ), + ("communicate",), + ] + ), + ), + ) def test_list_tests_uses_instances(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n' - 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + "instance_execute=quux $INSTANCE_ID -- $COMMAND\n" + ) fixture = self.useFixture(command.get_run_command()) - command._instances.add(b'bar') + command._instances.add(b"bar") fixture.list_tests() - self.assertEqual(set([b'bar']), command._instances) + self.assertEqual(set([b"bar"]), command._instances) self.assertEqual(set([]), command._allocated_instances) - self.assertEqual([ - ('values', [('running', 'quux bar -- foo --list whoo yea')]), - ('popen', ('quux bar -- foo --list whoo yea',), - {'shell': True, 'stdin': -1, 'stdout': -1}), ('communicate',)], - ui.outputs) + self.assertEqual( + [ + ("values", [("running", "quux bar -- foo --list whoo yea")]), + ( + "popen", + ("quux bar -- foo --list whoo yea",), + {"shell": True, "stdin": -1, "stdout": -1}, + ), + ("communicate",), + ], + ui.outputs, + ) 
def test_list_tests_cmd(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + ) fixture = self.useFixture(command.get_run_command()) - expected_cmd = 'foo --list whoo yea' + expected_cmd = "foo --list whoo yea" self.assertEqual(expected_cmd, fixture.list_cmd) def test_list_tests_parsing(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='returned', test_status='exists') - stream.status(test_id='ids', test_status='exists') + stream.status(test_id="returned", test_status="exists") + stream.status(test_id="ids", test_status="exists") subunit_bytes = buffer.getvalue() ui, command = self.get_test_ui_and_cmd() ui.proc_outputs = [subunit_bytes] self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + ) fixture = self.useFixture(command.get_run_command()) - self.assertEqual(set(['returned', 'ids']), set(fixture.list_tests())) + self.assertEqual(set(["returned", "ids"]), set(fixture.list_tests())) def test_list_tests_nonzero_exit(self): ui, command = self.get_test_ui_and_cmd() ui.proc_results = [1] self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + ) fixture = self.useFixture(command.get_run_command()) - self.assertThat(lambda:fixture.list_tests(), raises(ValueError)) + self.assertThat(lambda: fixture.list_tests(), raises(ValueError)) def test_partition_tests_smoke(self): - repo = 
memory.RepositoryFactory().initialise('memory:') + repo = memory.RepositoryFactory().initialise("memory:") # Seed with 1 slow and 2 tests making up 2/3 the time. result = repo.get_inserter() result.startTestRun() @@ -332,20 +373,21 @@ def test_partition_tests_smoke(self): result.stopTestRun() ui, command = self.get_test_ui_and_cmd(repository=repo) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\ntest_list_option=--list\n" + ) fixture = self.useFixture(command.get_run_command()) # partitioning by two generates 'slow' and the two fast ones as partitions # flushed out by equal numbers of unknown duration tests. - test_ids = frozenset(['slow', 'fast1', 'fast2', 'unknown1', 'unknown2', - 'unknown3', 'unknown4']) + test_ids = frozenset( + ["slow", "fast1", "fast2", "unknown1", "unknown2", "unknown3", "unknown4"] + ) partitions = fixture.partition_tests(test_ids, 2) - self.assertTrue('slow' in partitions[0]) - self.assertFalse('fast1' in partitions[0]) - self.assertFalse('fast2' in partitions[0]) - self.assertFalse('slow' in partitions[1]) - self.assertTrue('fast1' in partitions[1]) - self.assertTrue('fast2' in partitions[1]) + self.assertTrue("slow" in partitions[0]) + self.assertFalse("fast1" in partitions[0]) + self.assertFalse("fast2" in partitions[0]) + self.assertFalse("slow" in partitions[1]) + self.assertTrue("fast1" in partitions[1]) + self.assertTrue("fast2" in partitions[1]) self.assertEqual(3, len(partitions[0])) self.assertEqual(4, len(partitions[1])) @@ -355,7 +397,7 @@ def test_partition_tests_914359(self): # but in practice, if a test is recorded with 0 duration (e.g. due to a # bug), it is better to have them split out rather than all in one # partition. 0 duration tests are unlikely to really be 0 duration. - repo = memory.RepositoryFactory().initialise('memory:') + repo = memory.RepositoryFactory().initialise("memory:") # Seed with two 0-duration tests. 
result = repo.get_inserter() result.startTestRun() @@ -363,17 +405,16 @@ def test_partition_tests_914359(self): run_timed("zero2", 0, result) result.stopTestRun() ui, command = self.get_test_ui_and_cmd(repository=repo) - self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n') + self.set_config("[DEFAULT]\ntest_command=foo $IDLIST\n") fixture = self.useFixture(command.get_run_command()) # partitioning by two should generate two one-entry partitions. - test_ids = frozenset(['zero1', 'zero2']) + test_ids = frozenset(["zero1", "zero2"]) partitions = fixture.partition_tests(test_ids, 2) self.assertEqual(1, len(partitions[0])) self.assertEqual(1, len(partitions[1])) def test_partition_tests_with_grouping(self): - repo = memory.RepositoryFactory().initialise('memory:') + repo = memory.RepositoryFactory().initialise("memory:") result = repo.get_inserter() result.startTestRun() run_timed("TestCase1.slow", 3, result) @@ -382,158 +423,190 @@ def test_partition_tests_with_grouping(self): result.stopTestRun() ui, command = self.get_test_ui_and_cmd(repository=repo) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' - 'test_list_option=--list\n') + "[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\ntest_list_option=--list\n" + ) fixture = self.useFixture(command.get_run_command()) - test_ids = frozenset(['TestCase1.slow', 'TestCase1.fast', - 'TestCase1.fast2', 'TestCase2.fast1', - 'TestCase3.test1', 'TestCase3.test2', - 'TestCase2.fast2', 'TestCase4.test', - 'testdir.testfile.TestCase5.test']) - regex = 'TestCase[0-5]' - def group_id(test_id, regex=re.compile('TestCase[0-5]')): + test_ids = frozenset( + [ + "TestCase1.slow", + "TestCase1.fast", + "TestCase1.fast2", + "TestCase2.fast1", + "TestCase3.test1", + "TestCase3.test2", + "TestCase2.fast2", + "TestCase4.test", + "testdir.testfile.TestCase5.test", + ] + ) + + def group_id(test_id, regex=re.compile("TestCase[0-5]")): match = regex.match(test_id) if match: return match.group(0) + # There isn't a public way to 
define a group callback [as yet]. fixture._group_callback = group_id partitions = fixture.partition_tests(test_ids, 2) # Timed groups are deterministic: - self.assertTrue('TestCase2.fast1' in partitions[0]) - self.assertTrue('TestCase2.fast2' in partitions[0]) - self.assertTrue('TestCase1.slow' in partitions[1]) - self.assertTrue('TestCase1.fast' in partitions[1]) - self.assertTrue('TestCase1.fast2' in partitions[1]) + self.assertTrue("TestCase2.fast1" in partitions[0]) + self.assertTrue("TestCase2.fast2" in partitions[0]) + self.assertTrue("TestCase1.slow" in partitions[1]) + self.assertTrue("TestCase1.fast" in partitions[1]) + self.assertTrue("TestCase1.fast2" in partitions[1]) # Untimed groups just need to be kept together: - if 'TestCase3.test1' in partitions[0]: - self.assertTrue('TestCase3.test2' in partitions[0]) - if 'TestCase4.test' not in partitions[0]: - self.assertTrue('TestCase4.test' in partitions[1]) - if 'testdir.testfile.TestCase5.test' not in partitions[0]: - self.assertTrue('testdir.testfile.TestCase5.test' in partitions[1]) + if "TestCase3.test1" in partitions[0]: + self.assertTrue("TestCase3.test2" in partitions[0]) + if "TestCase4.test" not in partitions[0]: + self.assertTrue("TestCase4.test" in partitions[1]) + if "testdir.testfile.TestCase5.test" not in partitions[0]: + self.assertTrue("testdir.testfile.TestCase5.test" in partitions[1]) def test_run_tests_with_instances(self): # when there are instances and no instance_execute, run_tests acts as # normal. 
ui, command = self.get_test_ui_and_cmd() - self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n') - command._instances.update([b'foo', b'bar']) + self.set_config("[DEFAULT]\ntest_command=foo $IDLIST\n") + command._instances.update([b"foo", b"bar"]) fixture = self.useFixture(command.get_run_command()) - procs = fixture.run_tests() - self.assertEqual([ - ('values', [('running', 'foo ')]), - ('popen', ('foo ',), {'shell': True, 'stdin': -1, 'stdout': -1})], - ui.outputs) + fixture.run_tests() + self.assertEqual( + [ + ("values", [("running", "foo ")]), + ("popen", ("foo ",), {"shell": True, "stdin": -1, "stdout": -1}), + ], + ui.outputs, + ) def test_run_tests_with_existing_instances_configured(self): # when there are instances present, they are pulled out for running # tests. ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n' - 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') - command._instances.add(b'bar') - fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + "[DEFAULT]\ntest_command=foo $IDLIST\n" + "instance_execute=quux $INSTANCE_ID -- $COMMAND\n" + ) + command._instances.add(b"bar") + fixture = self.useFixture(command.get_run_command(test_ids=["1"])) procs = fixture.run_tests() - self.assertEqual([ - ('values', [('running', 'quux bar -- foo 1')]), - ('popen', ('quux bar -- foo 1',), - {'shell': True, 'stdin': -1, 'stdout': -1})], - ui.outputs) + self.assertEqual( + [ + ("values", [("running", "quux bar -- foo 1")]), + ( + "popen", + ("quux bar -- foo 1",), + {"shell": True, "stdin": -1, "stdout": -1}, + ), + ], + ui.outputs, + ) # No --parallel, so the one instance should have been allocated. - self.assertEqual(set([b'bar']), command._instances) - self.assertEqual(set([b'bar']), command._allocated_instances) + self.assertEqual(set([b"bar"]), command._instances) + self.assertEqual(set([b"bar"]), command._allocated_instances) # And after the process is run, bar is returned for re-use. 
procs[0].stdout.read() procs[0].wait() self.assertEqual(0, procs[0].returncode) - self.assertEqual(set([b'bar']), command._instances) + self.assertEqual(set([b"bar"]), command._instances) self.assertEqual(set(), command._allocated_instances) - + def test_run_tests_allocated_instances_skipped(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n' - 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') - command._instances.update([b'bar', b'baz']) - command._allocated_instances.add(b'baz') - fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + "[DEFAULT]\ntest_command=foo $IDLIST\n" + "instance_execute=quux $INSTANCE_ID -- $COMMAND\n" + ) + command._instances.update([b"bar", b"baz"]) + command._allocated_instances.add(b"baz") + fixture = self.useFixture(command.get_run_command(test_ids=["1"])) procs = fixture.run_tests() - self.assertEqual([ - ('values', [('running', 'quux bar -- foo 1')]), - ('popen', ('quux bar -- foo 1',), - {'shell': True, 'stdin': -1, 'stdout': -1})], - ui.outputs) + self.assertEqual( + [ + ("values", [("running", "quux bar -- foo 1")]), + ( + "popen", + ("quux bar -- foo 1",), + {"shell": True, "stdin": -1, "stdout": -1}, + ), + ], + ui.outputs, + ) # No --parallel, so the one instance should have been allocated. - self.assertEqual(set([b'bar', b'baz']), command._instances) - self.assertEqual(set([b'bar', b'baz']), command._allocated_instances) + self.assertEqual(set([b"bar", b"baz"]), command._instances) + self.assertEqual(set([b"bar", b"baz"]), command._allocated_instances) # And after the process is run, bar is returned for re-use. 
procs[0].wait() procs[0].stdout.read() self.assertEqual(0, procs[0].returncode) - self.assertEqual(set([b'bar', b'baz']), command._instances) - self.assertEqual(set([b'baz']), command._allocated_instances) + self.assertEqual(set([b"bar", b"baz"]), command._instances) + self.assertEqual(set([b"baz"]), command._allocated_instances) def test_run_tests_list_file_in_FILES(self): ui, command = self.get_test_ui_and_cmd() self.set_config( - '[DEFAULT]\ntest_command=foo $IDFILE\n' - 'instance_execute=quux $INSTANCE_ID $FILES -- $COMMAND\n') - command._instances.add(b'bar') - fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + "[DEFAULT]\ntest_command=foo $IDFILE\n" + "instance_execute=quux $INSTANCE_ID $FILES -- $COMMAND\n" + ) + command._instances.add(b"bar") + fixture = self.useFixture(command.get_run_command(test_ids=["1"])) list_file = fixture.list_file_name procs = fixture.run_tests() - expected_cmd = 'quux bar %s -- foo %s' % (list_file, list_file) - self.assertEqual([ - ('values', [('running', expected_cmd)]), - ('popen', (expected_cmd,), - {'shell': True, 'stdin': -1, 'stdout': -1})], - ui.outputs) + expected_cmd = "quux bar %s -- foo %s" % (list_file, list_file) + self.assertEqual( + [ + ("values", [("running", expected_cmd)]), + ("popen", (expected_cmd,), {"shell": True, "stdin": -1, "stdout": -1}), + ], + ui.outputs, + ) # No --parallel, so the one instance should have been allocated. - self.assertEqual(set([b'bar']), command._instances) - self.assertEqual(set([b'bar']), command._allocated_instances) + self.assertEqual(set([b"bar"]), command._instances) + self.assertEqual(set([b"bar"]), command._allocated_instances) # And after the process is run, bar is returned for re-use. 
procs[0].stdout.read() self.assertEqual(0, procs[0].returncode) - self.assertEqual(set([b'bar']), command._instances) + self.assertEqual(set([b"bar"]), command._instances) self.assertEqual(set(), command._allocated_instances) def test_filter_tags_parsing(self): ui, command = self.get_test_ui_and_cmd() - self.set_config('[DEFAULT]\nfilter_tags=foo bar\n') - self.assertEqual(set(['foo', 'bar']), command.get_filter_tags()) + self.set_config("[DEFAULT]\nfilter_tags=foo bar\n") + self.assertEqual(set(["foo", "bar"]), command.get_filter_tags()) def test_callout_concurrency(self): ui, command = self.get_test_ui_and_cmd() - ui.proc_outputs = [b'4'] - self.set_config( - '[DEFAULT]\ntest_run_concurrency=probe\n' - 'test_command=foo\n') + ui.proc_outputs = [b"4"] + self.set_config("[DEFAULT]\ntest_run_concurrency=probe\ntest_command=foo\n") fixture = self.useFixture(command.get_run_command()) self.assertEqual(4, fixture.callout_concurrency()) - self.assertEqual([ - ('popen', ('probe',), {'shell': True, 'stdin': -1, 'stdout': -1}), - ('communicate',)], ui.outputs) + self.assertEqual( + [ + ("popen", ("probe",), {"shell": True, "stdin": -1, "stdout": -1}), + ("communicate",), + ], + ui.outputs, + ) def test_callout_concurrency_failed(self): ui, command = self.get_test_ui_and_cmd() ui.proc_results = [1] - self.set_config( - '[DEFAULT]\ntest_run_concurrency=probe\n' - 'test_command=foo\n') + self.set_config("[DEFAULT]\ntest_run_concurrency=probe\ntest_command=foo\n") fixture = self.useFixture(command.get_run_command()) - self.assertThat(lambda:fixture.callout_concurrency(), raises( - ValueError("test_run_concurrency failed: exit code 1, stderr=''"))) - self.assertEqual([ - ('popen', ('probe',), {'shell': True, 'stdin': -1, 'stdout': -1}), - ('communicate',)], ui.outputs) + self.assertThat( + lambda: fixture.callout_concurrency(), + raises(ValueError("test_run_concurrency failed: exit code 1, stderr=''")), + ) + self.assertEqual( + [ + ("popen", ("probe",), {"shell": True, 
"stdin": -1, "stdout": -1}), + ("communicate",), + ], + ui.outputs, + ) def test_callout_concurrency_not_set(self): ui, command = self.get_test_ui_and_cmd() - self.set_config( - '[DEFAULT]\n' - 'test_command=foo\n') + self.set_config("[DEFAULT]\ntest_command=foo\n") fixture = self.useFixture(command.get_run_command()) self.assertEqual(None, fixture.callout_concurrency()) self.assertEqual([], ui.outputs) @@ -541,36 +614,46 @@ def test_callout_concurrency_not_set(self): def test_filter_tests_by_regex_only(self): buffer = BytesIO() stream = subunit.StreamResultToBytes(buffer) - stream.status(test_id='returned', test_status='exists') - stream.status(test_id='ids', test_status='exists') + stream.status(test_id="returned", test_status="exists") + stream.status(test_id="ids", test_status="exists") subunit_bytes = buffer.getvalue() ui, command = self.get_test_ui_and_cmd() ui.proc_outputs = [subunit_bytes] self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n') - filters = ['return'] + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + ) + filters = ["return"] fixture = self.useFixture(command.get_run_command(test_filters=filters)) - self.assertEqual(['returned'], fixture.test_ids) + self.assertEqual(["returned"], fixture.test_ids) def test_filter_tests_by_regex_supplied_ids(self): ui, command = self.get_test_ui_and_cmd() - ui.proc_outputs = [b'returned\nids\n'] + ui.proc_outputs = [b"returned\nids\n"] self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n') - filters = ['return'] - fixture = self.useFixture(command.get_run_command( - test_ids=['return', 'of', 'the', 'king'], test_filters=filters)) - self.assertEqual(['return'], fixture.test_ids) + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + ) + filters 
= ["return"] + fixture = self.useFixture( + command.get_run_command( + test_ids=["return", "of", "the", "king"], test_filters=filters + ) + ) + self.assertEqual(["return"], fixture.test_ids) def test_filter_tests_by_regex_supplied_ids_multi_match(self): ui, command = self.get_test_ui_and_cmd() - ui.proc_outputs = [b'returned\nids\n'] + ui.proc_outputs = [b"returned\nids\n"] self.set_config( - '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' - 'test_list_option=--list\n') - filters = ['return'] - fixture = self.useFixture(command.get_run_command( - test_ids=['return', 'of', 'the', 'king', 'thereisnoreturn'], test_filters=filters)) - self.assertEqual(['return', 'thereisnoreturn'], fixture.test_ids) + "[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n" + "test_list_option=--list\n" + ) + filters = ["return"] + fixture = self.useFixture( + command.get_run_command( + test_ids=["return", "of", "the", "king", "thereisnoreturn"], + test_filters=filters, + ) + ) + self.assertEqual(["return", "thereisnoreturn"], fixture.test_ids) diff --git a/testrepository/tests/test_testr.py b/testrepository/tests/test_testr.py index fa0d6aa..e38c978 100644 --- a/testrepository/tests/test_testr.py +++ b/testrepository/tests/test_testr.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -22,7 +22,7 @@ from testresources import TestResource from testtools.matchers import ( DocTestMatches, - ) +) from testrepository.tests import ResourcedTestCase from testrepository.tests.stubpackage import StubPackageResource @@ -36,37 +36,52 @@ def __init__(self, testrpath): def execute(self, args): # sys.executable is used so that this works on windows. - proc = subprocess.Popen([sys.executable, self.execpath] + args, - env={'PYTHONPATH': self.stubpackage.base}, - stdout=subprocess.PIPE, stdin=subprocess.PIPE, - stderr=subprocess.STDOUT, universal_newlines=True) + proc = subprocess.Popen( + [sys.executable, self.execpath] + args, + env={"PYTHONPATH": self.stubpackage.base}, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) out, err = proc.communicate() return proc.returncode, out class StubbedTestrResource(TestResource): - - resources = [("stubpackage", StubPackageResource('testrepository', - [('commands.py', r"""import sys + resources = [ + ( + "stubpackage", + StubPackageResource( + "testrepository", + [ + ( + "commands.py", + r"""import sys def run_argv(argv, stdin, stdout, stderr): sys.stdout.write("%s %s %s\n" % (sys.stdin is stdin, sys.stdout is stdout, sys.stderr is stderr)) sys.stdout.write("%s\n" % argv) return len(argv) - 1 -""")]))] +""", + ) + ], + ), + ) + ] def make(self, dependency_resources): - stub = dependency_resources['stubpackage'] - path = os.path.join(os.path.dirname(__file__), '..', '..', 'testr') + stub = dependency_resources["stubpackage"] + path = os.path.join(os.path.dirname(__file__), "..", "..", "testr") # Make a copy of the testr script as running in place uses the current # library, not the stub library. 
- execpath = os.path.join(stub.base, 'testr') - source = open(path, 'rb') + execpath = os.path.join(stub.base, "testr") + source = open(path, "rb") try: testr_contents = source.read() finally: source.close() - target = open(execpath, 'wb') + target = open(execpath, "wb") try: target.write(testr_contents) finally: @@ -77,16 +92,28 @@ def make(self, dependency_resources): class TestExecuted(ResourcedTestCase): """Tests that execute testr. These tests are (moderately) expensive!.""" - resources = [('testr', StubbedTestrResource())] + resources = [("testr", StubbedTestrResource())] def test_runs_and_returns_run_argv_some_args(self): status, output = self.testr.execute(["foo bar", "baz"]) self.assertEqual(2, status) - self.assertThat(output, DocTestMatches("""True True True -[..., 'foo bar', 'baz']\n""", doctest.ELLIPSIS)) + self.assertThat( + output, + DocTestMatches( + """True True True +[..., 'foo bar', 'baz']\n""", + doctest.ELLIPSIS, + ), + ) def test_runs_and_returns_run_argv_no_args(self): status, output = self.testr.execute([]) - self.assertThat(output, DocTestMatches("""True True True -[...]\n""", doctest.ELLIPSIS)) + self.assertThat( + output, + DocTestMatches( + """True True True +[...]\n""", + doctest.ELLIPSIS, + ), + ) self.assertEqual(0, status) diff --git a/testrepository/tests/test_ui.py b/testrepository/tests/test_ui.py index b6f294e..6a5159c 100644 --- a/testrepository/tests/test_ui.py +++ b/testrepository/tests/test_ui.py @@ -20,7 +20,6 @@ import sys from fixtures import EnvironmentVariable -from testtools.content import text_content from testtools.matchers import raises from testrepository import arguments, commands @@ -46,7 +45,7 @@ def cli_ui_factory(input_streams=None, options=(), args=()): for option, value in options: # only bool handled so far if value: - argv.append('--%s' % option) + argv.append("--%s" % option) return cli.UI(argv, stdin, stdout, stderr) @@ -57,14 +56,13 @@ def decorator_ui_factory(input_streams=None, options=(), args=()): # 
what ui implementations do we need to test? ui_implementations = [ - ('CLIUI', {'ui_factory': cli_ui_factory}), - ('ModelUI', {'ui_factory': model.UI}), - ('DecoratorUI', {'ui_factory': decorator_ui_factory}), - ] + ("CLIUI", {"ui_factory": cli_ui_factory}), + ("ModelUI", {"ui_factory": model.UI}), + ("DecoratorUI", {"ui_factory": decorator_ui_factory}), +] class TestUIContract(ResourcedTestCase): - scenarios = ui_implementations def get_test_ui(self): @@ -74,10 +72,10 @@ def get_test_ui(self): return ui def test_factory_noargs(self): - ui = self.ui_factory() + self.ui_factory() def test_factory_input_stream_args(self): - ui = self.ui_factory([('subunit', b'value')]) + self.ui_factory([("subunit", b"value")]) def test_here(self): ui = self.get_test_ui() @@ -86,23 +84,23 @@ def test_here(self): def test_iter_streams_load_stdin_use_case(self): # A UI can be asked for the streams that a command has indicated it # accepts, which is what load < foo will require. - ui = self.ui_factory([('subunit', b'test: foo\nsuccess: foo\n')]) + ui = self.ui_factory([("subunit", b"test: foo\nsuccess: foo\n")]) cmd = commands.Command(ui) - cmd.input_streams = ['subunit+'] + cmd.input_streams = ["subunit+"] ui.set_command(cmd) results = [] - for result in ui.iter_streams('subunit'): + for result in ui.iter_streams("subunit"): results.append(result.read()) - self.assertEqual([b'test: foo\nsuccess: foo\n'], results) + self.assertEqual([b"test: foo\nsuccess: foo\n"], results) def test_iter_streams_unexpected_type_raises(self): ui = self.get_test_ui() - self.assertThat(lambda: ui.iter_streams('subunit'), raises(KeyError)) + self.assertThat(lambda: ui.iter_streams("subunit"), raises(KeyError)) def test_output_error(self): - self.useFixture(EnvironmentVariable('TESTR_PDB')) + self.useFixture(EnvironmentVariable("TESTR_PDB")) try: - raise Exception('fooo') + raise Exception("fooo") except Exception: err_tuple = sys.exc_info() ui = self.get_test_ui() @@ -111,7 +109,7 @@ def 
test_output_error(self): def test_output_rest(self): # output some ReST - used for help and docs. ui = self.get_test_ui() - ui.output_rest('') + ui.output_rest("") def test_output_stream(self): # a stream of bytes can be output. @@ -121,22 +119,22 @@ def test_output_stream(self): def test_output_stream_non_utf8(self): # When the stream has non-utf8 bytes it still outputs correctly. ui = self.get_test_ui() - ui.output_stream(BytesIO(b'\xfa')) + ui.output_stream(BytesIO(b"\xfa")) def test_output_table(self): # output_table shows a table. ui = self.get_test_ui() - ui.output_table([('col1', 'col2'), ('row1c1','row1c2')]) + ui.output_table([("col1", "col2"), ("row1c1", "row1c2")]) def test_output_tests(self): # output_tests can be called, and takes a list of tests to output. ui = self.get_test_ui() - ui.output_tests([self, self.__class__('test_output_table')]) + ui.output_tests([self, self.__class__("test_output_table")]) def test_output_values(self): # output_values can be called and takes a list of things to output. 
ui = self.get_test_ui() - ui.output_values([('foo', 1), ('bar', 'quux')]) + ui.output_values([("foo", 1), ("bar", "quux")]) def test_output_summary(self): # output_summary can be called, takes success boolean and list of @@ -151,28 +149,28 @@ def test_set_command(self): self.assertEqual(True, ui.set_command(cmd)) def test_set_command_checks_args_unwanted_arg(self): - ui = self.ui_factory(args=['foo']) + ui = self.ui_factory(args=["foo"]) cmd = commands.Command(ui) self.assertEqual(False, ui.set_command(cmd)) def test_set_command_checks_args_missing_arg(self): ui = self.ui_factory() cmd = commands.Command(ui) - cmd.args = [arguments.command.CommandArgument('foo')] + cmd.args = [arguments.command.CommandArgument("foo")] self.assertEqual(False, ui.set_command(cmd)) def test_set_command_checks_args_invalid_arg(self): - ui = self.ui_factory(args=['a']) + ui = self.ui_factory(args=["a"]) cmd = commands.Command(ui) - cmd.args = [arguments.command.CommandArgument('foo')] + cmd.args = [arguments.command.CommandArgument("foo")] self.assertEqual(False, ui.set_command(cmd)) def test_args_are_exposed_at_arguments(self): - ui = self.ui_factory(args=['load']) + ui = self.ui_factory(args=["load"]) cmd = commands.Command(ui) - cmd.args = [arguments.command.CommandArgument('foo')] + cmd.args = [arguments.command.CommandArgument("foo")] self.assertEqual(True, ui.set_command(cmd)) - self.assertEqual({'foo':[load.load]}, ui.arguments) + self.assertEqual({"foo": [load.load]}, ui.arguments) def test_set_command_with_no_name_works(self): # Degrade gracefully if the name attribute has not been set. 
@@ -185,47 +183,62 @@ def test_options_at_options(self): self.assertEqual(False, ui.options.quiet) def test_options_when_set_at_options(self): - ui = self.ui_factory(options=[('quiet', True)]) + ui = self.ui_factory(options=[("quiet", True)]) cmd = commands.Command(ui) ui.set_command(cmd) self.assertEqual(True, ui.options.quiet) def test_options_on_command_picked_up(self): - ui = self.ui_factory(options=[('subunit', True)]) + ui = self.ui_factory(options=[("subunit", True)]) cmd = commands.Command(ui) - cmd.options = [optparse.Option("--subunit", action="store_true", - default=False, help="Show output as a subunit stream.")] + cmd.options = [ + optparse.Option( + "--subunit", + action="store_true", + default=False, + help="Show output as a subunit stream.", + ) + ] ui.set_command(cmd) self.assertEqual(True, ui.options.subunit) # And when not given the default works. ui = self.ui_factory() cmd = commands.Command(ui) - cmd.options = [optparse.Option("--subunit", action="store_true", - default=False, help="Show output as a subunit stream.")] + cmd.options = [ + optparse.Option( + "--subunit", + action="store_true", + default=False, + help="Show output as a subunit stream.", + ) + ] ui.set_command(cmd) self.assertEqual(False, ui.options.subunit) def test_exec_subprocess(self): # exec_subprocess should 'work like popen'. ui = self.ui_factory() - proc = ui.subprocess_Popen([sys.executable, "-V"], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + proc = ui.subprocess_Popen( + [sys.executable, "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) out, err = proc.communicate() proc.returncode def test_subprocesses_have_stdin(self): # exec_subprocess should 'work like popen'. 
ui = self.ui_factory() - proc = ui.subprocess_Popen([sys.executable, "-V"], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + proc = ui.subprocess_Popen( + [sys.executable, "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) proc.stdout.read(0) out, err = proc.communicate() def test_subprocesses_have_stdout(self): # exec_subprocess should 'work like popen'. ui = self.ui_factory() - proc = ui.subprocess_Popen([sys.executable, "-V"], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + proc = ui.subprocess_Popen( + [sys.executable, "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) proc.stdout.read(0) out, err = proc.communicate() @@ -244,8 +257,10 @@ def test_make_result_previous_run(self): ui = self.ui_factory() ui.set_command(commands.Command(ui)) result, summary = ui.make_result( - lambda: None, StubTestCommand(), - previous_run=memory.Repository().get_failing()) + lambda: None, + StubTestCommand(), + previous_run=memory.Repository().get_failing(), + ) result.startTestRun() result.status() result.stopTestRun() diff --git a/testrepository/tests/ui/__init__.py b/testrepository/tests/ui/__init__.py index 234a736..34c9358 100644 --- a/testrepository/tests/ui/__init__.py +++ b/testrepository/tests/ui/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -16,12 +16,12 @@ import unittest + def test_suite(): names = [ - 'cli', - 'decorator', - ] - module_names = ['testrepository.tests.ui.test_' + name for name in - names] + "cli", + "decorator", + ] + module_names = ["testrepository.tests.ui.test_" + name for name in names] loader = unittest.TestLoader() return loader.loadTestsFromNames(module_names) diff --git a/testrepository/tests/ui/test_cli.py b/testrepository/tests/ui/test_cli.py index ef95fb4..483027b 100644 --- a/testrepository/tests/ui/test_cli.py +++ b/testrepository/tests/ui/test_cli.py @@ -23,13 +23,12 @@ from textwrap import dedent from fixtures import EnvironmentVariable -import subunit import testtools from testtools import TestCase from testtools.matchers import ( DocTestMatches, MatchesException, - ) +) from testrepository import arguments from testrepository import commands @@ -39,14 +38,14 @@ def get_test_ui_and_cmd(options=(), args=()): - stdout = TextIOWrapper(BytesIO(), 'utf8', line_buffering=True) + stdout = TextIOWrapper(BytesIO(), "utf8", line_buffering=True) stdin = StringIO() stderr = StringIO() argv = list(args) for option, value in options: # only bool handled so far if value: - argv.append('--%s' % option) + argv.append("--%s" % option) ui = cli.UI(argv, stdin, stdout, stderr) cmd = run.run(ui) ui.set_command(cmd) @@ -54,10 +53,9 @@ def get_test_ui_and_cmd(options=(), args=()): class TestCLIUI(ResourcedTestCase): - def setUp(self): super(TestCLIUI, self).setUp() - self.useFixture(EnvironmentVariable('TESTR_PDB')) + self.useFixture(EnvironmentVariable("TESTR_PDB")) def test_construct(self): stdout = BytesIO() @@ -67,49 +65,49 @@ def test_construct(self): def test_stream_comes_from_stdin(self): stdout = BytesIO() - stdin = BytesIO(b'foo\n') + stdin = BytesIO(b"foo\n") stderr = BytesIO() ui = cli.UI([], stdin, stdout, stderr) cmd = commands.Command(ui) - cmd.input_streams = ['subunit'] + cmd.input_streams = ["subunit"] ui.set_command(cmd) results = [] - for stream in 
ui.iter_streams('subunit'): + for stream in ui.iter_streams("subunit"): results.append(stream.read()) - self.assertEqual([b'foo\n'], results) + self.assertEqual([b"foo\n"], results) def test_stream_type_honoured(self): # The CLI UI has only one stdin, so when a command asks for a stream # type it didn't declare, no streams are found. stdout = BytesIO() - stdin = BytesIO(b'foo\n') + stdin = BytesIO(b"foo\n") stderr = BytesIO() ui = cli.UI([], stdin, stdout, stderr) cmd = commands.Command(ui) - cmd.input_streams = ['subunit+', 'interactive?'] + cmd.input_streams = ["subunit+", "interactive?"] ui.set_command(cmd) results = [] - for stream in ui.iter_streams('interactive'): + for stream in ui.iter_streams("interactive"): results.append(stream.read()) self.assertEqual([], results) def test_dash_d_sets_here_option(self): stdout = BytesIO() - stdin = BytesIO(b'foo\n') + stdin = BytesIO(b"foo\n") stderr = BytesIO() - ui = cli.UI(['-d', '/nowhere/'], stdin, stdout, stderr) + ui = cli.UI(["-d", "/nowhere/"], stdin, stdout, stderr) cmd = commands.Command(ui) ui.set_command(cmd) - self.assertEqual('/nowhere/', ui.here) + self.assertEqual("/nowhere/", ui.here) def test_outputs_error_string(self): try: - raise Exception('fooo') + raise Exception("fooo") except Exception: err_tuple = sys.exc_info() - expected = str(err_tuple[1]) + '\n' + expected = str(err_tuple[1]) + "\n" bytestream = BytesIO() - stdout = TextIOWrapper(bytestream, 'utf8', line_buffering=True) + stdout = TextIOWrapper(bytestream, "utf8", line_buffering=True) stdin = StringIO() stderr = StringIO() ui = cli.UI([], stdin, stdout, stderr) @@ -117,14 +115,14 @@ def test_outputs_error_string(self): self.assertThat(stderr.getvalue(), DocTestMatches(expected)) def test_error_enters_pdb_when_TESTR_PDB_set(self): - os.environ['TESTR_PDB'] = '1' + os.environ["TESTR_PDB"] = "1" try: - raise Exception('fooo') + raise Exception("fooo") except Exception: err_tuple = sys.exc_info() expected = dedent("""\ File "...test_cli.py", 
line ..., in ...pdb_when_TESTR_PDB_set - raise Exception('fooo') + raise Exception("fooo") fooo """) @@ -132,37 +130,43 @@ def test_error_enters_pdb_when_TESTR_PDB_set(self): # - this code is the most pragmatic to test on 2.6 and up, and 3.2 and # up. stdout = StringIO() - stdin = StringIO('c\n') + stdin = StringIO("c\n") stderr = StringIO() ui = cli.UI([], stdin, stdout, stderr) ui.output_error(err_tuple) - self.assertThat(stderr.getvalue(), - DocTestMatches(expected, doctest.ELLIPSIS)) + self.assertThat(stderr.getvalue(), DocTestMatches(expected, doctest.ELLIPSIS)) def test_outputs_rest_to_stdout(self): ui, cmd = get_test_ui_and_cmd() - ui.output_rest('topic\n=====\n') - self.assertEqual(b'topic\n=====\n', ui._stdout.buffer.getvalue()) + ui.output_rest("topic\n=====\n") + self.assertEqual(b"topic\n=====\n", ui._stdout.buffer.getvalue()) def test_outputs_results_to_stdout(self): ui, cmd = get_test_ui_and_cmd() + class Case(ResourcedTestCase): def method(self): - self.fail('quux') + self.fail("quux") + result, summary = ui.make_result(lambda: None, StubTestCommand()) result.startTestRun() - Case('method').run(testtools.ExtendedToStreamDecorator(result)) + Case("method").run(testtools.ExtendedToStreamDecorator(result)) result.stopTestRun() - self.assertThat(ui._stdout.buffer.getvalue().decode('utf8'), - DocTestMatches("""\ + self.assertThat( + ui._stdout.buffer.getvalue().decode("utf8"), + DocTestMatches( + """\ ====================================================================== FAIL: testrepository.tests.ui.test_cli...Case.method ---------------------------------------------------------------------- ...Traceback (most recent call last):... File "...test_cli.py", line ..., in method - self.fail(\'quux\')... + self.fail(\"quux\")... AssertionError: quux... 
-""", doctest.ELLIPSIS)) +""", + doctest.ELLIPSIS, + ), + ) def test_outputs_stream_to_stdout(self): ui, cmd = get_test_ui_and_cmd() @@ -172,49 +176,54 @@ def test_outputs_stream_to_stdout(self): def test_outputs_tables_to_stdout(self): ui, cmd = get_test_ui_and_cmd() - ui.output_table([('foo', 1), ('b', 'quux')]) - self.assertEqual(b'foo 1\n--- ----\nb quux\n', - ui._stdout.buffer.getvalue()) + ui.output_table([("foo", 1), ("b", "quux")]) + self.assertEqual( + b"foo 1\n--- ----\nb quux\n", ui._stdout.buffer.getvalue() + ) def test_outputs_tests_to_stdout(self): ui, cmd = get_test_ui_and_cmd() - ui.output_tests([self, self.__class__('test_construct')]) + ui.output_tests([self, self.__class__("test_construct")]) self.assertThat( - ui._stdout.buffer.getvalue().decode('utf8'), + ui._stdout.buffer.getvalue().decode("utf8"), DocTestMatches( - '...TestCLIUI.test_outputs_tests_to_stdout\n' - '...TestCLIUI.test_construct\n', doctest.ELLIPSIS)) + "...TestCLIUI.test_outputs_tests_to_stdout\n" + "...TestCLIUI.test_construct\n", + doctest.ELLIPSIS, + ), + ) def test_outputs_values_to_stdout(self): ui, cmd = get_test_ui_and_cmd() - ui.output_values([('foo', 1), ('bar', 'quux')]) - self.assertEqual(b'foo=1, bar=quux\n', ui._stdout.buffer.getvalue()) + ui.output_values([("foo", 1), ("bar", "quux")]) + self.assertEqual(b"foo=1, bar=quux\n", ui._stdout.buffer.getvalue()) def test_outputs_summary_to_stdout(self): ui, cmd = get_test_ui_and_cmd() summary = [True, 1, None, 2, None, []] expected_summary = ui._format_summary(*summary) ui.output_summary(*summary) - self.assertEqual(("%s\n" % (expected_summary,)).encode('utf8'), - ui._stdout.buffer.getvalue()) + self.assertEqual( + ("%s\n" % (expected_summary,)).encode("utf8"), ui._stdout.buffer.getvalue() + ) def test_parse_error_goes_to_stderr(self): bytestream = BytesIO() - stdout = TextIOWrapper(bytestream, 'utf8', line_buffering=True) + stdout = TextIOWrapper(bytestream, "utf8", line_buffering=True) stdin = StringIO() stderr = 
StringIO() - ui = cli.UI(['one'], stdin, stdout, stderr) + ui = cli.UI(["one"], stdin, stdout, stderr) cmd = commands.Command(ui) - cmd.args = [arguments.command.CommandArgument('foo')] + cmd.args = [arguments.command.CommandArgument("foo")] ui.set_command(cmd) self.assertEqual("Could not find command 'one'.\n", stderr.getvalue()) def test_parse_excess_goes_to_stderr(self): bytestream = BytesIO() - stdout = TextIOWrapper(bytestream, 'utf8', line_buffering=True) + stdout = TextIOWrapper(bytestream, "utf8", line_buffering=True) stdin = StringIO() stderr = StringIO() - ui = cli.UI(['one'], stdin, stdout, stderr) + ui = cli.UI(["one"], stdin, stdout, stderr) cmd = commands.Command(ui) ui.set_command(cmd) self.assertEqual("Unexpected arguments: ['one']\n", stderr.getvalue()) @@ -223,43 +232,45 @@ def test_parse_options_after_double_dash_are_arguments(self): stdout = BytesIO() stdin = BytesIO() stderr = BytesIO() - ui = cli.UI(['one', '--', '--two', 'three'], stdin, stdout, stderr) + ui = cli.UI(["one", "--", "--two", "three"], stdin, stdout, stderr) cmd = commands.Command(ui) - cmd.args = [arguments.string.StringArgument('myargs', max=None), + cmd.args = [ + arguments.string.StringArgument("myargs", max=None), arguments.doubledash.DoubledashArgument(), - arguments.string.StringArgument('subargs', max=None)] + arguments.string.StringArgument("subargs", max=None), + ] ui.set_command(cmd) - self.assertEqual({ - 'doubledash': ['--'], - 'myargs': ['one'], - 'subargs': ['--two', 'three']}, - ui.arguments) + self.assertEqual( + {"doubledash": ["--"], "myargs": ["one"], "subargs": ["--two", "three"]}, + ui.arguments, + ) def test_double_dash_passed_to_arguments(self): class CaptureArg(arguments.AbstractArgument): def _parse_one(self, arg): return arg + stdout = BytesIO() stdin = BytesIO() stderr = BytesIO() - ui = cli.UI(['one', '--', '--two', 'three'], stdin, stdout, stderr) + ui = cli.UI(["one", "--", "--two", "three"], stdin, stdout, stderr) cmd = commands.Command(ui) - 
cmd.args = [CaptureArg('args', max=None)] + cmd.args = [CaptureArg("args", max=None)] ui.set_command(cmd) - self.assertEqual({'args':['one', '--', '--two', 'three']}, ui.arguments) + self.assertEqual({"args": ["one", "--", "--two", "three"]}, ui.arguments) def test_run_subunit_option(self): - ui, cmd = get_test_ui_and_cmd(options=[('subunit', True)]) + ui, cmd = get_test_ui_and_cmd(options=[("subunit", True)]) self.assertEqual(True, ui.options.subunit) def test_dash_dash_help_shows_help(self): bytestream = BytesIO() - stdout = TextIOWrapper(bytestream, 'utf8', line_buffering=True) + stdout = TextIOWrapper(bytestream, "utf8", line_buffering=True) stdin = StringIO() stderr = StringIO() - ui = cli.UI(['--help'], stdin, stdout, stderr) + ui = cli.UI(["--help"], stdin, stdout, stderr) cmd = commands.Command(ui) - cmd.args = [arguments.string.StringArgument('foo')] + cmd.args = [arguments.string.StringArgument("foo")] cmd.name = "bar" # By definition SystemExit is not caught by 'except Exception'. try: @@ -268,76 +279,87 @@ def test_dash_dash_help_shows_help(self): exc_info = sys.exc_info() self.assertThat(exc_info, MatchesException(SystemExit(0))) else: - self.fail('ui.set_command did not raise') - self.assertThat(bytestream.getvalue().decode('utf8'), - DocTestMatches("""Usage: run.py bar [options] foo + self.fail("ui.set_command did not raise") + self.assertThat( + bytestream.getvalue().decode("utf8"), + DocTestMatches( + """Usage: run.py bar [options] foo ... A command that can be run... ... -d HERE, --here=HERE... 
-...""", doctest.ELLIPSIS)) +...""", + doctest.ELLIPSIS, + ), + ) -class TestCLISummary(TestCase): +class TestCLISummary(TestCase): def get_summary(self, successful, tests, tests_delta, time, time_delta, values): """Get the summary that would be output for successful & values.""" ui, cmd = get_test_ui_and_cmd() return ui._format_summary( - successful, tests, tests_delta, time, time_delta, values) + successful, tests, tests_delta, time, time_delta, values + ) def test_success_only(self): x = self.get_summary(True, None, None, None, None, []) - self.assertEqual('PASSED', x) + self.assertEqual("PASSED", x) def test_failure_only(self): x = self.get_summary(False, None, None, None, None, []) - self.assertEqual('FAILED', x) + self.assertEqual("FAILED", x) def test_time(self): x = self.get_summary(True, None, None, 3.4, None, []) - self.assertEqual('Ran tests in 3.400s\nPASSED', x) + self.assertEqual("Ran tests in 3.400s\nPASSED", x) def test_time_with_delta(self): x = self.get_summary(True, None, None, 3.4, 0.1, []) - self.assertEqual('Ran tests in 3.400s (+0.100s)\nPASSED', x) + self.assertEqual("Ran tests in 3.400s (+0.100s)\nPASSED", x) def test_tests_run(self): x = self.get_summary(True, 34, None, None, None, []) - self.assertEqual('Ran 34 tests\nPASSED', x) + self.assertEqual("Ran 34 tests\nPASSED", x) def test_tests_run_with_delta(self): x = self.get_summary(True, 34, 5, None, None, []) - self.assertEqual('Ran 34 (+5) tests\nPASSED', x) + self.assertEqual("Ran 34 (+5) tests\nPASSED", x) def test_tests_and_time(self): x = self.get_summary(True, 34, -5, 3.4, 0.1, []) - self.assertEqual('Ran 34 (-5) tests in 3.400s (+0.100s)\nPASSED', x) + self.assertEqual("Ran 34 (-5) tests in 3.400s (+0.100s)\nPASSED", x) def test_other_values(self): x = self.get_summary( - True, None, None, None, None, [('failures', 12, -1), ('errors', 13, 2)]) - self.assertEqual('PASSED (failures=12 (-1), errors=13 (+2))', x) + True, None, None, None, None, [("failures", 12, -1), ("errors", 13, 
2)] + ) + self.assertEqual("PASSED (failures=12 (-1), errors=13 (+2))", x) def test_values_no_delta(self): x = self.get_summary( - True, None, None, None, None, - [('failures', 12, None), ('errors', 13, None)]) - self.assertEqual('PASSED (failures=12, errors=13)', x) + True, None, None, None, None, [("failures", 12, None), ("errors", 13, None)] + ) + self.assertEqual("PASSED (failures=12, errors=13)", x) def test_combination(self): x = self.get_summary( - True, 34, -5, 3.4, 0.1, [('failures', 12, -1), ('errors', 13, 2)]) + True, 34, -5, 3.4, 0.1, [("failures", 12, -1), ("errors", 13, 2)] + ) self.assertEqual( - ('Ran 34 (-5) tests in 3.400s (+0.100s)\n' - 'PASSED (failures=12 (-1), errors=13 (+2))'), x) + ( + "Ran 34 (-5) tests in 3.400s (+0.100s)\n" + "PASSED (failures=12 (-1), errors=13 (+2))" + ), + x, + ) class TestCLITestResult(TestCase): - def make_exc_info(self): # Make an exc_info tuple for use in testing. try: - 1/0 + 1 / 0 except ZeroDivisionError: return sys.exc_info() @@ -348,54 +370,73 @@ def make_result(self, stream=None, argv=None, filter_tags=None): ui = cli.UI(argv, None, stream, None) cmd = commands.Command(ui) cmd.options = [ - optparse.Option("--subunit", action="store_true", - default=False, help="Display results in subunit format."), - ] + optparse.Option( + "--subunit", + action="store_true", + default=False, + help="Display results in subunit format.", + ), + ] ui.set_command(cmd) - return ui.make_result( - lambda: None, StubTestCommand(filter_tags=filter_tags)) + return ui.make_result(lambda: None, StubTestCommand(filter_tags=filter_tags)) def test_initial_stream(self): # CLITestResult.__init__ does not do anything to the stream it is # given. 
bytestream = BytesIO() - stream = TextIOWrapper(bytestream, 'utf8', line_buffering=True) + stream = TextIOWrapper(bytestream, "utf8", line_buffering=True) ui = cli.UI(None, None, None, None) cli.CLITestResult(ui, stream, lambda: None) - self.assertEqual(b'', bytestream.getvalue()) + self.assertEqual(b"", bytestream.getvalue()) def test_format_error(self): # CLITestResult formats errors by giving them a big fat line, a title # made up of their 'label' and the name of the test, another different # big fat line, and then the actual error itself. result = self.make_result()[0] - error = result._format_error('label', self, 'error text') - expected = '%s%s: %s\n%s%s' % ( - result.sep1, 'label', self.id(), result.sep2, 'error text') + error = result._format_error("label", self, "error text") + expected = "%s%s: %s\n%s%s" % ( + result.sep1, + "label", + self.id(), + result.sep2, + "error text", + ) self.assertThat(error, DocTestMatches(expected)) def test_format_error_includes_tags(self): result = self.make_result()[0] - error = result._format_error('label', self, 'error text', set(['foo'])) - expected = '%s%s: %s\ntags: foo\n%s%s' % ( - result.sep1, 'label', self.id(), result.sep2, 'error text') + error = result._format_error("label", self, "error text", set(["foo"])) + expected = "%s%s: %s\ntags: foo\n%s%s" % ( + result.sep1, + "label", + self.id(), + result.sep2, + "error text", + ) self.assertThat(error, DocTestMatches(expected)) def test_addFail_outputs_error(self): # CLITestResult.status test_status='fail' outputs the given error # immediately to the stream. 
bytestream = BytesIO() - stream = TextIOWrapper(bytestream, 'utf8', line_buffering=True) + stream = TextIOWrapper(bytestream, "utf8", line_buffering=True) result = self.make_result(stream)[0] - error = self.make_exc_info() - error_text = 'foo\nbar\n' + self.make_exc_info() + error_text = "foo\nbar\n" result.startTestRun() - result.status(test_id=self.id(), test_status='fail', eof=True, - file_name='traceback', mime_type='text/plain;charset=utf8', - file_bytes=error_text.encode('utf8')) + result.status( + test_id=self.id(), + test_status="fail", + eof=True, + file_name="traceback", + mime_type="text/plain;charset=utf8", + file_bytes=error_text.encode("utf8"), + ) self.assertThat( - bytestream.getvalue().decode('utf8'), - DocTestMatches(result._format_error('FAIL', self, error_text))) + bytestream.getvalue().decode("utf8"), + DocTestMatches(result._format_error("FAIL", self, error_text)), + ) def test_addFailure_handles_string_encoding(self): # CLITestResult.addFailure outputs the given error handling non-ascii @@ -404,49 +445,54 @@ def test_addFailure_handles_string_encoding(self): stream = BytesIO() result = self.make_result(stream)[0] result.startTestRun() - result.status(test_id='foo', test_status='fail', file_name='traceback', - mime_type='text/plain;charset=utf8', - file_bytes=b'-->\xe2\x80\x9c<--', eof=True) + result.status( + test_id="foo", + test_status="fail", + file_name="traceback", + mime_type="text/plain;charset=utf8", + file_bytes=b"-->\xe2\x80\x9c<--", + eof=True, + ) pattern = "...-->?<--..." 
self.assertThat( - stream.getvalue().decode('utf8'), - DocTestMatches(pattern, doctest.ELLIPSIS)) + stream.getvalue().decode("utf8"), DocTestMatches(pattern, doctest.ELLIPSIS) + ) def test_subunit_output(self): bytestream = BytesIO() - stream = TextIOWrapper(bytestream, 'utf8', line_buffering=True) - result = self.make_result(stream, argv=['--subunit'])[0] + stream = TextIOWrapper(bytestream, "utf8", line_buffering=True) + result = self.make_result(stream, argv=["--subunit"])[0] result.startTestRun() result.stopTestRun() - self.assertEqual(b'', bytestream.getvalue()) + self.assertEqual(b"", bytestream.getvalue()) def test_make_result_tag_filter(self): bytestream = BytesIO() - stream = TextIOWrapper(bytestream, 'utf8', line_buffering=True) - result, summary = self.make_result( - stream, filter_tags=set(['worker-0'])) + stream = TextIOWrapper(bytestream, "utf8", line_buffering=True) + result, summary = self.make_result(stream, filter_tags=set(["worker-0"])) # Generate a bunch of results with tags in the same events that # testtools generates them. 
- tags = set(['worker-0']) + tags = set(["worker-0"]) result.startTestRun() - result.status(test_id='pass', test_status='inprogress') - result.status(test_id='pass', test_status='success', test_tags=tags) - result.status(test_id='fail', test_status='inprogress') - result.status(test_id='fail', test_status='fail', test_tags=tags) - result.status(test_id='xfail', test_status='inprogress') - result.status(test_id='xfail', test_status='xfail', test_tags=tags) - result.status(test_id='uxsuccess', test_status='inprogress') - result.status( - test_id='uxsuccess', test_status='uxsuccess', test_tags=tags) - result.status(test_id='skip', test_status='inprogress') - result.status(test_id='skip', test_status='skip', test_tags=tags) + result.status(test_id="pass", test_status="inprogress") + result.status(test_id="pass", test_status="success", test_tags=tags) + result.status(test_id="fail", test_status="inprogress") + result.status(test_id="fail", test_status="fail", test_tags=tags) + result.status(test_id="xfail", test_status="inprogress") + result.status(test_id="xfail", test_status="xfail", test_tags=tags) + result.status(test_id="uxsuccess", test_status="inprogress") + result.status(test_id="uxsuccess", test_status="uxsuccess", test_tags=tags) + result.status(test_id="skip", test_status="inprogress") + result.status(test_id="skip", test_status="skip", test_tags=tags) result.stopTestRun() - self.assertEqual("""\ + self.assertEqual( + """\ ====================================================================== FAIL: fail tags: worker-0 ---------------------------------------------------------------------- Ran 1 tests FAILED (id=None, failures=1, skips=1) -""", bytestream.getvalue().decode('utf8')) - +""", + bytestream.getvalue().decode("utf8"), + ) diff --git a/testrepository/tests/ui/test_decorator.py b/testrepository/tests/ui/test_decorator.py index e6c6592..ca1b0c1 100644 --- a/testrepository/tests/ui/test_decorator.py +++ b/testrepository/tests/ui/test_decorator.py @@ 
-21,12 +21,11 @@ class TestDecoratorUI(ResourcedTestCase): - def test_options_overridable(self): - base = model.UI(options=[('partial', True), ('other', False)]) + base = model.UI(options=[("partial", True), ("other", False)]) cmd = commands.Command(base) base.set_command(cmd) - ui = decorator.UI(options={'partial':False}, decorated=base) + ui = decorator.UI(options={"partial": False}, decorated=base) internal_cmd = commands.Command(ui) ui.set_command(internal_cmd) self.assertEqual(False, ui.options.partial) diff --git a/testrepository/ui/__init__.py b/testrepository/ui/__init__.py index 3ce37c2..859d3d4 100644 --- a/testrepository/ui/__init__.py +++ b/testrepository/ui/__init__.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -25,7 +25,7 @@ from testtools import StreamResult from testrepository.results import SummarizingResult -from testrepository.utils import timedelta_to_seconds + class AbstractUI(object): """The base class for UI objects, this providers helpers and the interface. @@ -57,7 +57,7 @@ class AbstractUI(object): def _check_cmd(self): """Check that cmd is valid. This method is meant to be overridden. - + :return: True if the cmd is valid - if options and args match up with the ones supplied to the UI, and so on. """ @@ -75,7 +75,7 @@ def iter_streams(self, stream_type): method and a close method which behave as for file objects. """ for stream_spec in self.cmd.input_streams: - if '*' in stream_spec or '?' 
in stream_spec or '+' in stream_spec: + if "*" in stream_spec or "?" in stream_spec or "+" in stream_spec: found = stream_type == stream_spec[:-1] else: found = stream_type == stream_spec @@ -118,7 +118,7 @@ def output_rest(self, rest_string): This is typically used as the entire output for command help or documentation. - + :param rest_string: A ReST source to display. """ raise NotImplementedError(self.output_rest) @@ -172,7 +172,7 @@ def set_command(self, cmd): otherwise ensure that the information the command has declared it needs will be available. The default implementation simply sets self.cmd to cmd. - + :param cmd: A testrepository.commands.Command. """ self.cmd = cmd @@ -180,7 +180,7 @@ def set_command(self, cmd): def subprocess_Popen(self, *args, **kwargs): """Call an external process from the UI's context. - + The behaviour of this call should match the Popen process on any given platform, except that the UI can take care of any wrapping or manipulation needed to fit into its environment. 
@@ -228,13 +228,13 @@ def _output_summary(self, run_id): time_delta = None num_tests_run_delta = None num_failures_delta = None - values = [('id', run_id, None)] + values = [("id", run_id, None)] failures = self._summary.get_num_failures() previous_summary = self._get_previous_summary() if failures: if previous_summary: num_failures_delta = failures - previous_summary.get_num_failures() - values.append(('failures', failures, num_failures_delta)) + values.append(("failures", failures, num_failures_delta)) if previous_summary: num_tests_run_delta = self._summary.testsRun - previous_summary.testsRun if time: @@ -243,10 +243,15 @@ def _output_summary(self, run_id): time_delta = time - previous_time_taken skips = len(self._summary.skipped) if skips: - values.append(('skips', skips, None)) + values.append(("skips", skips, None)) self.ui.output_summary( - not bool(failures), self._summary.testsRun, num_tests_run_delta, - time, time_delta, values) + not bool(failures), + self._summary.testsRun, + num_tests_run_delta, + time, + time_delta, + values, + ) def startTestRun(self): super(BaseUITestResult, self).startTestRun() diff --git a/testrepository/ui/cli.py b/testrepository/ui/cli.py index d045c5d..5efc13e 100644 --- a/testrepository/ui/cli.py +++ b/testrepository/ui/cli.py @@ -14,14 +14,12 @@ """A command line UI for testrepository.""" -import io import os import signal import subunit import sys import testtools -from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator from testtools.compat import unicode_output_stream from testrepository import ui @@ -33,7 +31,7 @@ class CLITestResult(ui.BaseUITestResult): def __init__(self, ui, get_id, stream, previous_run=None, filter_tags=None): """Construct a CLITestResult writing to stream. - + :param filter_tags: Tags that should be used to filter tests out. When a tag in this set is present on a test outcome, the test is not counted towards the test run count. 
If the test errors, then it is @@ -41,35 +39,57 @@ def __init__(self, ui, get_id, stream, previous_run=None, filter_tags=None): """ super(CLITestResult, self).__init__(ui, get_id, previous_run) self.stream = unicode_output_stream(stream) - self.sep1 = '=' * 70 + '\n' - self.sep2 = '-' * 70 + '\n' + self.sep1 = "=" * 70 + "\n" + self.sep2 = "-" * 70 + "\n" self.filter_tags = filter_tags or frozenset() - self.filterable_states = set(['success', 'uxsuccess', 'xfail', 'skip']) + self.filterable_states = set(["success", "uxsuccess", "xfail", "skip"]) def _format_error(self, label, test, error_text, test_tags=None): test_tags = test_tags or () - tags = ' '.join(test_tags) + tags = " ".join(test_tags) if tags: - tags = 'tags: %s\n' % tags - return ''.join([ - self.sep1, - '%s: %s\n' % (label, test.id()), - tags, - self.sep2, - error_text, - ]) - - def status(self, test_id=None, test_status=None, test_tags=None, - runnable=True, file_name=None, file_bytes=None, eof=False, - mime_type=None, route_code=None, timestamp=None): - super(CLITestResult, self).status(test_id=test_id, - test_status=test_status, test_tags=test_tags, runnable=runnable, - file_name=file_name, file_bytes=file_bytes, eof=eof, - mime_type=mime_type, route_code=route_code, timestamp=timestamp) - if test_status == 'fail': + tags = "tags: %s\n" % tags + return "".join( + [ + self.sep1, + "%s: %s\n" % (label, test.id()), + tags, + self.sep2, + error_text, + ] + ) + + def status( + self, + test_id=None, + test_status=None, + test_tags=None, + runnable=True, + file_name=None, + file_bytes=None, + eof=False, + mime_type=None, + route_code=None, + timestamp=None, + ): + super(CLITestResult, self).status( + test_id=test_id, + test_status=test_status, + test_tags=test_tags, + runnable=runnable, + file_name=file_name, + file_bytes=file_bytes, + eof=eof, + mime_type=mime_type, + route_code=route_code, + timestamp=timestamp, + ) + if test_status == "fail": self.stream.write( - self._format_error('FAIL', 
*(self._summary.errors[-1]), - test_tags=test_tags)) + self._format_error( + "FAIL", *(self._summary.errors[-1]), test_tags=test_tags + ) + ) if test_status not in self.filterable_states: return if test_tags and test_tags.intersection(self.filter_tags): @@ -98,13 +118,12 @@ def _iter_streams(self, stream_type): # moment - as there is only one stdin and alternate streams are not yet # configurable in the CLI. first_stream_type = self.cmd.input_streams[0] - if (stream_type != first_stream_type - and stream_type != first_stream_type[:-1]): + if stream_type != first_stream_type and stream_type != first_stream_type[:-1]: return yield subunit.make_stream_binary(self._stdin) def make_result(self, get_id, test_command, previous_run=None): - if getattr(self.options, 'subunit', False): + if getattr(self.options, "subunit", False): serializer = subunit.StreamResultToBytes(self._stdout) # By pass user transforms - just forward it all, result = serializer @@ -116,34 +135,36 @@ def make_result(self, get_id, test_command, previous_run=None): else: # Apply user defined transforms. 
filter_tags = test_command.get_filter_tags() - output = CLITestResult(self, get_id, self._stdout, previous_run, - filter_tags=filter_tags) + output = CLITestResult( + self, get_id, self._stdout, previous_run, filter_tags=filter_tags + ) summary = output._summary return output, summary def output_error(self, error_tuple): - if 'TESTR_PDB' in os.environ: + if "TESTR_PDB" in os.environ: import traceback - self._stderr.write(''.join(traceback.format_tb(error_tuple[2]))) - self._stderr.write('\n') + + self._stderr.write("".join(traceback.format_tb(error_tuple[2]))) + self._stderr.write("\n") import pdb + p = pdb.Pdb(stdin=self._stdin, stdout=self._stdout) p.reset() p.interaction(None, error_tuple[2]) error_type = str(error_tuple[1]) - self._stderr.write(error_type + '\n') + self._stderr.write(error_type + "\n") def output_rest(self, rest_string): self._stdout.write(rest_string) - if not rest_string.endswith('\n'): - self._stdout.write('\n') + if not rest_string.endswith("\n"): + self._stdout.write("\n") def output_stream(self, stream): if not self._binary_stdout: self._binary_stdout = subunit.make_stream_binary(self._stdout) contents = stream.read(65536) - assert type(contents) is bytes, \ - "Bad stream contents %r" % type(contents) + assert type(contents) is bytes, "Bad stream contents %r" % type(contents) # If there are unflushed bytes in the text wrapper, we need to sync.. 
self._stdout.flush() while contents: @@ -168,40 +189,41 @@ def output_table(self, table): widths[idx] = len(column) # Show a row outputs = [] + def show_row(row): for idx, column in enumerate(row): outputs.append(column) if idx == len(row) - 1: - outputs.append('\n') + outputs.append("\n") return # spacers for the next column - outputs.append(' '*(widths[idx]-len(column))) - outputs.append(' ') + outputs.append(" " * (widths[idx] - len(column))) + outputs.append(" ") + show_row(contents[0]) # title spacer for idx, width in enumerate(widths): - outputs.append('-'*width) + outputs.append("-" * width) if idx == len(widths) - 1: - outputs.append('\n') + outputs.append("\n") continue - outputs.append(' ') + outputs.append(" ") for row in contents[1:]: show_row(row) - self._stdout.write(''.join(outputs)) + self._stdout.write("".join(outputs)) def output_tests(self, tests): for test in tests: self._stdout.write(test.id()) - self._stdout.write('\n') + self._stdout.write("\n") def output_values(self, values): outputs = [] for label, value in values: - outputs.append('%s=%s' % (label, value)) - self._stdout.write('%s\n' % ', '.join(outputs)) + outputs.append("%s=%s" % (label, value)) + self._stdout.write("%s\n" % ", ".join(outputs)) - def _format_summary(self, successful, tests, tests_delta, - time, time_delta, values): + def _format_summary(self, successful, tests, tests_delta, time, time_delta, values): # We build the string by appending to a list of strings and then # joining trivially at the end. Avoids expensive string concatenation. 
summary = [] @@ -220,46 +242,58 @@ def _format_summary(self, successful, tests, tests_delta, if summary: a("\n") if successful: - a('PASSED') + a("PASSED") else: - a('FAILED') + a("FAILED") if values: - a(' (') + a(" (") values_strings = [] for name, value, delta in values: - value_str = '%s=%s' % (name, value) + value_str = "%s=%s" % (name, value) if delta: - value_str += ' (%+d)' % (delta,) + value_str += " (%+d)" % (delta,) values_strings.append(value_str) - a(', '.join(values_strings)) - a(')') - return ''.join(summary) + a(", ".join(values_strings)) + a(")") + return "".join(summary) - def output_summary(self, successful, tests, tests_delta, - time, time_delta, values): + def output_summary(self, successful, tests, tests_delta, time, time_delta, values): self._stdout.write( self._format_summary( - successful, tests, tests_delta, time, time_delta, values)) - self._stdout.write('\n') + successful, tests, tests_delta, time, time_delta, values + ) + ) + self._stdout.write("\n") def _check_cmd(self): parser = get_command_parser(self.cmd) - parser.add_option("-d", "--here", dest="here", + parser.add_option( + "-d", + "--here", + dest="here", help="Set the directory or url that a command should run from. " "This affects all default path lookups but does not affect paths " - "supplied to the command.", default=os.getcwd(), type=str) - parser.add_option("-q", "--quiet", action="store_true", default=False, + "supplied to the command.", + default=os.getcwd(), + type=str, + ) + parser.add_option( + "-q", + "--quiet", + action="store_true", + default=False, help="Turn off output other than the primary output for a command " - "and any errors.") + "and any errors.", + ) # yank out --, as optparse makes it silly hard to just preserve it. 
try: - where_dashdash = self._argv.index('--') + where_dashdash = self._argv.index("--") opt_argv = self._argv[:where_dashdash] other_args = self._argv[where_dashdash:] except ValueError: opt_argv = self._argv other_args = [] - if '-h' in opt_argv or '--help' in opt_argv or '-?' in opt_argv: + if "-h" in opt_argv or "--help" in opt_argv or "-?" in opt_argv: self.output_rest(parser.format_help()) # Fugly, but its what optparse does: we're just overriding the # output path. @@ -290,8 +324,9 @@ def _clear_SIGPIPE(self): def subprocess_Popen(self, *args, **kwargs): import subprocess + if os.name == "posix": # GZ 2010-12-04: Should perhaps check for existing preexec_fn and # combine so both will get called. - kwargs['preexec_fn'] = self._clear_SIGPIPE + kwargs["preexec_fn"] = self._clear_SIGPIPE return subprocess.Popen(*args, **kwargs) diff --git a/testrepository/ui/decorator.py b/testrepository/ui/decorator.py index 80edc86..b3dd1a4 100644 --- a/testrepository/ui/decorator.py +++ b/testrepository/ui/decorator.py @@ -1,11 +1,11 @@ # # Copyright (c) 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -22,16 +22,16 @@ class UI(ui.AbstractUI): """A decorating UI. - + Not comprehensive yet - only supports overriding input streams. Note that because UI objects carry command specific state only specific things can - be delegated - option/argument lookup, streams. set_command for instance, + be delegated - option/argument lookup, streams. 
set_command for instance, does not get passed to the decorated UI unless it has not been initialised. """ def __init__(self, input_streams=None, options={}, decorated=None): """Create a decorating UI. - + :param input_streams: The input steams to present from this UI. Should be a list of (stream name, file) tuples. :param options: Dict of options to replace in the base UI. These are @@ -42,8 +42,7 @@ def __init__(self, input_streams=None, options={}, decorated=None): self.input_streams = {} if input_streams: for stream_type, stream_value in input_streams: - self.input_streams.setdefault(stream_type, []).append( - stream_value) + self.input_streams.setdefault(stream_type, []).append(stream_value) self._options = options @property @@ -57,14 +56,15 @@ def here(self): def _iter_streams(self, stream_type): streams = self.input_streams.pop(stream_type, []) for stream_value in streams: - if getattr(stream_value, 'read', None): + if getattr(stream_value, "read", None): yield stream_value else: yield BytesIO(stream_value) def make_result(self, get_id, test_command, previous_run=None): return self._decorated.make_result( - get_id, test_command, previous_run=previous_run) + get_id, test_command, previous_run=previous_run + ) def output_error(self, error_tuple): return self._decorated.output_error(error_tuple) @@ -86,22 +86,22 @@ def output_values(self, values): def output_summary(self, successful, tests, tests_delta, time, time_delta, values): return self._decorated.output_summary( - successful, tests, tests_delta, time, time_delta, values) + successful, tests, tests_delta, time, time_delta, values + ) def set_command(self, cmd): self.cmd = cmd result = True - if getattr(self._decorated, 'cmd', None) is None: + if getattr(self._decorated, "cmd", None) is None: result = self._decorated.set_command(cmd) # Pickup the repository factory from the decorated UI's command. 
cmd.repository_factory = self._decorated.cmd.repository_factory # Merge options self.options = optparse.Values() for option in dir(self._decorated.options): - if option.startswith('_'): + if option.startswith("_"): continue - setattr(self.options, option, - getattr(self._decorated.options, option)) + setattr(self.options, option, getattr(self._decorated.options, option)) for option, value in self._options.items(): setattr(self.options, option, value) return result diff --git a/testrepository/ui/model.py b/testrepository/ui/model.py index 6f441ad..5703617 100644 --- a/testrepository/ui/model.py +++ b/testrepository/ui/model.py @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -17,7 +17,6 @@ from io import BytesIO import optparse -import testtools from testrepository import ui @@ -32,15 +31,14 @@ def __init__(self, ui): self.stdout = BytesIO() def communicate(self): - self.ui.outputs.append(('communicate',)) - return self.stdout.getvalue(), b'' + self.ui.outputs.append(("communicate",)) + return self.stdout.getvalue(), b"" def wait(self): return self.returncode class TestSuiteModel(object): - def __init__(self): self._results = [] @@ -53,37 +51,61 @@ def run(self, result): class TestResultModel(ui.BaseUITestResult): - def __init__(self, ui, get_id, previous_run=None): super(TestResultModel, self).__init__(ui, get_id, previous_run) self._suite = TestSuiteModel() - def status(self, test_id=None, test_status=None, test_tags=None, - runnable=True, file_name=None, file_bytes=None, eof=False, - mime_type=None, route_code=None, timestamp=None): - super(TestResultModel, self).status(test_id=test_id, - test_status=test_status, test_tags=test_tags, runnable=runnable, - file_name=file_name, file_bytes=file_bytes, eof=eof, - mime_type=mime_type, route_code=route_code, timestamp=timestamp) - self._suite.recordResult('status', test_id, test_status) + def status( + self, + test_id=None, + test_status=None, + test_tags=None, + runnable=True, + file_name=None, + file_bytes=None, + eof=False, + mime_type=None, + route_code=None, + timestamp=None, + ): + super(TestResultModel, self).status( + test_id=test_id, + test_status=test_status, + test_tags=test_tags, + runnable=runnable, + file_name=file_name, + file_bytes=file_bytes, + eof=eof, + mime_type=mime_type, + route_code=route_code, + timestamp=timestamp, + ) + self._suite.recordResult("status", test_id, test_status) def stopTestRun(self): if self.ui.options.quiet: return - self.ui.outputs.append(('results', self._suite)) + self.ui.outputs.append(("results", self._suite)) return super(TestResultModel, self).stopTestRun() class UI(ui.AbstractUI): """A object based UI. 
- + This is useful for reusing the Command objects that provide a simplified interaction model with the domain logic from python. It is used for testing testrepository commands. """ - def __init__(self, input_streams=None, options=(), args=(), - here='memory:', proc_outputs=(), proc_results=()): + def __init__( + self, + input_streams=None, + options=(), + args=(), + here="memory:", + proc_outputs=(), + proc_results=(), + ): """Create a model UI. :param input_streams: A list of stream name, (file or bytes) tuples to @@ -100,9 +122,8 @@ def __init__(self, input_streams=None, options=(), args=(), if input_streams: for stream_type, stream_value in input_streams: if isinstance(stream_value, str) and str is not bytes: - raise Exception('bad stream_value') - self.input_streams.setdefault(stream_type, []).append( - stream_value) + raise Exception("bad stream_value") + self.input_streams.setdefault(stream_type, []).append(stream_value) self.here = here self.unparsed_opts = options self.outputs = [] @@ -119,10 +140,10 @@ def _check_cmd(self): for option, value in options: setattr(self.options, option, value) seen_options.add(option) - if not 'quiet' in seen_options: - setattr(self.options, 'quiet', False) + if "quiet" not in seen_options: + setattr(self.options, "quiet", False) for option in self.cmd.options: - if not option.dest in seen_options: + if option.dest not in seen_options: setattr(self.options, option.dest, option.default) args = list(self.unparsed_args) parsed_args = {} @@ -139,7 +160,7 @@ def _check_cmd(self): def _iter_streams(self, stream_type): streams = self.input_streams.pop(stream_type, []) for stream_value in streams: - if getattr(stream_value, 'read', None): + if getattr(stream_value, "read", None): yield stream_value else: yield BytesIO(stream_value) @@ -149,31 +170,32 @@ def make_result(self, get_id, test_command, previous_run=None): return result, result._summary def output_error(self, error_tuple): - self.outputs.append(('error', error_tuple)) + 
self.outputs.append(("error", error_tuple)) def output_rest(self, rest_string): - self.outputs.append(('rest', rest_string)) + self.outputs.append(("rest", rest_string)) def output_stream(self, stream): - self.outputs.append(('stream', stream.read())) + self.outputs.append(("stream", stream.read())) def output_table(self, table): - self.outputs.append(('table', table)) + self.outputs.append(("table", table)) def output_tests(self, tests): """Output a list of tests.""" - self.outputs.append(('tests', tests)) + self.outputs.append(("tests", tests)) def output_values(self, values): - self.outputs.append(('values', values)) + self.outputs.append(("values", values)) def output_summary(self, successful, tests, tests_delta, time, time_delta, values): self.outputs.append( - ('summary', successful, tests, tests_delta, time, time_delta, values)) + ("summary", successful, tests, tests_delta, time, time_delta, values) + ) def subprocess_Popen(self, *args, **kwargs): # Really not an output - outputs should be renamed to events. - self.outputs.append(('popen', args, kwargs)) + self.outputs.append(("popen", args, kwargs)) result = ProcessModel(self) if self.proc_outputs: result.stdout = BytesIO(self.proc_outputs.pop(0)) diff --git a/testrepository/utils.py b/testrepository/utils.py index dcca004..bef5cea 100644 --- a/testrepository/utils.py +++ b/testrepository/utils.py @@ -1,7 +1,5 @@ - def timedelta_to_seconds(delta): - """Return the number of seconds that make up the duration of a timedelta. - """ + """Return the number of seconds that make up the duration of a timedelta.""" return ( - (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) - / float(10**6)) + delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6 + ) / float(10**6)