From 3090406a229f465522944b385e2d4d41b044be73 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 24 Jul 2017 13:23:37 -0400 Subject: [PATCH] Add a real python api for running commands This commit adds a real api for running the stestr commands. Instead of just having a magic run() function that takes in an tuple of a argparse Namespace and a list of undefined arguments this migrates all the real work into a function that has properly defined kwargs. The run() command is then relegated to just convert the Namespace into a dict and pass the arguments into that real function. This enables external programs to just call the new functions with defined args and run commands exactly like on the cli, but with a defined python interface. It makes everything a lot easier for python consumption. The tradeoff here though is that everything is bit more verbose, but that's the cost of being explicit with a defined interface. As a side effect of this change instead of passing that Namespace object around between all the lower layers real interfaces have to be defined for all the functions. This means a ton of new kwargs, but again this is better in the long run because it means we have defined interfaces for all the functions. 
Closes Issue #8 --- stestr/commands/failing.py | 30 +++++- stestr/commands/init.py | 20 +++- stestr/commands/last.py | 22 +++- stestr/commands/list.py | 61 +++++++++-- stestr/commands/load.py | 56 +++++----- stestr/commands/run.py | 174 ++++++++++++++++++++++++------- stestr/commands/slowest.py | 20 +++- stestr/commands/stats.py | 15 ++- stestr/config_file.py | 70 +++++++++---- stestr/test_listing_fixture.py | 45 +++++--- stestr/tests/test_config_file.py | 26 ++--- 11 files changed, 403 insertions(+), 136 deletions(-) diff --git a/stestr/commands/failing.py b/stestr/commands/failing.py index 3bfa30a3..bc81ef25 100644 --- a/stestr/commands/failing.py +++ b/stestr/commands/failing.py @@ -59,13 +59,35 @@ def _get_id(): def run(arguments): args = arguments[0] - repo = util.get_repo_open(args.repo_type, args.repo_url) + return failing(repo_type=args.repo_type, repo_url=args.repo_url, + list_tests=args.list, subunit=args.subunit) + + +def failing(repo_type='file', repo_url=None, list_tests=False, subunit=False): + """Return the failing tests from the most recent run in the repository + + Note this function depends on the cwd for the repository if `repo_type` is + set to file and `repo_url` is not specified it will use the repository + located at CWD/.stestr + + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. + :param bool list_tests: Show only a list of failing tests. + :param bool subunit: Show output as a subunit stream. 
+ + """ + if repo_type not in ['file', 'sql']: + print('Repository type %s is not a type' % repo_type) + return 1 + + repo = util.get_repo_open(repo_type, repo_url) run = repo.get_failing() - if args.subunit: + if subunit: return _show_subunit(run) case = run.get_test() failed = False - result, summary = _make_result(repo, list_tests=args.list) + result, summary = _make_result(repo, list_tests=list_tests) result.startTestRun() try: case.run(result) @@ -76,7 +98,7 @@ def run(arguments): result = 1 else: result = 0 - if args.list: + if list_tests: failing_tests = [ test for test, _ in summary.errors + summary.failures] output.output_tests(failing_tests) diff --git a/stestr/commands/init.py b/stestr/commands/init.py index 497791e3..0a81578a 100644 --- a/stestr/commands/init.py +++ b/stestr/commands/init.py @@ -15,8 +15,24 @@ from stestr.repository import util -def run(args): - util.get_repo_initialise(args[0].repo_type, args[0].repo_url) +def run(arguments): + args = arguments[0] + init(args.repo_type, args.repo_url) + + +def init(repo_type='file', repo_url=None): + """Initialize a new repository + + Note this function depends on the cwd for the repository if `repo_type` is + set to file and `repo_url` is not specified it will use the repository + located at CWD/.stestr + + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. 
+ """ + + util.get_repo_initialise(repo_type, repo_url) def set_cli_opts(parser): diff --git a/stestr/commands/last.py b/stestr/commands/last.py index d7c9d322..d813405d 100644 --- a/stestr/commands/last.py +++ b/stestr/commands/last.py @@ -40,16 +40,32 @@ def set_cli_opts(parser): def run(arguments): args = arguments[0] - repo = util.get_repo_open(args.repo_type, args.repo_url) + return last(repo_type=args.repo_type, repo_url=args.repo_url, + subunit=args.subunit) + + +def last(repo_type='file', repo_url=None, subunit=False): + """Show the last run loaded into a repository + + Note this function depends on the cwd for the repository if `repo_type` is + set to file and `repo_url` is not specified it will use the repository + located at CWD/.stestr + + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. + :param bool subunit: Show output as a subunit stream. + """ + repo = util.get_repo_open(repo_type, repo_url) latest_run = repo.get_latest_run() - if args.subunit: + if subunit: stream = latest_run.get_subunit_stream() output.output_stream(stream) # Exits 0 if we successfully wrote the stream. 
return 0 case = latest_run.get_test() try: - if args.repo_type == 'file': + if repo_type == 'file': previous_run = repo.get_test_run(repo.latest_id() - 1) # TODO(mtreinish): add a repository api to get the previous_run to # unify this logic diff --git a/stestr/commands/list.py b/stestr/commands/list.py index e34b7115..f94b6931 100644 --- a/stestr/commands/list.py +++ b/stestr/commands/list.py @@ -45,16 +45,59 @@ def set_cli_opts(parser): ' white selection, which by default is everything.') -def run(args): - _args = args[0] +def run(arguments): + args = arguments[0] + filters = arguments[1] + return list_command(config=args.config, repo_type=args.repo_type, + repo_url=args.repo_url, group_regex=args.group_regex, + blacklist_file=args.blacklist_file, + whitelist_file=args.whitelist_file, + black_regex=args.black_regex, + filters=filters) + + +def list_command(config='.stestr.conf', repo_type='file', repo_url=None, + test_path=None, top_dir=None, group_regex=None, + blacklist_file=None, whitelist_file=None, black_regex=None, + filters=None): + """Print a list of test_ids for a project + + This function will print the test_ids for tests in a project. You can + filter the output just like with the run command to see exactly what + will be run. + + :param str config: The path to the stestr config file. Must be a string. + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. + :param str test_path: Set the test path to use for unittest discovery. + If both this and the corresponding config file option are set, this + value will be used. + :param str top_dir: The top dir to use for unittest discovery. This takes + precedence over the value in the config file. (if one is present in + the config file) + :param str group_regex: Set a group regex to use for grouping tests + together in the stestr scheduler. 
If both this and the corresponding + config file option are set this value will be used. + :param str blacklist_file: Path to a blacklist file, this file contains a + separate regex exclude on each newline. + :param str whitelist_file: Path to a whitelist file, this file contains a + separate regex on each newline. + :param str black_regex: Test rejection regex. If a test cases name matches + on re.search() operation, it will be removed from the final test list. + :param list filters: A list of string regex filters to initially apply on + the test list. Tests that match any of the regexes will be used. + (assuming any other filtering specified also uses it) + """ ids = None - filters = None - if args[1]: - filters = args[1] - conf = config_file.TestrConf(_args.config) - cmd = conf.get_run_command(_args, ids, filters) - not_filtered = filters is None and _args.blacklist_file is None\ - and _args.whitelist_file is None and _args.black_regex is None + conf = config_file.TestrConf(config) + cmd = conf.get_run_command( + regexes=filters, repo_type=repo_type, + repo_url=repo_url, group_regex=group_regex, + blacklist_file=blacklist_file, whitelist_file=whitelist_file, + black_regex=black_regex) + not_filtered = filters is None and blacklist_file is None\ + and whitelist_file is None and black_regex is None try: cmd.setUp() # List tests if the fixture has not already needed to to filter. 
diff --git a/stestr/commands/load.py b/stestr/commands/load.py index fb2c3045..2eef631f 100644 --- a/stestr/commands/load.py +++ b/stestr/commands/load.py @@ -55,20 +55,34 @@ def get_cli_help(): def run(arguments): - load(arguments) - - -def load(arguments, in_streams=None, partial=False, subunit_out=False, - repo_type=None, repo_url=None, run_id=None): args = arguments[0] - streams = arguments[1] - if args: - repo_type = args.repo_type - repo_url = args.repo_url + load(repo_type=args.repo_type, repo_url=args.repo_url, + partial=args.partial, subunit_out=args.subunit, + force_init=args.force_init, streams=arguments[1]) + + +def load(force_init=False, in_streams=None, + partial=False, subunit_out=False, repo_type='file', repo_url=None, + run_id=None, streams=None): + """Load subunit streams into a repository + + :param bool force_init: Initialize the specified repository if it hasn't + been created. + :param list in_streams: A list of file objects that will be saved into the + repository + :param bool partial: Specify the input is a partial stream + :param bool subunit_out: Output the subunit stream to stdout + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. + :param run_id: The optional run id to save the subunit stream to. + :param list streams: A list of file paths to read for the input streams. 
+ """ + try: repo = util.get_repo_open(repo_type, repo_url) except repository.RepositoryNotFound: - if args.force_init: + if force_init: repo = util.get_repo_initialise(repo_type, repo_url) else: raise @@ -96,25 +110,11 @@ def make_tests(): yield (case, str(pos)) case = testtools.ConcurrentStreamTestSuite(make_tests) - # One unmodified copy of the stream to repository storage - _partial = False - if args: - _partial = getattr(args, 'partial') - # Set partial_stream if it comes in via the CLI or the kwarg - partial_stream = _partial or partial - _subunit = False - if args: - _subunit = getattr(args, 'subunit') - _subunit_out = _subunit or subunit_out - _run_id = None - if args: - _run_id = getattr(args, 'id') - _run_id = _run_id or run_id - if not _run_id: - inserter = repo.get_inserter(partial=partial_stream) + if not run_id: + inserter = repo.get_inserter(partial=partial) else: - inserter = repo.get_inserter(partial=partial_stream, run_id=_run_id) - if _subunit_out: + inserter = repo.get_inserter(partial=partial, run_id=run_id) + if subunit_out: output_result, summary_result = output.make_result(inserter.get_id) else: try: diff --git a/stestr/commands/run.py b/stestr/commands/run.py index f9f8b347..9fb7a662 100644 --- a/stestr/commands/run.py +++ b/stestr/commands/run.py @@ -111,26 +111,81 @@ def gather_errors(test_dict): return ids -def run(arguments): - args = arguments[0] - filters = arguments[1] or None +def run_command(config='.stestr.conf', repo_type='file', + repo_url=None, test_path=None, top_dir=None, group_regex=None, + failing=False, serial=False, concurrency=0, load_list=None, + partial=False, subunit=False, until_failure=False, + analyze_isolation=False, isolated=False, worker_path=None, + blacklist_file=None, whitelist_file=None, black_regex=None, + no_discover=False, random=False, combine=False, filters=None): + """Function to execute the run command + + This function implements the run command. 
It will run the tests specified + in the parameters based on the provided config file and/or arguments + specified in the way specified by the arguments. + + :param str config: The path to the stestr config file. Must be a string. + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. + :param str test_path: Set the test path to use for unittest discovery. + If both this and the corresponding config file option are set, this + value will be used. + :param str top_dir: The top dir to use for unittest discovery. This takes + precedence over the value in the config file. (if one is present in + the config file) + :param str group_regex: Set a group regex to use for grouping tests + together in the stestr scheduler. If both this and the corresponding + config file option are set this value will be used. + :param bool failing: Run only tests known to be failing. + :param bool serial: Run tests serially + :param int concurrency: "How many processes to use. The default (0) + autodetects your CPU count and uses that. + :param str load_list: The path to a list of test_ids. If specified only + tests listed in the named file will be run. + :param bool partial: Only some tests will be run. Implied by `--failing`. + :param bool subunit: Display results in subunit format. + :param bool until_failure: Repeat the run again and again until failure + occurs. + :param bool analyze_isolation: Search the last test run for 2-test test + isolation interactions. + :param bool isolated: Run each test id in a separate test runner. + :param str worker_path: Optional path of a manual worker grouping file + to use for the run. + :param str blacklist_file: Path to a blacklist file, this file contains a + separate regex exclude on each newline. + :param str whitelist_file: Path to a whitelist file, this file contains a + separate regex on each newline. 
+ :param str black_regex: Test rejection regex. If a test cases name matches + on re.search() operation, it will be removed from the final test list. + :param str no_discover: Takes in a single test_id to bypasses test + discover and just execute the test specified. A file name may be used + in place of a test name. + :param bool random: Randomize the test order after they are partitioned + into separate workers + :param bool combine: Combine the results from the test run with the + last run in the repository + :param list filters: A list of string regex filters to initially apply on + the test list. Tests that match any of the regexes will be used. + (assuming any other filtering specified also uses it) + """ try: - repo = util.get_repo_open(args.repo_type, args.repo_url) + repo = util.get_repo_open(repo_type, repo_url) # If a repo is not found, and there a testr config exists just create it except repository.RepositoryNotFound: - if not os.path.isfile(args.config) and not args.test_path: + if not os.path.isfile(config) and not test_path: msg = ("No config file found and --test-path not specified. 
" "Either create or specify a .stestr.conf or use " "--test-path ") print(msg) exit(1) - repo = util.get_repo_initialise(args.repo_type, args.repo_url) + repo = util.get_repo_initialise(repo_type, repo_url) combine_id = None - if args.combine: + if combine: latest_id = repo.latest_id() combine_id = six.text_type(latest_id) - if args.no_discover: - ids = args.no_discover + if no_discover: + ids = no_discover if ids.find('/') != -1: root, _ = os.path.splitext(ids) ids = root.replace('/', '.') @@ -140,12 +195,12 @@ def run_tests(): run_proc = [('subunit', output.ReturnCodeToSubunit( subprocess.Popen(run_cmd, shell=True, stdout=subprocess.PIPE)))] - return load.load((None, None), in_streams=run_proc, - partial=args.partial, subunit_out=args.subunit, - repo_type=args.repo_type, - repo_url=args.repo_url, run_id=combine_id) + return load.load(in_streams=run_proc, + partial=partial, subunit_out=subunit, + repo_type=repo_type, + repo_url=repo_url, run_id=combine_id) - if not args.until_failure: + if not until_failure: return run_tests() else: result = run_tests() @@ -153,14 +208,14 @@ def run_tests(): result = run_tests() return result - if args.failing or args.analyze_isolation: + if failing or analyze_isolation: ids = _find_failing(repo) else: ids = None - if args.load_list: + if load_list: list_ids = set() # Should perhaps be text.. currently does its own decode. - with open(args.load_list, 'rb') as list_file: + with open(load_list, 'rb') as list_file: list_ids = set(parse_list(list_file.read())) if ids is None: # Use the supplied list verbatim @@ -170,10 +225,15 @@ def run_tests(): # that are both failing and listed. 
ids = list_ids.intersection(ids) - conf = config_file.TestrConf(args.config) - if not args.analyze_isolation: - cmd = conf.get_run_command(args, ids, filters) - if args.isolated: + conf = config_file.TestrConf(config) + if not analyze_isolation: + cmd = conf.get_run_command( + ids, regexes=filters, group_regex=group_regex, repo_type=repo_type, + repo_url=repo_url, serial=serial, worker_path=worker_path, + concurrency=concurrency, blacklist_file=blacklist_file, + black_regex=black_regex, top_dir=top_dir, test_path=test_path, + randomize=random) + if isolated: result = 0 cmd.setUp() try: @@ -182,21 +242,31 @@ def run_tests(): cmd.cleanUp() for test_id in ids: # TODO(mtreinish): add regex - cmd = conf.get_run_command(args, [test_id], filters) - run_result = _run_tests(cmd, args.failing, - args.analyze_isolation, - args.isolated, - args.until_failure, - subunit_out=args.subunit, - combine_id=combine_id) + cmd = conf.get_run_command( + [test_id], filters, group_regex=group_regex, + repo_type=repo_type, repo_url=repo_url, serial=serial, + worker_path=worker_path, concurrency=concurrency, + blacklist_file=blacklist_file, black_regex=black_regex, + randomize=random, test_path=test_path, top_dir=top_dir) + + run_result = _run_tests(cmd, failing, + analyze_isolation, + isolated, + until_failure, + subunit_out=subunit, + combine_id=combine_id, + repo_type=repo_type, + repo_url=repo_url) if run_result > result: result = run_result return result else: - return _run_tests(cmd, args.failing, args.analyze_isolation, - args.isolated, args.until_failure, - subunit_out=args.subunit, - combine_id=combine_id) + return _run_tests(cmd, failing, analyze_isolation, + isolated, until_failure, + subunit_out=subunit, + combine_id=combine_id, + repo_type=repo_type, + repo_url=repo_url) else: # Where do we source data about the cause of conflicts. 
# XXX: Should instead capture the run id in with the failing test @@ -208,7 +278,12 @@ def run_tests(): spurious_failures = set() for test_id in ids: # TODO(mtrienish): Add regex - cmd = conf.get_run_command(args, [test_id]) + cmd = conf.get_run_command( + [test_id], group_regex=group_regex, repo_type=repo_type, + repo_url=repo_url, serial=serial, worker_path=worker_path, + concurrency=concurrency, blacklist_file=blacklist_file, + black_regex=black_regex, randomize=random, test_path=test_path, + top_dir=top_dir) if not _run_tests(cmd): # If the test was filtered, it won't have been run. if test_id in repo.get_test_ids(repo.latest_id()): @@ -238,9 +313,13 @@ def run_tests(): check_width = int(ceil(width / 2.0)) # TODO(mtreinish): Add regex cmd = conf.get_run_command( - args, candidate_causes[bottom:bottom + check_width] - + [spurious_failure]) + + [spurious_failure], + group_regex=group_regex, repo_type=repo_type, + repo_url=repo_url, serial=serial, worker_path=worker_path, + concurrency=concurrency, blacklist_file=blacklist_file, + black_regex=black_regex, randomize=random, + test_path=test_path, top_dir=top_dir) _run_tests(cmd) # check that the test we're probing still failed - still # awkward. 
@@ -331,7 +410,8 @@ def map_test(test_dict): def _run_tests(cmd, failing, analyze_isolation, isolated, until_failure, - subunit_out=False, combine_id=None): + subunit_out=False, combine_id=None, repo_type='file', + repo_url=None): """Run the tests cmd was parameterised with.""" cmd.setUp() try: @@ -347,8 +427,8 @@ def run_tests(): return 0 return load.load((None, None), in_streams=run_procs, partial=partial, subunit_out=subunit_out, - repo_type=cmd.options.repo_type, - repo_url=cmd.options.repo_url, run_id=combine_id) + repo_type=repo_type, + repo_url=repo_url, run_id=combine_id) if not until_failure: return run_tests() @@ -359,3 +439,21 @@ def run_tests(): return result finally: cmd.cleanUp() + + +def run(arguments): + filters = arguments[1] or None + args = arguments[0] + + return run_command( + config=args.config, repo_type=args.repo_type, repo_url=args.repo_url, + test_path=args.test_path, top_dir=args.top_dir, + group_regex=args.group_regex, failing=args.failing, serial=args.serial, + concurrency=args.concurrency, load_list=args.load_list, + partial=args.partial, subunit=args.subunit, + until_failure=args.until_failure, + analyze_isolation=args.analyze_isolation, isolated=args.isolated, + worker_path=args.worker_path, blacklist_file=args.blacklist_file, + whitelist_file=args.whitelist_file, black_regex=args.black_regex, + no_discover=args.no_discover, random=args.random, combine=args.combine, + filters=filters) diff --git a/stestr/commands/slowest.py b/stestr/commands/slowest.py index 6abba222..42d6b2d0 100644 --- a/stestr/commands/slowest.py +++ b/stestr/commands/slowest.py @@ -50,8 +50,22 @@ def format_time(time): return times -def run(args): - repo = util.get_repo_open(args[0].repo_type, args[0].repo_url) +def run(arguments): + args = arguments[0] + return slowest(repo_type=args.repo_type, repo_url=args.repo_url, + show_all=args.all) + + +def slowest(repo_type='file', repo_url=None, show_all=False): + """Print the slowest times from the last run in the 
repository + + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. + :param bool show_all: Show timing for all tests. + """ + + repo = util.get_repo_open(repo_type, repo_url) try: latest_id = repo.latest_id() except KeyError: @@ -62,7 +76,7 @@ def run(args): known_times.sort(key=itemgetter(1), reverse=True) if len(known_times) > 0: # By default show 10 rows - if not args[0].all: + if not show_all: known_times = known_times[:10] known_times = format_times(known_times) header = ('Test id', 'Runtime (s)') diff --git a/stestr/commands/stats.py b/stestr/commands/stats.py index 6ad89f75..881778c3 100644 --- a/stestr/commands/stats.py +++ b/stestr/commands/stats.py @@ -25,7 +25,18 @@ def set_cli_opts(parser): pass -def run(args): - repo = util.get_repo_open(args[0].repo_type, args[0].repo_url) +def run(arguments): + args = arguments[0] + return stats(repo_type=args.repo_type, repo_url=args.repo_url) + + +def stats(repo_type='file', repo_url=None): + """Print repo stats + + :param str repo_type: This is the type of repository to use. Valid choices + are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. 
+ """ + repo = util.get_repo_open(repo_type, repo_url) sys.stdout.write('%s=%s\n' % ('runs', repo.count())) return 0 diff --git a/stestr/config_file.py b/stestr/config_file.py index 43c3132f..520bc9b7 100644 --- a/stestr/config_file.py +++ b/stestr/config_file.py @@ -32,25 +32,59 @@ def __init__(self, config_file): self.parser.read(config_file) self.config_file = config_file - def get_run_command(self, options, test_ids=None, regexes=None): + def get_run_command(self, test_ids=None, regexes=None, + test_path=None, top_dir=None, group_regex=None, + repo_type='file', repo_url=None, + serial=False, worker_path=None, + concurrency=0, blacklist_file=None, + whitelist_file=None, black_regex=None, + randomize=False): """Get a test_listing_fixture.TestListingFixture for this config file - :param options: A argparse Namespace object of the cli options that - were used in the invocation of the original CLI command that - needs a TestListingFixture + Any parameters about running tests will be used for initialize the + output fixture so the settings are correct when that fixture is used + to run tests. Parameters will take precedence over values in the config + file. + :param list test_ids: an optional list of test_ids to use when running tests :param list regexes: an optional list of regex strings to use for filtering the tests to run. See the test_filters parameter in TestListingFixture to see how this is used. + :param str test_path: Set the test path to use for unittest discovery. + If both this and the corresponding config file option are set, this + value will be used. + :param str top_dir: The top dir to use for unittest discovery. This + takes precedence over the value in the config file. (if one is + present in the config file) + :param str group_regex: Set a group regex to use for grouping tests + together in the stestr scheduler. If both this and the + corresponding config file option are set this value will be used. 
+ :param str repo_type: This is the type of repository to use. Valid + choices are 'file' and 'sql'. + :param str repo_url: The url of the repository to use. + :param bool serial: If tests are run from the returned fixture, they + will be run serially + :param str worker_path: Optional path of a manual worker grouping file + to use for the run. + :param int concurrency: How many processes to use. The default (0) + autodetects your CPU count and uses that. + :param str blacklist_file: Path to a blacklist file, this file contains + a separate regex exclude on each newline. + :param str whitelist_file: Path to a whitelist file, this file contains + a separate regex on each newline. + :param str black_regex: Test rejection regex. If a test cases name + matches on re.search() operation, it will be removed from the final + test list. + :param bool randomize: Randomize the test order after they are + partitioned into separate workers + :returns: a TestListingFixture object for the specified config file and any arguments passed into this function :rtype: test_listing_fixture.TestListingFixture """ - if options.test_path: - test_path = options.test_path - elif self.parser.has_option('DEFAULT', 'test_path'): + if not test_path and self.parser.has_option('DEFAULT', 'test_path'): test_path = self.parser.get('DEFAULT', 'test_path') else: print("No test_path can be found in either the command line " @@ -58,11 +92,10 @@ def get_run_command(self, options, test_ids=None, regexes=None): "specify a test path either in the config file or via the " "--test-path argument".format(self.config_file)) sys.exit(1) - top_dir = './' - if options.top_dir: - top_dir = options.top_dir - elif self.parser.has_option('DEFAULT', 'top_dir'): + if not top_dir and self.parser.has_option('DEFAULT', 'top_dir'): top_dir = self.parser.get('DEFAULT', 'top_dir') + elif not top_dir: + top_dir = './' python = 'python' if sys.platform == 'win32' else '${PYTHON:-python}' command = "%s -m subunit.run discover -t" 
\ @@ -71,10 +104,8 @@ def get_run_command(self, options, test_ids=None, regexes=None): idoption = "--load-list $IDFILE" # If the command contains $IDOPTION read that command from config # Use a group regex if one is defined - group_regex = None - if options.group_regex: - group_regex = options.group_regex - elif self.parser.has_option('DEFAULT', 'group_regex'): + if not group_regex and self.parser.has_option('DEFAULT', + 'group_regex'): group_regex = self.parser.get('DEFAULT', 'group_regex') if group_regex: def group_callback(test_id, regex=re.compile(group_regex)): @@ -85,7 +116,10 @@ def group_callback(test_id, regex=re.compile(group_regex)): group_callback = None # Handle the results repository - repository = util.get_repo_open(options.repo_type, options.repo_url) + repository = util.get_repo_open(repo_type, repo_url) return test_listing_fixture.TestListingFixture( - test_ids, options, command, listopt, idoption, repository, - test_filters=regexes, group_callback=group_callback) + test_ids, command, listopt, idoption, repository, + test_filters=regexes, group_callback=group_callback, serial=serial, + worker_path=worker_path, concurrency=concurrency, + blacklist_file=blacklist_file, black_regex=black_regex, + whitelist_file=whitelist_file, randomize=randomize) diff --git a/stestr/test_listing_fixture.py b/stestr/test_listing_fixture.py index b9daa0da..e40bb3b3 100644 --- a/stestr/test_listing_fixture.py +++ b/stestr/test_listing_fixture.py @@ -63,11 +63,24 @@ class TestListingFixture(fixtures.Fixture): test id and returns a group id. A group id is an arbitrary value used as a dictionary key in the scheduler. All test ids with the same group id are scheduled onto the same backend test process. + :param bool serial: Run tests serially + :param path worker_path: Optional path of a manual worker grouping file + to use for the run + :param int concurrency: How many processes to use. The default (0) + autodetects your CPU count and uses that. 
+ :param path blacklist_file: Path to a blacklist file, this file contains a + separate regex exclude on each newline. + :param path whitelist_file: Path to a whitelist file, this file contains a + separate regex on each newline. + :param boolean randomize: Randomize the test order after they are + partitioned into separate workers """ - def __init__(self, test_ids, options, cmd_template, listopt, idoption, + def __init__(self, test_ids, cmd_template, listopt, idoption, repository, parallel=True, listpath=None, - test_filters=None, group_callback=None): + test_filters=None, group_callback=None, serial=False, + worker_path=None, concurrency=0, blacklist_file=None, + black_regex=None, whitelist_file=None, randomize=False): """Create a TestListingFixture.""" self.test_ids = test_ids @@ -76,15 +89,18 @@ def __init__(self, test_ids, options, cmd_template, listopt, idoption, self.idoption = idoption self.repository = repository self.parallel = parallel - if hasattr(options, 'serial') and options.serial: + if serial: self.parallel = False self._listpath = listpath self.test_filters = test_filters self._group_callback = group_callback - self.options = options self.worker_path = None - if hasattr(options, 'worker_path') and options.worker_path: - self.worker_path = options.worker_path + self.worker_path = worker_path + self.concurrency_value = concurrency + self.blacklist_file = blacklist_file + self.whitelist_file = whitelist_file + self.black_regex = black_regex + self.randomize = randomize def setUp(self): super(TestListingFixture, self).setUp() @@ -103,8 +119,8 @@ def list_subst(match): self.concurrency = 1 else: self.concurrency = None - if hasattr(self.options, 'concurrency'): - self.concurrency = int(self.options.concurrency) + if self.concurrency_value: + self.concurrency = int(self.concurrency_value) if not self.concurrency: self.concurrency = scheduler.local_concurrency() if not self.concurrency: @@ -125,10 +141,10 @@ def list_subst(match): idlist = '' else: 
self.test_ids = selection.construct_list( - self.test_ids, blacklist_file=self.options.blacklist_file, - whitelist_file=self.options.whitelist_file, + self.test_ids, blacklist_file=self.blacklist_file, + whitelist_file=self.whitelist_file, regexes=self.test_filters, - black_regex=self.options.black_regex) + black_regex=self.black_regex) name = self.make_listfile() variables['IDFILE'] = name idlist = ' '.join(self.test_ids) @@ -223,12 +239,9 @@ def run_tests(self): return [run_proc] # If there is a worker path, use that to get worker groups elif self.worker_path: - randomize = False - if hasattr(self.options, 'randomize'): - randomize = self.options.randomize test_id_groups = scheduler.generate_worker_partitions( test_ids, self.worker_path, self.repository, - self._group_callback, randomize) + self._group_callback, self.randomize) # If we have multiple workers partition the tests and recursively # create single worker TestListingFixtures for each worker else: @@ -241,7 +254,7 @@ def run_tests(self): # No tests in this partition continue fixture = self.useFixture( - TestListingFixture(test_ids, self.options, + TestListingFixture(test_ids, self.template, self.listopt, self.idoption, self.repository, parallel=False)) result.extend(fixture.run_tests()) diff --git a/stestr/tests/test_config_file.py b/stestr/tests/test_config_file.py index 82974214..e3701ae7 100644 --- a/stestr/tests/test_config_file.py +++ b/stestr/tests/test_config_file.py @@ -31,24 +31,24 @@ def _check_get_run_command(self, mock_sys, mock_TestListingFixture, mock_get_repo_open, platform='win32', expected_python='python'): mock_sys.platform = platform - mock_options = mock.Mock() - mock_options.test_path = 'fake_test_path' - mock_options.top_dir = 'fake_top_dir' - mock_options.group_regex = '.*' - fixture = self._testr_conf.get_run_command(mock_options, - mock.sentinel.test_ids, - mock.sentinel.regexes) + fixture = self._testr_conf.get_run_command(test_path='fake_test_path', + top_dir='fake_top_dir', + 
                                                   group_regex='.*') self.assertEqual(mock_TestListingFixture.return_value, fixture) - mock_get_repo_open.assert_called_once_with(mock_options.repo_type, - mock_options.repo_url) + mock_get_repo_open.assert_called_once_with('file', + None) command = "%s -m subunit.run discover -t %s %s $LISTOPT $IDOPTION" % ( - expected_python, mock_options.top_dir, mock_options.test_path) + expected_python, 'fake_top_dir', 'fake_test_path') + # Ensure TestListingFixture is created with defaults except for where + # we specified and with the correct python. mock_TestListingFixture.assert_called_once_with( - mock.sentinel.test_ids, mock_options, command, "--list", - "--load-list $IDFILE", mock_get_repo_open.return_value, - test_filters=mock.sentinel.regexes, group_callback=mock.ANY) + None, command, "--list", "--load-list $IDFILE", + mock_get_repo_open.return_value, black_regex=None, + blacklist_file=None, concurrency=0, group_callback=mock.ANY, + test_filters=None, randomize=False, serial=False, + whitelist_file=None, worker_path=None) def test_get_run_command_linux(self): self._check_get_run_command(platform='linux2',