diff --git a/scripts/pylib/twister/twisterlib/reports.py b/scripts/pylib/twister/twisterlib/reports.py
index a1dbe68b5a790..c0f06fc514288 100644
--- a/scripts/pylib/twister/twisterlib/reports.py
+++ b/scripts/pylib/twister/twisterlib/reports.py
@@ -584,19 +584,19 @@ def summary(self, results, ignore_unrecognized_sections, duration):
             if float(handler_time) > 0:
                 run += 1
 
-        if results.total and results.total != results.skipped_configs:
-            pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
+        if results.total and results.total != results.filtered_configs:
+            pass_rate = (float(results.passed) / float(results.total - results.filtered_configs))
         else:
             pass_rate = 0
 
         logger.info(
             f"{TwisterStatus.get_color(TwisterStatus.FAIL) if failed else TwisterStatus.get_color(TwisterStatus.PASS)}{results.passed}"
-            f" of {results.total - results.skipped_configs}{Fore.RESET}"
+            f" of {results.total - results.filtered_configs}{Fore.RESET}"
             f" executed test configurations passed ({pass_rate:.2%}),"
             f" {f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}' if results.notrun else f'{results.notrun}'} built (not run),"
             f" {f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}' if results.failed else f'{results.failed}'} failed,"
             f" {f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}' if results.error else f'{results.error}'} errored,"
-            f" with {f'{Fore.YELLOW}{self.plan.warnings}{Fore.RESET}' if self.plan.warnings else 'no'} warnings"
+            f" with {f'{Fore.YELLOW}{self.plan.warnings + results.warnings}{Fore.RESET}' if (self.plan.warnings + results.warnings) else 'no'} warnings"
             f" in {duration:.2f} seconds."
         )
 
@@ -615,16 +615,15 @@ def summary(self, results, ignore_unrecognized_sections, duration):
             f'{", " + str(results.none_cases) + " without a status" if results.none_cases else ""}'
             f' on {len(self.filtered_platforms)} out of total {total_platforms} platforms ({platform_rate:02.2f}%).'
         )
-        if results.skipped_cases or results.filtered_cases or results.notrun_cases:
+        if results.skipped_cases or results.notrun_cases:
             logger.info(
-                f'{results.skipped_cases + results.filtered_cases} selected test cases not executed:' \
+                f'{results.skipped_cases + results.notrun_cases} selected test cases not executed:' \
                 f'{" " + str(results.skipped_cases) + " skipped" if results.skipped_cases else ""}' \
-                f'{(", " if results.skipped_cases else " ") + str(results.filtered_cases) + " filtered" if results.filtered_cases else ""}' \
-                f'{(", " if results.skipped_cases or results.filtered_cases else " ") + str(results.notrun_cases) + " not run (built only)" if results.notrun_cases else ""}' \
+                f'{(", " if results.skipped_cases else " ") + str(results.notrun_cases) + " not run (built only)" if results.notrun_cases else ""}' \
                 f'.'
             )
 
-        built_only = results.total - run - results.skipped_configs
+        built_only = results.total - run - results.filtered_configs
         logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
 {TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET} test configurations were only built.")
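Note: the pass-rate arithmetic above reduces to the guard sketched below. This is a minimal standalone sketch with made-up numbers, not code from the tree; it only illustrates why the denominator excludes filtered configurations and why the division is skipped entirely when every configuration was filtered out.

    # Sketch only: mirrors the guard in summary() above; numbers are hypothetical.
    def pass_rate(passed: int, total: int, filtered_configs: int) -> float:
        # Avoid dividing by zero when everything was filtered.
        if total and total != filtered_configs:
            return passed / (total - filtered_configs)
        return 0.0

    assert pass_rate(passed=6, total=12, filtered_configs=3) == 6 / 9
    assert pass_rate(passed=0, total=5, filtered_configs=5) == 0.0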
diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py
index 201a869a82a9f..128bc598ed9c4 100644
--- a/scripts/pylib/twister/twisterlib/runner.py
+++ b/scripts/pylib/twister/twisterlib/runner.py
@@ -57,6 +57,7 @@
 logger = logging.getLogger('twister')
 logger.setLevel(logging.DEBUG)
 import expr_parser
+from anytree import Node, RenderTree
 
 
 class ExecutionCounter(object):
@@ -67,11 +68,11 @@ def __init__(self, total=0):
 
         total = yaml test scenarios * applicable platforms
         done := instances that reached report_out stage of the pipeline
-        done = skipped_configs + passed + failed + error
-        completed = done - skipped_filter
-        skipped_configs = skipped_runtime + skipped_filter
+        done = filtered_configs + passed + failed + error
+        completed = done - filtered_static
+        filtered_configs = filtered_runtime + filtered_static
 
-        pass rate = passed / (total - skipped_configs)
+        pass rate = passed / (total - filtered_configs)
         case pass rate = passed_cases / (cases - filtered_cases - skipped_cases)
         '''
         # instances that go through the pipeline
@@ -91,19 +92,20 @@ def __init__(self, total=0):
 
         # static filter + runtime filter + build skipped
         # updated by update_counting_before_pipeline() and report_out()
-        self._skipped_configs = Value('i', 0)
+        self._filtered_configs = Value('i', 0)
 
         # cmake filter + build skipped
         # updated by report_out()
-        self._skipped_runtime = Value('i', 0)
+        self._filtered_runtime = Value('i', 0)
 
         # static filtered at yaml parsing time
         # updated by update_counting_before_pipeline()
-        self._skipped_filter = Value('i', 0)
+        self._filtered_static = Value('i', 0)
 
         # updated by report_out() in pipeline
         self._error = Value('i', 0)
         self._failed = Value('i', 0)
+        self._skipped = Value('i', 0)
 
         # initialized to number of test instances
         self._total = Value('i', total)
@@ -129,6 +131,7 @@ def __init__(self, total=0):
 
         self._none_cases = Value('i', 0)
         self._started_cases = Value('i', 0)
+        self._warnings = Value('i', 0)
 
         self.lock = Lock()
 
@@ -143,48 +146,57 @@ def _find_number_length(n):
         return length
 
     def summary(self):
-        executed_cases = self.cases - self.skipped_cases - self.filtered_cases
-        completed_configs = self.done - self.skipped_filter
-
-        # Find alignment length for aesthetic printing
-        suites_n_length = self._find_number_length(self.total if self.total > self.done else self.done)
-        processed_suites_n_length = self._find_number_length(self.done)
-        completed_suites_n_length = self._find_number_length(completed_configs)
-        skipped_suites_n_length = self._find_number_length(self.skipped_configs)
-        total_cases_n_length = self._find_number_length(self.cases)
-        executed_cases_n_length = self._find_number_length(executed_cases)
-
-        print("--------------------------------------------------")
-        print(f"{'Total test suites: ':<23}{self.total:>{suites_n_length}}") # actually test instances
-        print(f"{'Processed test suites: ':<23}{self.done:>{suites_n_length}}")
-        print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{processed_suites_n_length}}")
-        print(f"└─ {'Completed test suites: ':<37}{completed_configs:>{processed_suites_n_length}}")
-        print(f"   ├─ {'Filtered test suites (at runtime): ':<37}{self.skipped_runtime:>{completed_suites_n_length}}")
-        print(f"   ├─ {'Passed test suites: ':<37}{self.passed:>{completed_suites_n_length}}")
-        print(f"   ├─ {'Built only test suites: ':<37}{self.notrun:>{completed_suites_n_length}}")
-        print(f"   ├─ {'Failed test suites: ':<37}{self.failed:>{completed_suites_n_length}}")
-        print(f"   └─ {'Errors in test suites: ':<37}{self.error:>{completed_suites_n_length}}")
-        print(f"")
-        print(f"{'Filtered test suites: ':<21}{self.skipped_configs}")
-        print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{skipped_suites_n_length}}")
-        print(f"└─ {'Filtered test suites (at runtime): ':<37}{self.skipped_runtime:>{skipped_suites_n_length}}")
-        print("---------------------- ----------------------")
-        print(f"{'Total test cases: ':<18}{self.cases}")
-        print(f"├─ {'Filtered test cases: ':<21}{self.filtered_cases:>{total_cases_n_length}}")
-        print(f"├─ {'Skipped test cases: ':<21}{self.skipped_cases:>{total_cases_n_length}}")
-        print(f"└─ {'Executed test cases: ':<21}{executed_cases:>{total_cases_n_length}}")
-        print(f"   ├─ {'Passed test cases: ':<25}{self.passed_cases:>{executed_cases_n_length}}")
-        print(f"   ├─ {'Built only test cases: ':<25}{self.notrun_cases:>{executed_cases_n_length}}")
-        print(f"   ├─ {'Blocked test cases: ':<25}{self.blocked_cases:>{executed_cases_n_length}}")
-        print(f"   ├─ {'Failed test cases: ':<25}{self.failed_cases:>{executed_cases_n_length}}")
-        print(f"   {'├' if self.none_cases or self.started_cases else '└'}─ {'Errors in test cases: ':<25}{self.error_cases:>{executed_cases_n_length}}")
+        selected_cases = self.cases - self.filtered_cases
+        selected_configs = self.done - self.filtered_static - self.filtered_runtime
+
+
+        root = Node("Summary")
+
+        Node(f"Total test suites: {self.total}", parent=root)
+        processed_suites = Node(f"Processed test suites: {self.done}", parent=root)
+        filtered_suites = Node(f"Filtered test suites: {self.filtered_configs}", parent=processed_suites)
+        Node(f"Filtered test suites (static): {self.filtered_static}", parent=filtered_suites)
+        Node(f"Filtered test suites (at runtime): {self.filtered_runtime}", parent=filtered_suites)
+        selected_suites = Node(f"Selected test suites: {selected_configs}", parent=processed_suites)
+        Node(f"Skipped test suites: {self.skipped}", parent=selected_suites)
+        Node(f"Passed test suites: {self.passed}", parent=selected_suites)
+        Node(f"Built only test suites: {self.notrun}", parent=selected_suites)
+        Node(f"Failed test suites: {self.failed}", parent=selected_suites)
+        Node(f"Errors in test suites: {self.error}", parent=selected_suites)
+
+        total_cases = Node(f"Total test cases: {self.cases}", parent=root)
+        Node(f"Filtered test cases: {self.filtered_cases}", parent=total_cases)
+        selected_cases_node = Node(f"Selected test cases: {selected_cases}", parent=total_cases)
+        Node(f"Passed test cases: {self.passed_cases}", parent=selected_cases_node)
+        Node(f"Skipped test cases: {self.skipped_cases}", parent=selected_cases_node)
+        Node(f"Built only test cases: {self.notrun_cases}", parent=selected_cases_node)
+        Node(f"Blocked test cases: {self.blocked_cases}", parent=selected_cases_node)
+        Node(f"Failed test cases: {self.failed_cases}", parent=selected_cases_node)
+        error_cases_node = Node(f"Errors in test cases: {self.error_cases}", parent=selected_cases_node)
+
         if self.none_cases or self.started_cases:
-            print(f"   ├──── The following test case statuses should not appear in a proper execution ───")
+            Node("The following test case statuses should not appear in a proper execution", parent=error_cases_node)
         if self.none_cases:
-            print(f"   {'├' if self.started_cases else '└'}─ {'Statusless test cases: ':<25}{self.none_cases:>{executed_cases_n_length}}")
+            Node(f"Statusless test cases: {self.none_cases}", parent=error_cases_node)
         if self.started_cases:
-            print(f"   └─ {'Test cases only started: ':<25}{self.started_cases:>{executed_cases_n_length}}")
-        print("--------------------------------------------------")
+            Node(f"Test cases only started: {self.started_cases}", parent=error_cases_node)
+
+        for pre, _, node in RenderTree(root):
+            print("%s%s" % (pre, node.name))
+
+    @property
+    def warnings(self):
+        with self._warnings.get_lock():
+            return self._warnings.value
+
+    @warnings.setter
+    def warnings(self, value):
+        with self._warnings.get_lock():
+            self._warnings.value = value
+
+    def warnings_increment(self, value=1):
+        with self._warnings.get_lock():
+            self._warnings.value += value
 
     @property
     def cases(self):
@@ -326,6 +338,20 @@ def started_cases_increment(self, value=1):
         with self._started_cases.get_lock():
             self._started_cases.value += value
 
+    @property
+    def skipped(self):
+        with self._skipped.get_lock():
+            return self._skipped.value
+
+    @skipped.setter
+    def skipped(self, value):
+        with self._skipped.get_lock():
+            self._skipped.value = value
+
+    def skipped_increment(self, value=1):
+        with self._skipped.get_lock():
+            self._skipped.value += value
+
     @property
     def error(self):
         with self._error.get_lock():
@@ -397,46 +423,46 @@ def notrun_increment(self, value=1):
             self._notrun.value += value
 
     @property
-    def skipped_configs(self):
-        with self._skipped_configs.get_lock():
-            return self._skipped_configs.value
+    def filtered_configs(self):
+        with self._filtered_configs.get_lock():
+            return self._filtered_configs.value
 
-    @skipped_configs.setter
-    def skipped_configs(self, value):
-        with self._skipped_configs.get_lock():
-            self._skipped_configs.value = value
+    @filtered_configs.setter
+    def filtered_configs(self, value):
+        with self._filtered_configs.get_lock():
+            self._filtered_configs.value = value
 
-    def skipped_configs_increment(self, value=1):
-        with self._skipped_configs.get_lock():
-            self._skipped_configs.value += value
+    def filtered_configs_increment(self, value=1):
+        with self._filtered_configs.get_lock():
+            self._filtered_configs.value += value
 
     @property
-    def skipped_filter(self):
-        with self._skipped_filter.get_lock():
-            return self._skipped_filter.value
+    def filtered_static(self):
+        with self._filtered_static.get_lock():
+            return self._filtered_static.value
 
-    @skipped_filter.setter
-    def skipped_filter(self, value):
-        with self._skipped_filter.get_lock():
-            self._skipped_filter.value = value
+    @filtered_static.setter
+    def filtered_static(self, value):
+        with self._filtered_static.get_lock():
+            self._filtered_static.value = value
 
-    def skipped_filter_increment(self, value=1):
-        with self._skipped_filter.get_lock():
-            self._skipped_filter.value += value
+    def filtered_static_increment(self, value=1):
+        with self._filtered_static.get_lock():
+            self._filtered_static.value += value
 
     @property
-    def skipped_runtime(self):
-        with self._skipped_runtime.get_lock():
-            return self._skipped_runtime.value
+    def filtered_runtime(self):
+        with self._filtered_runtime.get_lock():
+            return self._filtered_runtime.value
 
-    @skipped_runtime.setter
-    def skipped_runtime(self, value):
-        with self._skipped_runtime.get_lock():
-            self._skipped_runtime.value = value
+    @filtered_runtime.setter
+    def filtered_runtime(self, value):
+        with self._filtered_runtime.get_lock():
+            self._filtered_runtime.value = value
 
-    def skipped_runtime_increment(self, value=1):
-        with self._skipped_runtime.get_lock():
-            self._skipped_runtime.value += value
+    def filtered_runtime_increment(self, value=1):
+        with self._filtered_runtime.get_lock():
+            self._filtered_runtime.value += value
 
     @property
     def failed(self):
@@ -877,8 +903,8 @@ def process(self, pipeline, done, message, lock, results):
                 logger.debug("filtering %s" % self.instance.name)
                 self.instance.status = TwisterStatus.FILTER
                 self.instance.reason = "runtime filter"
-                results.skipped_runtime_increment()
-                self.instance.add_missing_case_status(TwisterStatus.SKIP)
+                results.filtered_runtime_increment()
+                self.instance.add_missing_case_status(TwisterStatus.FILTER)
                 next_op = 'report'
             else:
                 next_op = 'cmake'
@@ -910,8 +936,8 @@ def process(self, pipeline, done, message, lock, results):
                     logger.debug("filtering %s" % self.instance.name)
                     self.instance.status = TwisterStatus.FILTER
                     self.instance.reason = "runtime filter"
-                    results.skipped_runtime_increment()
-                    self.instance.add_missing_case_status(TwisterStatus.SKIP)
+                    results.filtered_runtime_increment()
+                    self.instance.add_missing_case_status(TwisterStatus.FILTER)
                     next_op = 'report'
                 else:
                     next_op = 'build'
@@ -937,7 +963,7 @@ def process(self, pipeline, done, message, lock, results):
                 # Count skipped cases during build, for example
                 # due to ram/rom overflow.
                 if self.instance.status == TwisterStatus.SKIP:
-                    results.skipped_runtime_increment()
+                    results.skipped_increment()
                     self.instance.add_missing_case_status(TwisterStatus.SKIP, self.instance.reason)
 
                 if ret.get('returncode', 1) > 0:
@@ -1324,19 +1350,22 @@ def _add_instance_testcases_to_status_counts(instance, results, decrement=False):
             # but having those statuses in this part of processing is an error.
             case TwisterStatus.NONE:
                 results.none_cases_increment(increment_value)
-                logger.error(f'A None status detected in instance {instance.name},'
+                logger.warning(f'A None status detected in instance {instance.name},'
                              f' test case {tc.name}.')
+                results.warnings_increment(1)
             case TwisterStatus.STARTED:
                 results.started_cases_increment(increment_value)
-                logger.error(f'A started status detected in instance {instance.name},'
+                logger.warning(f'A started status detected in instance {instance.name},'
                              f' test case {tc.name}.')
+                results.warnings_increment(1)
             case _:
-                logger.error(f'An unknown status "{tc.status}" detected in instance {instance.name},'
+                logger.warning(f'An unknown status "{tc.status}" detected in instance {instance.name},'
                              f' test case {tc.name}.')
+                results.warnings_increment(1)
 
     def report_out(self, results):
-        total_to_do = results.total - results.skipped_filter
+        total_to_do = results.total - results.filtered_static
         total_tests_width = len(str(total_to_do))
         results.done_increment()
         instance = self.instance
@@ -1364,20 +1393,13 @@ def report_out(self, results):
             if not self.options.verbose:
                 self.log_info_file(self.options.inline_logs)
         elif instance.status == TwisterStatus.SKIP:
-            results.skipped_configs_increment()
+            results.skipped_increment()
         elif instance.status == TwisterStatus.FILTER:
-            results.skipped_configs_increment()
+            results.filtered_configs_increment()
         elif instance.status == TwisterStatus.PASS:
             results.passed_increment()
-            for case in instance.testcases:
-                # test cases skipped at the test case level
-                if case.status == TwisterStatus.SKIP:
-                    results.skipped_cases_increment()
         elif instance.status == TwisterStatus.NOTRUN:
             results.notrun_increment()
-            for case in instance.testcases:
-                if case.status == TwisterStatus.SKIP:
-                    results.skipped_cases_increment()
         else:
             logger.debug(f"Unknown status = {instance.status}")
             status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
@@ -1403,7 +1425,7 @@ def report_out(self, results):
                 and self.instance.handler.seed is not None
             ):
                 more_info += "/seed: " + str(self.options.seed)
             logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
-                results.done - results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
+                results.done - results.filtered_static, total_tests_width, total_to_do , instance.platform.name,
                 instance.testsuite.name, status, more_info))
 
             if self.options.verbose > 1:
@@ -1418,19 +1440,19 @@ def report_out(self, results):
         else:
             completed_perc = 0
             if total_to_do > 0:
-                completed_perc = int((float(results.done - results.skipped_filter) / total_to_do) * 100)
+                completed_perc = int((float(results.done - results.filtered_static) / total_to_do) * 100)
 
             sys.stdout.write("INFO - Total complete: %s%4d/%4d%s  %2d%%  built (not run): %s%4d%s, filtered: %s%4d%s, failed: %s%4d%s, error: %s%4d%s\r" % (
                 TwisterStatus.get_color(TwisterStatus.PASS),
-                results.done - results.skipped_filter,
+                results.done - results.filtered_static,
                 total_to_do,
                 Fore.RESET,
                 completed_perc,
                 TwisterStatus.get_color(TwisterStatus.NOTRUN),
                 results.notrun,
                 Fore.RESET,
-                TwisterStatus.get_color(TwisterStatus.SKIP) if results.skipped_configs > 0 else Fore.RESET,
-                results.skipped_configs,
+                TwisterStatus.get_color(TwisterStatus.SKIP) if results.filtered_configs > 0 else Fore.RESET,
+                results.filtered_configs,
                 Fore.RESET,
                 TwisterStatus.get_color(TwisterStatus.FAIL) if results.failed > 0 else Fore.RESET,
                 results.failed,
@@ -1653,7 +1675,7 @@ def run(self):
                 self.results.error = 0
                 self.results.done -= self.results.error
             else:
-                self.results.done = self.results.skipped_filter
+                self.results.done = self.results.filtered_static
 
             self.execute(pipeline, done_queue)
 
@@ -1688,20 +1710,20 @@ def update_counting_before_pipeline(self):
         '''
         for instance in self.instances.values():
             if instance.status == TwisterStatus.FILTER and not instance.reason == 'runtime filter':
-                self.results.skipped_filter_increment()
-                self.results.skipped_configs_increment()
+                self.results.filtered_static_increment()
+                self.results.filtered_configs_increment()
                 self.results.filtered_cases_increment(len(instance.testsuite.testcases))
                 self.results.cases_increment(len(instance.testsuite.testcases))
             elif instance.status == TwisterStatus.ERROR:
                 self.results.error_increment()
 
     def show_brief(self):
-        logger.info("%d test scenarios (%d test instances) selected, "
+        logger.info("%d test scenarios (%d configurations) selected, "
                     "%d configurations filtered (%d by static filter, %d at runtime)."
                     % (len(self.suites), len(self.instances),
-                       self.results.skipped_configs,
-                       self.results.skipped_filter,
-                       self.results.skipped_configs - self.results.skipped_filter))
+                       self.results.filtered_configs,
+                       self.results.filtered_static,
+                       self.results.filtered_configs - self.results.filtered_static))
 
     def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
         for instance in self.instances.values():
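Note: the new summary() delegates the box-drawing prefixes to anytree instead of hand-aligned print() calls. A minimal sketch of the Node/RenderTree pattern it relies on, with toy counter values rather than the real ExecutionCounter fields:

    # Sketch only: build a tree via parent links, then let RenderTree
    # supply the "├── " / "│   " prefixes when iterating.
    from anytree import Node, RenderTree

    root = Node("Summary")
    Node("Total test suites: 12", parent=root)
    suites = Node("Processed test suites: 9", parent=root)
    Node("Filtered test suites: 3", parent=suites)
    Node("Selected test suites: 6", parent=suites)

    for pre, _, node in RenderTree(root):
        print(f"{pre}{node.name}")
    # Summary
    # ├── Total test suites: 12
    # └── Processed test suites: 9
    #     ├── Filtered test suites: 3
    #     └── Selected test suites: 6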
diff --git a/scripts/pylib/twister/twisterlib/testplan.py b/scripts/pylib/twister/twisterlib/testplan.py
index 4a9f4cb86e5f8..da8eaa0a74908 100755
--- a/scripts/pylib/twister/twisterlib/testplan.py
+++ b/scripts/pylib/twister/twisterlib/testplan.py
@@ -439,7 +439,6 @@ def init_and_add_platforms(data, board, target, qualifier, aliases):
                 raise Exception(f"Duplicate platform identifier {platform.name} found")
             if not platform.twister:
                 return
-            logger.debug(f"Adding platform {platform.name} with aliases {platform.aliases}")
             self.platforms.append(platform)
 
         for board in known_boards.values():
diff --git a/scripts/tests/twister/test_runner.py b/scripts/tests/twister/test_runner.py
index b6cd151d78ad0..3ab7de2fb935f 100644
--- a/scripts/tests/twister/test_runner.py
+++ b/scripts/tests/twister/test_runner.py
@@ -194,9 +194,9 @@ def test_executioncounter(capfd):
     ec.iteration = 2
     ec.done = 9
     ec.passed = 6
-    ec.skipped_configs = 3
-    ec.skipped_runtime = 1
-    ec.skipped_filter = 2
+    ec.filtered_configs = 3
+    ec.filtered_runtime = 1
+    ec.filtered_static = 2
     ec.failed = 1
 
     ec.summary()
@@ -206,31 +206,26 @@ def test_executioncounter(capfd):
     sys.stderr.write(err)
 
     assert (
-        '--------------------------------------------------\n'
-        'Total test suites: 12\n'
-        'Processed test suites: 9\n'
-        '├─ Filtered test suites (static): 2\n'
-        '└─ Completed test suites: 7\n'
-        '   ├─ Filtered test suites (at runtime): 1\n'
-        '   ├─ Passed test suites: 6\n'
-        '   ├─ Built only test suites: 0\n'
-        '   ├─ Failed test suites: 1\n'
-        '   └─ Errors in test suites: 2\n'
-        '\n'
-        'Filtered test suites: 3\n'
-        '├─ Filtered test suites (static): 2\n'
-        '└─ Filtered test suites (at runtime): 1\n'
-        '---------------------- ----------------------\n'
-        'Total test cases: 25\n'
-        '├─ Filtered test cases: 0\n'
-        '├─ Skipped test cases: 6\n'
-        '└─ Executed test cases: 19\n'
-        '   ├─ Passed test cases: 0\n'
-        '   ├─ Built only test cases: 0\n'
-        '   ├─ Blocked test cases: 0\n'
-        '   ├─ Failed test cases: 0\n'
-        '   └─ Errors in test cases: 0\n'
-        '--------------------------------------------------\n'
+"├── Total test suites: 12\n"
+"├── Processed test suites: 9\n"
+"│   ├── Filtered test suites: 3\n"
+"│   │   ├── Filtered test suites (static): 2\n"
+"│   │   └── Filtered test suites (at runtime): 1\n"
+"│   └── Selected test suites: 6\n"
+"│       ├── Skipped test suites: 0\n"
+"│       ├── Passed test suites: 6\n"
+"│       ├── Built only test suites: 0\n"
+"│       ├── Failed test suites: 1\n"
+"│       └── Errors in test suites: 2\n"
+"└── Total test cases: 25\n"
+"    ├── Filtered test cases: 0\n"
+"    └── Selected test cases: 25\n"
+"        ├── Passed test cases: 0\n"
+"        ├── Skipped test cases: 6\n"
+"        ├── Built only test cases: 0\n"
+"        ├── Blocked test cases: 0\n"
+"        ├── Failed test cases: 0\n"
+"        └── Errors in test cases: 0\n"
     ) in out
 
     assert ec.cases == 25
@@ -239,9 +234,9 @@ def test_executioncounter(capfd):
     assert ec.iteration == 2
     assert ec.done == 9
     assert ec.passed == 6
-    assert ec.skipped_configs == 3
-    assert ec.skipped_runtime == 1
-    assert ec.skipped_filter == 2
+    assert ec.filtered_configs == 3
+    assert ec.filtered_runtime == 1
+    assert ec.filtered_static == 2
     assert ec.failed == 1
 
 
@@ -915,7 +910,7 @@ def mock_getsize(filename, *args, **kwargs):
             TwisterStatus.FILTER,
             'runtime filter',
             1,
-            (TwisterStatus.SKIP,)
+            (TwisterStatus.FILTER,)
         ),
         (
             {'op': 'filter'},
@@ -1025,7 +1020,7 @@ def mock_getsize(filename, *args, **kwargs):
             TwisterStatus.FILTER,
             'runtime filter',
             1,
-            (TwisterStatus.SKIP,)
+            (TwisterStatus.FILTER,) # this is a tuple
        ),
         (
             {'op': 'cmake'},
@@ -1091,7 +1086,7 @@ def mock_getsize(filename, *args, **kwargs):
             {'op': 'gather_metrics', 'test': mock.ANY},
             mock.ANY,
             mock.ANY,
-            1,
+            0,
             (TwisterStatus.SKIP, mock.ANY)
         ),
         (
@@ -1547,7 +1542,7 @@ def mock_determine_testcases(res):
         __exit__=mock.Mock(return_value=None)
     )
     results_mock = mock.Mock()
-    results_mock.skipped_runtime = 0
+    results_mock.filtered_runtime = 0
 
     pb.process(pipeline_mock, done_mock, message, lock_mock, results_mock)
 
@@ -1558,7 +1553,7 @@ def mock_determine_testcases(res):
 
     assert pb.instance.status == expected_status
     assert pb.instance.reason == expected_reason
-    assert results_mock.skipped_runtime_increment.call_args_list == [mock.call()] * expected_skipped
+    assert results_mock.filtered_runtime_increment.call_args_list == [mock.call()] * expected_skipped
 
     if expected_missing:
         pb.instance.add_missing_case_status.assert_called_with(*expected_missing)
@@ -2043,9 +2038,9 @@ def test_projectbuilder_report_out(
         passed = 17,
         notrun = 0,
         failed = 2,
-        skipped_configs = 3,
-        skipped_runtime = 0,
-        skipped_filter = 0,
+        filtered_configs = 3,
+        filtered_runtime = 0,
+        filtered_static = 0,
         error = 1,
         cases = 0,
         filtered_cases = 0,
@@ -2061,15 +2056,15 @@ def test_projectbuilder_report_out(
     def results_done_increment(value=1, decrement=False):
         results_mock.done += value * (-1 if decrement else 1)
     results_mock.done_increment = results_done_increment
-    def skipped_configs_increment(value=1, decrement=False):
-        results_mock.skipped_configs += value * (-1 if decrement else 1)
-    results_mock.skipped_configs_increment = skipped_configs_increment
-    def skipped_filter_increment(value=1, decrement=False):
-        results_mock.skipped_filter += value * (-1 if decrement else 1)
-    results_mock.skipped_filter_increment = skipped_filter_increment
-    def skipped_runtime_increment(value=1, decrement=False):
-        results_mock.skipped_runtime += value * (-1 if decrement else 1)
-    results_mock.skipped_runtime_increment = skipped_runtime_increment
+    def filtered_configs_increment(value=1, decrement=False):
+        results_mock.filtered_configs += value * (-1 if decrement else 1)
+    results_mock.filtered_configs_increment = filtered_configs_increment
+    def filtered_static_increment(value=1, decrement=False):
+        results_mock.filtered_static += value * (-1 if decrement else 1)
+    results_mock.filtered_static_increment = filtered_static_increment
+    def filtered_runtime_increment(value=1, decrement=False):
+        results_mock.filtered_runtime += value * (-1 if decrement else 1)
+    results_mock.filtered_runtime_increment = filtered_runtime_increment
     def failed_increment(value=1, decrement=False):
         results_mock.failed += value * (-1 if decrement else 1)
     results_mock.failed_increment = failed_increment
@@ -2566,9 +2561,9 @@ def test_twisterrunner_update_counting_before_pipeline():
         done = 0,
         passed = 0,
         failed = 0,
-        skipped_configs = 0,
-        skipped_runtime = 0,
-        skipped_filter = 0,
+        filtered_configs = 0,
+        filtered_runtime = 0,
+        filtered_static = 0,
         error = 0,
         cases = 0,
         filtered_cases = 0,
@@ -2580,12 +2575,12 @@ def test_twisterrunner_update_counting_before_pipeline():
         none_cases = 0,
         started_cases = 0
     )
-    def skipped_configs_increment(value=1, decrement=False):
-        tr.results.skipped_configs += value * (-1 if decrement else 1)
-    tr.results.skipped_configs_increment = skipped_configs_increment
-    def skipped_filter_increment(value=1, decrement=False):
-        tr.results.skipped_filter += value * (-1 if decrement else 1)
-    tr.results.skipped_filter_increment = skipped_filter_increment
+    def filtered_configs_increment(value=1, decrement=False):
+        tr.results.filtered_configs += value * (-1 if decrement else 1)
+    tr.results.filtered_configs_increment = filtered_configs_increment
+    def filtered_static_increment(value=1, decrement=False):
+        tr.results.filtered_static += value * (-1 if decrement else 1)
+    tr.results.filtered_static_increment = filtered_static_increment
     def error_increment(value=1, decrement=False):
         tr.results.error += value * (-1 if decrement else 1)
     tr.results.error_increment = error_increment
@@ -2598,8 +2593,8 @@ def filtered_cases_increment(value=1, decrement=False):
 
     tr.update_counting_before_pipeline()
 
-    assert tr.results.skipped_filter == 1
-    assert tr.results.skipped_configs == 1
+    assert tr.results.filtered_static == 1
+    assert tr.results.filtered_configs == 1
     assert tr.results.filtered_cases == 4
     assert tr.results.cases == 4
     assert tr.results.error == 1
@@ -2618,8 +2613,8 @@ def test_twisterrunner_show_brief(caplog):
     tr = TwisterRunner(instances, suites, env=env_mock)
 
     tr.results = mock.Mock(
-        skipped_filter = 3,
-        skipped_configs = 4,
+        filtered_static = 3,
+        filtered_configs = 4,
         skipped_cases = 0,
         cases = 0,
         error = 0
     )
 
     tr.show_brief()
 
-    log = '2 test scenarios (5 test instances) selected,' \
+    log = '2 test scenarios (5 configurations) selected,' \
          ' 4 configurations filtered (3 by static filter, 1 at runtime).'
 
     assert log in caplog.text
 
 
diff --git a/scripts/tests/twister_blackbox/test_platform.py b/scripts/tests/twister_blackbox/test_platform.py
index 75a6cda909309..43ff3346cf8af 100644
--- a/scripts/tests/twister_blackbox/test_platform.py
+++ b/scripts/tests/twister_blackbox/test_platform.py
@@ -37,7 +37,7 @@ class TestPlatform:
             'failed_configurations': 0,
             'errored_configurations': 0,
             'executed_test_cases': 8,
-            'skipped_test_cases': 5,
+            'skipped_test_cases': 2,
             'platform_count': 3,
             'executed_on_platform': 4,
             'only_built': 2
@@ -58,7 +58,7 @@ class TestPlatform:
             'failed_configurations': 0,
             'errored_configurations': 0,
             'executed_test_cases': 0,
-            'skipped_test_cases': 3,
+            'skipped_test_cases': 0,
             'platform_count': 3,
             'executed_on_platform': 0,
             'only_built': 0
@@ -264,7 +264,7 @@ def test_emulation_only(self, capfd, out_path, test_path, test_platforms, expect
             self.loader.exec_module(self.twister_module)
 
         select_regex = r'^INFO - (?P[0-9]+) test scenarios' \
-                       r' \((?P[0-9]+) test instances\) selected,' \
+                       r' \((?P[0-9]+) configurations\) selected,' \
                        r' (?P[0-9]+) configurations filtered' \
                        r' \((?P[0-9]+) by static filter,' \
                        r' (?P[0-9]+) at runtime\)\.$'
diff --git a/scripts/tests/twister_blackbox/test_runner.py b/scripts/tests/twister_blackbox/test_runner.py
index 0ace7fb05157b..1ab50522c5ebf 100644
--- a/scripts/tests/twister_blackbox/test_runner.py
+++ b/scripts/tests/twister_blackbox/test_runner.py
@@ -260,7 +260,7 @@ def test_runtest_only(self, capfd, out_path, test_path, test_platforms, expected
 
         select_regex = r'^INFO - (?P[0-9]+) test scenarios' \
-                       r' \((?P[0-9]+) test instances\) selected,' \
+                       r' \((?P[0-9]+) configurations\) selected,' \
                        r' (?P[0-9]+) configurations filtered' \
                        r' \((?P[0-9]+) by static filter,' \
                        r' (?P[0-9]+) at runtime\)\.$'
@@ -627,7 +627,7 @@ def test_only_failed(self, capfd, out_path, test_path, test_platforms, expected)
             self.loader.exec_module(self.twister_module)
 
         select_regex = r'^INFO - (?P[0-9]+) test scenarios' \
-                       r' \((?P[0-9]+) test instances\) selected,' \
+                       r' \((?P[0-9]+) configurations\) selected,' \
                        r' (?P[0-9]+) configurations filtered' \
                        r' \((?P[0-9]+) by static filter,' \
                        r' (?P[0-9]+) at runtime\)\.$'
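Note: the blackbox regexes above only change their wording from "test instances" to "configurations" to track the reworded show_brief() message; the named groups (their names are elided in this listing) are untouched. A simplified, hypothetical pattern — not the exact select_regex from the tests — showing that the reworded log line still parses:

    import re

    # Stand-in pattern with plain groups; the real tests use named groups.
    pattern = (r'^INFO - ([0-9]+) test scenarios'
               r' \(([0-9]+) configurations\) selected,'
               r' ([0-9]+) configurations filtered'
               r' \(([0-9]+) by static filter,'
               r' ([0-9]+) at runtime\)\.$')
    line = ('INFO - 2 test scenarios (5 configurations) selected, '
            '4 configurations filtered (3 by static filter, 1 at runtime).')
    match = re.match(pattern, line)
    assert match is not None
    assert match.groups() == ('2', '5', '4', '3', '1')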