From ce957b39af71faaf7380563f2b55a7d940e5218b Mon Sep 17 00:00:00 2001
From: Zhi-Qiang You
Date: Mon, 17 Feb 2020 11:05:21 -0500
Subject: [PATCH 1/6] Add #1161 changes

---
 reframe/frontend/cli.py        |  8 +++++-
 reframe/frontend/statistics.py | 47 ++++++++++++++++++++++++++++++++--
 2 files changed, 52 insertions(+), 3 deletions(-)

diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py
index 460362514d..9527eac527 100644
--- a/reframe/frontend/cli.py
+++ b/reframe/frontend/cli.py
@@ -231,6 +231,9 @@ def main():
     misc_options.add_argument(
         '--non-default-craype', action='store_true', default=False,
         help='Test a non-default Cray PE')
+    misc_options.add_argument(
+        '--rerun-failed', action='store_true',
+        help='Print run options for all failed tests in summary table')
     misc_options.add_argument(
         '--show-config', action='store_true',
         help='Print configuration of the current system and exit')
@@ -611,7 +614,10 @@ def main():

     # Print a failure report if we had failures in the last run
     if runner.stats.failures():
-        printer.info(runner.stats.failure_report())
+        if options.rerun_failed:
+            printer.info(runner.stats.failure_report(rerun_failed=1))
+        else:
+            printer.info(runner.stats.failure_report())
         success = False

     if options.performance_report:
diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index 01e3f2c459..ac16baa54d 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -65,11 +65,12 @@ def retry_report(self):

         return '\n'.join(report)

-    def failure_report(self):
+    def failure_report(self, rerun_failed=-1):
         line_width = 78
         report = [line_width * '=']
         report.append('SUMMARY OF FAILURES')
         current_run = rt.runtime().current_run
+        failures = {}
         for tf in (t for t in self.tasks(current_run) if t.failed):
             check = tf.check
             partition = check.current_partition
@@ -107,8 +108,50 @@ def failure_report(self):
                 # This shouldn't happen...
                 report.append('Unknown error.')

+            # Collect failures for summary table
+            if not tf.failed_stage in failures:
+                failures[tf.failed_stage] = []
+            failures[tf.failed_stage].append([check.name, environ_name, partname, reason])
+
             report.append(line_width * '-')
-        return '\n'.join(report)
+
+        # Generate summary table
+        row_format = "{:<11} {:<5} {:<60}"
+        hline = row_format.format(11*'-', 5*'-', 60*'-')
+        header = row_format.format('Phase', '#', 'Description')
+        stage_descr = {
+            'setup': "Failed to set up test's environment and path",
+            'compile': "Failed to compile the source code in the current environment",
+            'run': "Failed to launch jobs",
+            'sanity': "Failed in sanity checking",
+            'performance': "Failed in performance checking",
+            'poll': "Failed in polling",
+            'cleanup': "Failed to clean up the resources of the test."
+        }
+        total_num_tests = len(self.tasks(current_run))
+        total_num_failures = 0
+        for p in failures.keys():
+            total_num_failures += len(failures[p])
+        summary = ['']
+        summary.append('Total number of tests: %d' % int(total_num_tests))
+        summary.append('Total number of failures: %d' % int(total_num_failures))
+        summary.append('')
+        summary.append(header)
+        summary.append(hline)
+        for p in failures:
+            summary.append(row_format.format(p, len(failures[p]), stage_descr[p]))
+            if rerun_failed == 1:
+                for f in failures[p]:
+                    phase = p
+                    if "could not load module" in f[3]:
+                        phase = "load module"
+                    elif "could not show module" in f[3]:
+                        phase = "no module"
+                    summary.append(row_format.format(
+                        '', '', '%s: -n %s -p %s --system %s' % tuple([phase] + f[:3])))
+        summary.append('')
+
+        return '\n'.join(report + summary)

     def performance_report(self):
         line_width = 78
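
Note: to make the summary table added in this patch concrete, the following
standalone sketch reproduces the same formatting logic with hypothetical
failure data (the test, environment and partition names are illustrative and
not part of the patch):

    # stage -> [[test name, environment, partition, reason], ...] (made-up data)
    failures = {
        'sanity': [['MyCheck', 'PrgEnv-gnu', 'daint:gpu', 'sanity error']]
    }
    stage_descr = {'sanity': "Failed in sanity checking"}
    row_format = "{:<11} {:<5} {:<60}"
    print(row_format.format('Phase', '#', 'Description'))
    print(row_format.format(11*'-', 5*'-', 60*'-'))
    for p in failures:
        print(row_format.format(p, len(failures[p]), stage_descr[p]))

This prints a 'sanity      1     Failed in sanity checking' row, i.e. the kind
of table that failure_report() now appends after the per-test failure details.
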
From 4409186f4cf4a896000e7170b0f84232b4dbb0a3 Mon Sep 17 00:00:00 2001
From: Zhi-Qiang You
Date: Mon, 17 Feb 2020 11:37:45 -0500
Subject: [PATCH 2/6] Fix coding style

---
 reframe/frontend/cli.py        |  3 ++-
 reframe/frontend/statistics.py | 23 ++++++++++++++---------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py
index 9527eac527..8dc20584c5 100644
--- a/reframe/frontend/cli.py
+++ b/reframe/frontend/cli.py
@@ -615,7 +615,8 @@ def main():
     # Print a failure report if we had failures in the last run
     if runner.stats.failures():
         if options.rerun_failed:
-            printer.info(runner.stats.failure_report(rerun_failed=1))
+            rerun_failed = 1
+            printer.info(runner.stats.failure_report(rerun_failed))
         else:
             printer.info(runner.stats.failure_report())
         success = False
diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index ac16baa54d..6f103e7997 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -70,7 +70,7 @@ def failure_report(self, rerun_failed=-1):
         report = [line_width * '=']
         report.append('SUMMARY OF FAILURES')
         current_run = rt.runtime().current_run
-        failures = {} 
+        failures = {}
         for tf in (t for t in self.tasks(current_run) if t.failed):
             check = tf.check
             partition = check.current_partition
@@ -109,9 +110,10 @@ def failure_report(self, rerun_failed=-1):
                 report.append('Unknown error.')

             # Collect failures for summary table
-            if not tf.failed_stage in failures:
+            if tf.failed_stage not in failures:
                 failures[tf.failed_stage] = []
-            failures[tf.failed_stage].append([check.name, environ_name, partname, reason])
+            failures[tf.failed_stage].append([check.name, environ_name,
+                                              partname, reason])

             report.append(line_width * '-')
@@ -121,12 +122,13 @@ def failure_report(self, rerun_failed=-1):
         header = row_format.format('Phase', '#', 'Description')
         stage_descr = {
             'setup': "Failed to set up test's environment and path",
-            'compile': "Failed to compile the source code in the current environment",
+            'compile': "Failed to compile the source code in the current "
+                       "environment",
             'run': "Failed to launch jobs",
             'sanity': "Failed in sanity checking",
             'performance': "Failed in performance checking",
             'poll': "Failed in polling",
-            'cleanup': "Failed to clean up the resources of the test." 
+            'cleanup': "Failed to clean up the resources of the test."
         }
         total_num_tests = len(self.tasks(current_run))
         total_num_failures = 0
         for p in failures.keys():
             total_num_failures += len(failures[p])
         summary = ['']
         summary.append('Total number of tests: %d' % int(total_num_tests))
-        summary.append('Total number of failures: %d' % int(total_num_failures))
+        summary.append('Total number of failures: %d' %
+                       int(total_num_failures))
         summary.append('')
         summary.append(header)
         summary.append(hline)
         for p in failures:
-            summary.append(row_format.format(p, len(failures[p]), stage_descr[p]))
+            summary.append(row_format.format(p, len(failures[p]),
+                                             stage_descr[p]))
             if rerun_failed == 1:
                 for f in failures[p]:
                     phase = p
                     if "could not load module" in f[3]:
                         phase = "load module"
                     elif "could not show module" in f[3]:
                         phase = "no module"
-                    summary.append(row_format.format(
-                        '', '', '%s: -n %s -p %s --system %s' % tuple([phase] + f[:3])))
+                    summary.append(
+                        row_format.format('', '', '%s: -n %s -p %s --system %s'
+                                          % tuple([phase] + f[:3])))
         summary.append('')

         return '\n'.join(report + summary)

From f88fc17aeb802635e71babe7f408553f846ba1bd Mon Sep 17 00:00:00 2001
From: Zhi-Qiang You
Date: Wed, 4 Mar 2020 10:09:54 -0500
Subject: [PATCH 3/6] Update for #1161

* Add failure_stats function to print failure statistics
* Add "Rerun as:" in failure report for run options

---
 reframe/frontend/cli.py        | 14 +++---
 reframe/frontend/statistics.py | 79 +++++++++++++++-------------------
 2 files changed, 41 insertions(+), 52 deletions(-)

diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py
index 266b4a15ab..55f658ea47 100644
--- a/reframe/frontend/cli.py
+++ b/reframe/frontend/cli.py
@@ -228,6 +228,9 @@ def main():
     misc_options.add_argument(
         '--nocolor', action='store_false', dest='colorize', default=True,
         help='Disable coloring of output')
+    misc_options.add_argument(
+        '--failure-stats', action='store_true',
+        help='Print failure statistics')
     misc_options.add_argument('--performance-report', action='store_true',
                               help='Print the performance report')

@@ -236,9 +239,6 @@ def main():
     misc_options.add_argument(
         '--non-default-craype', action='store_true', default=False,
         help='Test a non-default Cray PE')
-    misc_options.add_argument(
-        '--rerun-failed', action='store_true',
-        help='Print run options for all failed tests in summary table')
     misc_options.add_argument(
         '--show-config', action='store_true',
         help='Print configuration of the current system and exit')
@@ -622,12 +622,10 @@ def main():

     # Print a failure report if we had failures in the last run
     if runner.stats.failures():
-        if options.rerun_failed:
-            rerun_failed = 1
-            printer.info(runner.stats.failure_report(rerun_failed))
-        else:
-            printer.info(runner.stats.failure_report())
+        printer.info(runner.stats.failure_report())
         success = False
+        if options.failure_stats:
+            printer.info(runner.stats.failure_stats())

     if options.performance_report:
         printer.info(runner.stats.performance_report())
diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index 62ec5989a1..f9ab02bd19 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -70,12 +70,11 @@ def retry_report(self):

         return '\n'.join(report)

-    def failure_report(self, rerun_failed=-1):
+    def failure_report(self):
         line_width = 78
         report = [line_width * '=']
         report.append('SUMMARY OF FAILURES')
         current_run = rt.runtime().current_run
-        failures = {}
         for tf in (t for t in self.tasks(current_run) if t.failed):
             check = tf.check
             partition = check.current_partition
@@ -98,6 +97,8 @@ def failure_report(self, rerun_failed=-1):
             report.append(' * Job type: %s (id=%s)' % (job_type, jobid))
             report.append(' * Maintainers: %s' % check.maintainers)
             report.append(' * Failing phase: %s' % tf.failed_stage)
+            report.append(' * Rerun as: -n %s -p %s --system %s' %
+                          (check.name, environ_name, partname))
             reason = ' * Reason: '
             if tf.exc_info is not None:
                 from reframe.core.exceptions import format_exception
@@ -113,55 +114,45 @@ def failure_report(self, rerun_failed=-1):
                 # This shouldn't happen...
                 report.append('Unknown error.')

-            # Collect failures for summary table
-            if tf.failed_stage not in failures:
-                failures[tf.failed_stage] = []
-            failures[tf.failed_stage].append([check.name, environ_name,
-                                              partname, reason])
-
             report.append(line_width * '-')
+        return '\n'.join(report)

-        # Generate summary table
+    def failure_stats(self):
+        failures = {}
+        current_run = rt.runtime().current_run
+        for tf in (t for t in self.tasks(current_run) if t.failed):
+            check = tf.check
+            if tf.exc_info is not None:
+                from reframe.core.exceptions import format_exception
+            if tf.failed_stage not in failures:
+                failures[tf.failed_stage] = []
+            failures[tf.failed_stage].append(check.name)
+        line_width = 78
+        stats_start = line_width * '='
+        stats_title = 'FAILURE STATISTICS'
+        stats_end = line_width * '_'
+        stats_body = []
         row_format = "{:<11} {:<5} {:<60}"
-        hline = row_format.format(11*'-', 5*'-', 60*'-')
-        header = row_format.format('Phase', '#', 'Description')
-        stage_descr = {
-            'setup': "Failed to set up test's environment and path",
-            'compile': "Failed to compile the source code in the current "
-                       "environment",
-            'run': "Failed to launch jobs",
-            'sanity': "Failed in sanity checking",
-            'performance': "Failed in performance checking",
-            'poll': "Failed in polling",
-            'cleanup': "Failed to clean up the resources of the test."
-        }
+        stats_hline = row_format.format(11*'-', 5*'-', 60*'-')
+        stats_header = row_format.format('Phase', '#', 'Failing tests')
         total_num_tests = len(self.tasks(current_run))
         total_num_failures = 0
         for p in failures.keys():
             total_num_failures += len(failures[p])
-        summary = ['']
-        summary.append('Total number of tests: %d' % int(total_num_tests))
-        summary.append('Total number of failures: %d' %
-                       int(total_num_failures))
-        summary.append('')
-        summary.append(header)
-        summary.append(hline)
-        for p in failures:
-            summary.append(row_format.format(p, len(failures[p]),
-                                             stage_descr[p]))
-            if rerun_failed == 1:
-                for f in failures[p]:
-                    phase = p
-                    if "could not load module" in f[3]:
-                        phase = "load module"
-                    elif "could not show module" in f[3]:
-                        phase = "no module"
-                    summary.append(
-                        row_format.format('', '', '%s: -n %s -p %s --system %s'
-                                          % tuple([phase] + f[:3])))
-        summary.append('')
-
-        return '\n'.join(report + summary)
+        stats_body = ['']
+        stats_body.append('Total number of tests: %d' % int(total_num_tests))
+        stats_body.append('Total number of failures: %d' %
+                          int(total_num_failures))
+        stats_body.append('')
+        stats_body.append(stats_header)
+        stats_body.append(stats_hline)
+        for p in failures.keys():
+            stats_body.append(row_format.format(p, len(failures[p]),
+                                                '|'.join(failures[p])))
+        if stats_body:
+            return '\n'.join([stats_start, stats_title, *stats_body,
+                              stats_end])
+        return ''

     def performance_report(self):
         line_width = 78
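
Note: the 'Rerun as:' line added in the patch above is meant to be pasted back
into a ReFrame invocation. For a hypothetical case that failed on partition
daint:gpu with environment PrgEnv-gnu, the report would point at a command
along these lines (check, partition and environment names are made up):

    ./bin/reframe -n MyCheck -p PrgEnv-gnu --system daint:gpu -r
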
From 7d6bee4835152f23a48be2cd888a63aed8f1bbea Mon Sep 17 00:00:00 2001
From: Zhi-Qiang You
Date: Thu, 12 Mar 2020 14:24:55 -0400
Subject: [PATCH 4/6] Update for #1161

* Print test cases in the table
* Add unittest for option `--failure-stats`

---
 reframe/frontend/statistics.py | 41 ++++++++++++++++++++--------------
 unittests/test_cli.py          |  9 ++++++++
 2 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index f9ab02bd19..c4a853d1ad 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -97,7 +97,7 @@ def failure_report(self):
             report.append(' * Job type: %s (id=%s)' % (job_type, jobid))
             report.append(' * Maintainers: %s' % check.maintainers)
             report.append(' * Failing phase: %s' % tf.failed_stage)
-            report.append(' * Rerun as: -n %s -p %s --system %s' %
+            report.append(" * Rerun with '-n %s -p %s --system %s'" %
                           (check.name, environ_name, partname))
             reason = ' * Reason: '
             if tf.exc_info is not None:
@@ -122,33 +122,40 @@ def failure_stats(self):
         current_run = rt.runtime().current_run
         for tf in (t for t in self.tasks(current_run) if t.failed):
             check = tf.check
-            if tf.exc_info is not None:
-                from reframe.core.exceptions import format_exception
+            partition = check.current_partition
+            partname = partition.fullname if partition else 'None'
+            environ_name = (check.current_environ.name
+                            if check.current_environ else 'None')
+            f = "[%s, %s, %s]" % (check.name, environ_name, partname)
             if tf.failed_stage not in failures:
                 failures[tf.failed_stage] = []
-            failures[tf.failed_stage].append(check.name)
+
+            failures[tf.failed_stage].append(f)
+
         line_width = 78
         stats_start = line_width * '='
         stats_title = 'FAILURE STATISTICS'
-        stats_end = line_width * '_'
+        stats_end = line_width * '-'
         stats_body = []
-        row_format = "{:<11} {:<5} {:<60}"
+        row_format = "{:<11} {:<5} {}"
         stats_hline = row_format.format(11*'-', 5*'-', 60*'-')
-        stats_header = row_format.format('Phase', '#', 'Failing tests')
-        total_num_tests = len(self.tasks(current_run))
-        total_num_failures = 0
-        for p in failures.keys():
-            total_num_failures += len(failures[p])
+        stats_header = row_format.format('Phase', '#', 'Failing test cases')
+        num_tests = len(self.tasks(current_run))
+        num_failures = 0
+        for l in failures.values():
+            num_failures += len(l)
         stats_body = ['']
-        stats_body.append('Total number of tests: %d' % int(total_num_tests))
-        stats_body.append('Total number of failures: %d' %
-                          int(total_num_failures))
+        stats_body.append('Total number of test cases: %s' % num_tests)
+        stats_body.append('Total number of failures: %s' % num_failures)
         stats_body.append('')
         stats_body.append(stats_header)
         stats_body.append(stats_hline)
-        for p in failures.keys():
-            stats_body.append(row_format.format(p, len(failures[p]),
-                                                '|'.join(failures[p])))
+        for p, l in failures.items():
+            stats_body.append(row_format.format(p, len(l), l[0]))
+            for f in l[1:]:
+                stats_body.append(row_format.format('', '', str(f)))
+
         if stats_body:
             return '\n'.join([stats_start, stats_title, *stats_body,
                               stats_end])
diff --git a/unittests/test_cli.py b/unittests/test_cli.py
index 75091823c4..c309491785 100644
--- a/unittests/test_cli.py
+++ b/unittests/test_cli.py
@@ -227,6 +227,15 @@ def test_performance_check_failure(self):
                          self.environs)
         assert self._perflog_exists('PerformanceFailureCheck')

+    def test_failure_stats(self):
+        self.checkpath = ['unittests/resources/checks/frontend_checks.py']
+        self.more_options = ['-t', 'SanityFailureCheck',
+                             '--failure-stats']
+        returncode, stdout, stderr = self._run_reframe()
+
+        assert r'FAILURE STATISTICS' in stdout
+        assert r'sanity 1 [SanityFailureCheck' in stdout
+
     def test_performance_report(self):
         self.checkpath = ['unittests/resources/checks/frontend_checks.py']
         self.more_options = ['-t', 'PerformanceFailureCheck',

From 9a60f887d267457de63f6540a7b1901b18190803 Mon Sep 17 00:00:00 2001
From: Zhi-Qiang You
Date: Wed, 18 Mar 2020 10:48:26 -0400
Subject: [PATCH 5/6] Update requested changes

---
 reframe/frontend/statistics.py | 2 +-
 unittests/test_cli.py          | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index c4a853d1ad..6f28de74df 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -126,7 +126,7 @@ def failure_stats(self):
             partname = partition.fullname if partition else 'None'
             environ_name = (check.current_environ.name
                             if check.current_environ else 'None')
-            f = "[%s, %s, %s]" % (check.name, environ_name, partname)
+            f = f'[{check.name}, {check.environ_name}, {check.partname}]'
             if tf.failed_stage not in failures:
                 failures[tf.failed_stage] = []

diff --git a/unittests/test_cli.py b/unittests/test_cli.py
index c309491785..22c8de7881 100644
--- a/unittests/test_cli.py
+++ b/unittests/test_cli.py
@@ -229,12 +229,14 @@ def test_performance_check_failure(self):

     def test_failure_stats(self):
         self.checkpath = ['unittests/resources/checks/frontend_checks.py']
-        self.more_options = ['-t', 'SanityFailureCheck',
-                             '--failure-stats']
+        self.more_options = ['-t', 'SanityFailureCheck', '--failure-stats']
         returncode, stdout, stderr = self._run_reframe()

         assert r'FAILURE STATISTICS' in stdout
         assert r'sanity 1 [SanityFailureCheck' in stdout
+        assert 'Traceback' not in stdout
+        assert 'Traceback' not in stderr
+        assert returncode != 0

     def test_performance_report(self):
         self.checkpath = ['unittests/resources/checks/frontend_checks.py']
         self.more_options = ['-t', 'PerformanceFailureCheck',
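
Note: the f-string introduced in patch 5 above looks up environ_name and
partname on the check object, but failure_stats() computes them as local
variables; the check has no such attributes, so building the entry raises
AttributeError at report time. A minimal reproduction with a hypothetical
Check stand-in (not ReFrame's actual class):

    class Check:
        name = 'SanityFailureCheck'

    check = Check()
    environ_name, partname = 'builtin', 'generic:login'
    try:
        f = f'[{check.name}, {check.environ_name}, {check.partname}]'
    except AttributeError as err:
        print(err)  # 'Check' object has no attribute 'environ_name'
    # Referencing the locals instead works, which is what the final patch does:
    print(f'[{check.name}, {environ_name}, {partname}]')

The last patch below restores the references to the local variables.
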
From a36408c85723b2bfc856f0b1c637a2a322c5b59b Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Wed, 18 Mar 2020 17:17:19 +0100
Subject: [PATCH 6/6] Fix unit tests

---
 reframe/frontend/statistics.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index 6f28de74df..1a85d5b072 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -126,7 +126,7 @@ def failure_stats(self):
             partname = partition.fullname if partition else 'None'
             environ_name = (check.current_environ.name
                             if check.current_environ else 'None')
-            f = f'[{check.name}, {check.environ_name}, {check.partname}]'
+            f = f'[{check.name}, {environ_name}, {partname}]'
             if tf.failed_stage not in failures:
                 failures[tf.failed_stage] = []
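
Note: with the series applied, a failing sanity check renders roughly the
following FAILURE STATISTICS block. This standalone sketch mirrors the final
failure_stats() formatting with hypothetical data for a single failing test
case (the real method derives everything from the runtime):

    line_width = 78
    failures = {'sanity': ['[SanityFailureCheck, builtin, generic:login]']}
    row_format = "{:<11} {:<5} {}"
    lines = [line_width * '=', 'FAILURE STATISTICS', '']
    lines.append('Total number of test cases: %s' % 1)
    lines.append('Total number of failures: %s' % 1)
    lines.append('')
    lines.append(row_format.format('Phase', '#', 'Failing test cases'))
    lines.append(row_format.format(11*'-', 5*'-', 60*'-'))
    for phase, cases in failures.items():
        lines.append(row_format.format(phase, len(cases), cases[0]))
        for case in cases[1:]:
            lines.append(row_format.format('', '', case))
    lines.append(line_width * '-')
    print('\n'.join(lines))

The 'sanity' row this prints is the one test_failure_stats() above looks for
in the captured stdout, modulo the column padding applied by row_format.
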