diff --git a/config/cscs-ci.py b/config/cscs-ci.py index 58288775b9..b6cc9e61d4 100644 --- a/config/cscs-ci.py +++ b/config/cscs-ci.py @@ -157,6 +157,7 @@ class ReframeSettings: 'format': ( '%(asctime)s|reframe %(version)s|' '%(check_info)s|jobid=%(check_jobid)s|' + 'num_tasks=%(check_num_tasks)s|' '%(check_perf_var)s=%(check_perf_value)s|' 'ref=%(check_perf_ref)s ' '(l=%(check_perf_lower_thres)s, ' diff --git a/config/cscs-pbs.py b/config/cscs-pbs.py index 8ece85d54e..de7f22a943 100644 --- a/config/cscs-pbs.py +++ b/config/cscs-pbs.py @@ -140,6 +140,7 @@ class ReframeSettings: 'format': ( '%(asctime)s|reframe %(version)s|' '%(check_info)s|jobid=%(check_jobid)s|' + 'num_tasks=%(check_num_tasks)s|' '%(check_perf_var)s=%(check_perf_value)s|' 'ref=%(check_perf_ref)s ' '(l=%(check_perf_lower_thres)s, ' diff --git a/config/cscs.py b/config/cscs.py index f4792df03f..7eefee03f9 100644 --- a/config/cscs.py +++ b/config/cscs.py @@ -588,6 +588,7 @@ class ReframeSettings: 'format': ( '%(asctime)s|reframe %(version)s|' '%(check_info)s|jobid=%(check_jobid)s|' + 'num_tasks=%(check_num_tasks)s|' '%(check_perf_var)s=%(check_perf_value)s|' 'ref=%(check_perf_ref)s ' '(l=%(check_perf_lower_thres)s, ' diff --git a/docs/running.rst b/docs/running.rst index 9744178ecb..c63ac11a06 100644 --- a/docs/running.rst +++ b/docs/running.rst @@ -543,27 +543,31 @@ The performance report is printed after the output of the regression tests and h Check1 - system:partition - PrgEnv1 + * num_tasks: * perf_variable1: * perf_variable2: * ... - PrgEnv2 - : perf_variable1: - : perf_variable2: + * num_tasks: + * perf_variable1: + * perf_variable2: * ... ------------------------------------------------------------------------------ Check2 - system:partition - PrgEnv1 + * num_tasks: * perf_variable1: * perf_variable2: * ... - PrgEnv2 + * num_tasks: * perf_variable1: * perf_variable2: * ... 
------------------------------------------------------------------------------ -Achieved performance values are listed by system partition and programming environment for each performance test that has run. +The number of tasks and the achieved performance values are listed by system partition and programming environment for each performance test that has run. Performance variables are the variables collected through the :attr:`reframe.core.pipeline.RegressionTest.perf_patterns` attribute. The following command will run the CUDA matrix-vector multiplication example from the `tutorial `__ and will produce a performance report: @@ -840,6 +844,7 @@ All handlers accept the following set of attributes (keys) in their configuratio If a job or process is not yet created, ``-1`` will be printed. - ``check_name``: Prints the name of the regression test on behalf of which ReFrame is currently executing. If ReFrame is not in the context of regression test, ``reframe`` will be printed. + - ``check_num_tasks``: The number of tasks assigned to the regression test. - ``check_outputdir``: The output directory associated with the currently executing test. - ``check_partition``: The system partition where this test is currently executing. - ``check_stagedir``: The stage directory associated with the currently executing test. @@ -967,14 +972,17 @@ The attributes of this handler are the following: - ``check_perf_var``: The name of the `performance variable `__, whose value is logged. - ``check_perf_unit``: The unit of measurement for the measured performance variable, if specified in the corresponding tuple of the :attr:`reframe.core.pipeline.RegressionTest.reference` attribute. +.. note:: + .. versionchanged:: 2.20 + Support for logging ``num_tasks`` in performance logs was added. + Using the default performance log format, the resulting log entries look like the following: ..
code-block:: none - 2018-05-30T00:14:53|reframe 2.13-dev0|Example7Test on daint:gpu using PrgEnv-gnu|jobid=749667|perf=49.152408|ref=50.0 (l=-0.1, u=0.1) - 2018-05-30T00:14:53|reframe 2.13-dev0|Example7Test on daint:gpu using PrgEnv-pgi|jobid=749668|perf=48.930356|ref=50.0 (l=-0.1, u=0.1) - 2018-05-30T00:14:53|reframe 2.13-dev0|Example7Test on daint:gpu using PrgEnv-cray|jobid=749666|perf=48.914735|ref=50.0 (l=-0.1, u=0.1) - + 2019-10-23T13:46:05|reframe 2.20-dev2|Example7Test on daint:gpu using PrgEnv-cray|jobid=813559|num_tasks=1|perf=49.681565|ref=50.0 (l=-0.1, u=0.1)|Gflop/s + 2019-10-23T13:46:27|reframe 2.20-dev2|Example7Test on daint:gpu using PrgEnv-gnu|jobid=813560|num_tasks=1|perf=50.737651|ref=50.0 (l=-0.1, u=0.1)|Gflop/s + 2019-10-23T13:46:48|reframe 2.20-dev2|Example7Test on daint:gpu using PrgEnv-pgi|jobid=813561|num_tasks=1|perf=50.720164|ref=50.0 (l=-0.1, u=0.1)|Gflop/s The interpretation of the performance values depends on the individual tests. The above output is from the CUDA performance test we presented in the `tutorial `__, so the value refers to the achieved Gflop/s. 
@@ -997,6 +1005,7 @@ An example configuration of such a handler is the following: 'format': ( '%(asctime)s|reframe %(version)s|' '%(check_info)s|jobid=%(check_jobid)s|' + 'num_tasks=%(check_num_tasks)s|' '%(check_perf_var)s=%(check_perf_value)s|' 'ref=%(check_perf_ref)s ' '(l=%(check_perf_lower_thres)s, ' diff --git a/reframe/core/logging.py b/reframe/core/logging.py index 1682bfd726..03ced55af1 100644 --- a/reframe/core/logging.py +++ b/reframe/core/logging.py @@ -373,6 +373,7 @@ def __init__(self, logger=None, check=None): 'check_environ': None, 'check_outputdir': None, 'check_stagedir': None, + 'check_num_tasks': None, 'check_perf_var': None, 'check_perf_value': None, 'check_perf_ref': None, @@ -412,6 +413,7 @@ def _update_check_extras(self): self.extra['check_info'] = self.check.info() self.extra['check_outputdir'] = self.check.outputdir self.extra['check_stagedir'] = self.check.stagedir + self.extra['check_num_tasks'] = self.check.num_tasks self.extra['check_tags'] = ','.join(self.check.tags) if self.check.current_system: self.extra['check_system'] = self.check.current_system.name diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py index e592247798..6be05d9832 100644 --- a/reframe/frontend/statistics.py +++ b/reframe/frontend/statistics.py @@ -127,6 +127,8 @@ def performance_report(self): report.append(' - %s' % t.check.current_environ) + report.append(' * num_tasks: %s' % t.check.num_tasks) + for key, ref in t.check.perfvalues.items(): var = key.split(':')[-1] val = ref[0]