From b77f5baf8727d794ff6587efb7945e4a769e71ab Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 28 Jun 2021 09:25:06 +0200
Subject: [PATCH 01/34] Preliminary work on the perf patterns

---
 reframe/core/pipeline.py       | 19 +++++++++++++++----
 reframe/frontend/statistics.py |  2 +-
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 133b744b49..143338a723 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -543,7 +543,8 @@ def pipeline_hooks(cls):
     #: .. versionchanged:: 3.0
     #:    The measurement unit is required. The user should explicitly
     #:    specify :class:`None` if no unit is available.
-    reference = variable(typ.Tuple[object, object, object, object],
+    reference = variable(typ.Tuple[object, object, object],
+                         typ.Tuple[object, object, object, object],
                          field=fields.ScopedDictField, value={})
     # FIXME: There is no way currently to express tuples of `float`s or
     # `None`s, so we just use the very generic `object`
@@ -587,8 +588,7 @@ def pipeline_hooks(cls):
     #:     `) as values.
     #:     :class:`None` is also allowed.
     #: :default: :class:`None`
-    perf_patterns = variable(typ.Dict[str, _DeferredExpression],
-                             type(None), value=None)
+    perf_patterns = variable(typ.Dict[str, _DeferredExpression])
 
     #: List of modules to be loaded before running this test.
     #:
@@ -1562,7 +1562,18 @@ def check_performance(self):
         more details.
 
         '''
-        if self.perf_patterns is None:
+
+        if hasattr(self, '_rfm_perf'):
+            if hasattr(self, 'perf_patterns'):
+                raise ReframeSyntaxError(
+                    f"assigning a value to 'perf_patters' conflicts with ",
+                    f"using the 'performance_function' decorator (class ",
+                    f"{self.__class__.__qualname__})"
+                )
+
+            self.perf_patterns = self._rfm_perf()
+
+        if not hasattr(self, 'perf_patterns'):
             return
 
         self._setup_perf_logging()
diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index ac913b92d1..35c4676090 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -171,7 +171,7 @@ def json(self, force=False):
                 entry['result'] = 'success'
 
             entry['outputdir'] = check.outputdir
-            if check.perf_patterns:
+            if hasattr(check, 'perf_patterns'):
                 # Record performance variables
                 entry['perfvars'] = []
                 for key, ref in check.perfvalues.items():
From 53eb6cbb3bf75d84018b68c5aa50b50fde15fd62 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Wed, 30 Jun 2021 18:36:03 +0200
Subject: [PATCH 02/34] Implement performance decorators

---
 .../gpu/gpu_burn/gpu_burn_test.py             | 56 ++++++++------
 .../microbenchmarks/gpu/gpu_burn/__init__.py  | 45 +++++++-----
 reframe/core/meta.py                          | 73 ++++++++++++++++++-
 3 files changed, 129 insertions(+), 45 deletions(-)

diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
index ac0457fd83..9b4306b6c0 100644
--- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
+++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
@@ -24,30 +24,29 @@ class gpu_burn_check(GpuBurn):
     num_tasks = 0
     reference = {
         'dom:gpu': {
-            'perf': (4115, -0.10, None, 'Gflop/s'),
+            'min_perf': (4115, -0.10, None),
         },
         'daint:gpu': {
-            'perf': (4115, -0.10, None, 'Gflop/s'),
+            'min_perf': (4115, -0.10, None),
         },
         'arolla:cn': {
-            'perf': (5861, -0.10, None, 'Gflop/s'),
+            'min_perf': (5861, -0.10, None),
         },
         'tsa:cn': {
-            'perf': (5861, -0.10, None, 'Gflop/s'),
+            'min_perf': (5861, -0.10, None),
         },
         'ault:amda100': {
-            'perf': (15000, -0.10, None, 'Gflop/s'),
+            'min_perf': (15000, -0.10, None),
         },
         'ault:amdv100': {
-            'perf': (5500, -0.10, None, 'Gflop/s'),
+            'min_perf': (5500, -0.10, None),
         },
         'ault:intelv100': {
-            'perf': (5500, -0.10, None, 'Gflop/s'),
+            'min_perf': (5500, -0.10, None),
         },
         'ault:amdvega': {
-            'perf': (3450, -0.10, None, 'Gflop/s'),
+            'min_perf': (3450, -0.10, None),
         },
-        '*': {'temp': (0, None, None, 'degC')}
     }
 
     maintainers = ['AJ', 'TM']
@@ -62,17 +61,30 @@ def set_gpu_arch(self):
     def set_num_gpus_per_node(self):
         hooks.set_num_gpus_per_node(self)
 
-    @run_before('performance')
-    def report_nid_with_smallest_flops(self):
-        regex = r'\[(\S+)\] GPU\s+\d\(OK\): (\d+) GF/s'
-        rptf = os.path.join(self.stagedir, sn.evaluate(self.stdout))
-        self.nids = sn.extractall(regex, rptf, 1)
-        self.flops = sn.extractall(regex, rptf, 2, float)
+    @performance_report
+    def report_slow_nodes(self):
+        '''Report the base perf metrics and also all the slow nodes.'''
 
-        # Find index of smallest flops and update reference dictionary to
-        # include our patched units
-        index = self.flops.evaluate().index(min(self.flops))
-        unit = f'GF/s ({self.nids[index]})'
-        for key, ref in self.reference.items():
-            if not key.endswith(':temp'):
-                self.reference[key] = (*ref[:3], unit)
+        # Dict with the base perf metrics to report
+        perf_report = {
+            'min_perf': self.perf(),
+            'max_temp': self.temp(),
+        }
+
+        # Only report the nodes that don't meet the perf reference
+        key = f'{self.current_partition.fullname}:perf'
+        if key in self.reference:
+            regex = r'\[(\S+)\] GPU\s+\d\(OK\): (\d+) GF/s'
+            nids = set(sn.extractall(regex, self.stdout, 1))
+
+            # Get the references
+            ref, lt, ut = self.reference[key]
+
+            # Flag the slow nodes
+            for nid in nids:
+                try:
+                    sn.assert_reference(self.perf(nid), ref, lt, ut)
+                except SanityError:
+                    perf_report[nid] = self.perf(nid)
+
+        return perf_report
diff --git a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
index 6732ffa012..a9a14eade9 100644
--- a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
+++ b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
@@ -51,12 +51,6 @@ class GpuBurn(rfm.RegressionTest, pin_prefix=True):
     build_system = 'Make'
     executable = './gpu_burn.x'
     num_tasks_per_node = 1
-    reference = {
-        '*': {
-            'perf': (0, None, None, 'Gflop/s'),
-            'temp': (0, None, None, 'degC')
-        }
-    }
 
     @run_before('compile')
     def set_gpu_build(self):
@@ -103,17 +97,28 @@ def count_successful_burns(self):
             r'^\s*\[[^\]]*\]\s*GPU\s*\d+\(OK\)', self.stdout)
         ), self.num_tasks_assigned)
 
-    @run_before('performance')
-    def set_perf_patterns(self):
-        '''Extract the minimum performance and maximum temperature recorded.
-
-        The performance and temperature data are reported in Gflops/s and
-        deg. Celsius respectively.
-        '''
-
-        patt = (r'^\s*\[[^\]]*\]\s*GPU\s+\d+\(\S*\):\s+(?P<perf>\S*)\s+GF\/s'
-                r'\s+(?P<temp>\S*)\s+Celsius')
-        self.perf_patterns = {
-            'perf': sn.min(sn.extractall(patt, self.stdout, 'perf', float)),
-            'temp': sn.max(sn.extractall(patt, self.stdout, 'temp', float)),
-        }
+    @deferrable
+    def extract_perf(self, what, nid=None):
+        '''Utility to extract performance metrics.'''
+
+        if what not in {'perf', 'temp'}:
+            raise ValueError(
+                f"unsupported value in 'what' argument ({what!r})"
+            )
+
+        if nid is None:
+            nid = r'[^\]]*'
+
+        patt = (rf'^\s*\[{nid}\]\s*GPU\s+\d+\(\S*\):\s+(?P<perf>\S*)\s+GF\/s'
+                rf'\s+(?P<temp>\S*)\s+Celsius')
+        return sn.extractall(patt, self.stdout, what, float)),
+
+    @performance_function('Gflop/s')
+    def perf(self, nid=None):
+        '''Lowest performance recorded.'''
+        return sn.min(self.extract_perf('perf', nid))
+
+    @performance_function('degC')
+    def temp(self, nid=None):
+        '''Maximum temperature recorded.'''
+        return sn.max(self.extract_perf('temp', nid))
diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index 88f0a73cee..c4cfd96760 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -245,6 +245,37 @@ def sanity_function(fn):
 
         namespace['sanity_function'] = sanity_function
         namespace['deferrable'] = deferrable
+
+        # Machinery to add performance variables
+        def performance_function(units):
+            '''Decoreate a function to extract a performance variable.
+
+            The ``units`` argument indicates the units of the performance
+            variable to be extracted.
+            '''
+
+            if not isinstance(units, str):
+                raise TypeError('provided units are not in string format')
+
+            def _fn(fn):
+                _def_fn = deferrable(fn)
+                setattr(_def_fn, '_rfm_perf_unit', units)
+                return _def_fn
+
+            return _fn
+
+        namespace['performance_function'] = performance_function
+
+        def performance_report(fn):
+            '''Mark a function to generate the dict with the perf report.
+
+            It must return an object of type
+            ``typ.Dict[str, _DefferredExpression]``.
+            '''
+            setattr(fn, '_rfm_perf_report', True)
+            return fn
+
+        namespace['performance_report'] = performance_report
         return metacls.MetaNamespace(namespace)
 
     def __new__(metacls, name, bases, namespace, **kwargs):
@@ -259,7 +290,8 @@ class was created or even at the instance level (e.g. doing
 
         blacklist = [
             'parameter', 'variable', 'bind', 'run_before', 'run_after',
-            'require_deps', 'required', 'deferrable', 'sanity_function'
+            'require_deps', 'required', 'deferrable', 'sanity_function',
+            'performance_function', 'performance_report'
         ]
         for b in blacklist:
             namespace.pop(b, None)
@@ -291,13 +323,12 @@ def __init__(cls, name, bases, namespace, **kwargs):
         # phase if not assigned elsewhere
         hook_reg = hooks.HookRegistry.create(namespace)
         for base in (b for b in bases if hasattr(b, '_rfm_pipeline_hooks')):
-            hook_reg.update(getattr(base, '_rfm_pipeline_hooks'))
+            hook_reg.update(getattr(base, '_rfm_pipeline_hooks'), namespace)
 
         cls._rfm_pipeline_hooks = hook_reg
 
         # Gather all the locally defined sanity functions based on the
         # _rfm_sanity_fn attribute.
-
         sn_fn = [v for v in namespace.values() if hasattr(v, '_rfm_sanity_fn')]
         if sn_fn:
             cls._rfm_sanity = sn_fn[0]
@@ -321,6 +352,42 @@ def __init__(cls, name, bases, namespace, **kwargs):
 
                 break
 
+        # Gather all the locally defined performance report functions based on
+        # the _rfm_perf_report attribute.
+        perf_report_fn = [
+            v for v in namespace.values() if hasattr(v, '_rfm_perf_report')
+        ]
+        if perf_report_fn:
+            cls._rfm_perf_report = perf_report_fn[0]
+            if len(perf_report_fn) > 1:
+                raise ReframeSyntaxError(
+                    f'{cls.__qualname__!r} defines more than one performance '
+                    'report function in the class body.'
+                )
+
+        else:
+            # Search the bases if no local perf report functions exist.
+            for base in (b for b in bases if hasattr(b, '_rfm_perf_report')):
+                cls._rfm_perf_report = getattr(base, '_rfm_perf_report')
+                if cls._rfm_perf_report.__name__ in namespace:
+                    raise ReframeSyntaxError(
+                        f'{cls.__qualname__!r} overrides the candidate '
+                        f'performance report function '
+                        f'{cls._rfm_perf_report.__qualname__!r} without '
+                        f'defining an alternative'
+                    )
+
+                break
+
+        # Build a set with all the performance functions
+        cls._rfm_perf_fns = {
+            k for k, v in namespace.items() if hasattr(v, '_rfm_perf_unit')
+        }
+        for base in (b for b in bases if hasattr(b, '_rfm_perf_fns')):
+            cls._rfm_perf_fns = cls._rfm_perf_fns.union(
+                {f for f in getattr(b, '_rfm_perf_fns') if f not in namespace}
+            )
+
         cls._final_methods = {v.__name__ for v in namespace.values()
                               if hasattr(v, '_rfm_final')}
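A minimal sketch of how a test class is meant to consume the two decorators
introduced in this patch. The metaclass injects `performance_function` and
`performance_report` into the class body namespace (like `deferrable`), so no
extra import is needed; the test name, regex and values below are illustrative
only and not taken from the patch:

    import reframe as rfm
    import reframe.utility.sanity as sn


    class example_check(rfm.RunOnlyRegressionTest):
        executable = 'echo'
        executable_opts = ['perf: 100']

        @performance_function('Gflop/s')
        def flops(self):
            # Returns a deferred expression, evaluated at the
            # performance stage
            return sn.extractsingle(r'perf:\s+(\S+)', self.stdout, 1, float)

        @performance_report
        def my_perf_report(self):
            # Must return a dict of deferred expressions
            return {'flops': self.flops()}
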
From cf65b721a9b35178879515b832612540c37a5b14 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Thu, 1 Jul 2021 17:29:03 +0200
Subject: [PATCH 03/34] Update syntax compat logic

---
 .../microbenchmarks/gpu/gpu_burn/__init__.py |  3 +--
 reframe/core/meta.py                         |  3 ++-
 reframe/core/pipeline.py                     | 20 ++++++++++++++++---
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
index a9a14eade9..dd893d7e2b 100644
--- a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
+++ b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
@@ -77,7 +77,6 @@ def set_gpu_build(self):
             raise ValueError('unknown gpu_build option')
 
     @property
-    @deferrable
     def num_tasks_assigned(self):
         '''Total number of times the gpu burn will run.
 
@@ -111,7 +110,7 @@ def extract_perf(self, what, nid=None):
         patt = (rf'^\s*\[{nid}\]\s*GPU\s+\d+\(\S*\):\s+(?P<perf>\S*)\s+GF\/s'
                 rf'\s+(?P<temp>\S*)\s+Celsius')
-        return sn.extractall(patt, self.stdout, what, float)),
+        return sn.extractall(patt, self.stdout, what, float)
 
diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index e3dd522f59..0768ff0417 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -386,7 +386,8 @@ def __init__(cls, name, bases, namespace, **kwargs):
         }
         for base in (b for b in bases if hasattr(b, '_rfm_perf_fns')):
             cls._rfm_perf_fns = cls._rfm_perf_fns.union(
-                {f for f in getattr(b, '_rfm_perf_fns') if f not in namespace}
+                {f for f in getattr(base, '_rfm_perf_fns')
+                 if f not in namespace}
             )
 
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 0525dbadfc..5a382b3f2a 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1563,15 +1563,29 @@ def check_performance(self):
 
         '''
 
-        if hasattr(self, '_rfm_perf'):
+        if hasattr(self, '_rfm_perf_report') or self._rfm_perf_fns:
             if hasattr(self, 'perf_patterns'):
                 raise ReframeSyntaxError(
                     f"assigning a value to 'perf_patters' conflicts with ",
-                    f"using the 'performance_function' decorator (class ",
+                    f"using the 'performance_report' decorator (class ",
                     f"{self.__class__.__qualname__})"
                 )
 
-            self.perf_patterns = self._rfm_perf()
+            if not hasattr(self, '_rfm_perf_report'):
+                self.perf_patterns = {}
+                for fn in self._rfm_perf_fns:
+                    self.perf_patterns[fn.__name__] = fn
+
+            else:
+                self.perf_patterns = self._rfm_perf_report()
+
+            self._setup_perf_logging()
+            with osext.change_dir(self._stagedir):
+                '''New perf check here.'''
+                print(self.perf_patterns)
+
+            return
+
         if not hasattr(self, 'perf_patterns'):
             return
From 94916a6bfadc5aaf8af344e11ec0e448a76e371f Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Fri, 2 Jul 2021 22:46:51 +0200
Subject: [PATCH 04/34] Implement new perf syntax

---
 .../gpu/gpu_burn/gpu_burn_test.py             |  6 +-
 .../microbenchmarks/gpu/gpu_burn/__init__.py  |  3 +-
 reframe/core/meta.py                          | 18 +++--
 reframe/core/pipeline.py                      | 69 +++++++++++--------
 4 files changed, 59 insertions(+), 37 deletions(-)

diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
index 9b4306b6c0..2ecba81eb7 100644
--- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
+++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
@@ -7,6 +7,7 @@
 
 import reframe as rfm
 import reframe.utility.sanity as sn
+from reframe.core.exceptions import SanityError
 
 from hpctestlib.microbenchmarks.gpu.gpu_burn import GpuBurn
 import cscstests.microbenchmarks.gpu.hooks as hooks
@@ -72,7 +73,7 @@ def report_slow_nodes(self):
         }
 
         # Only report the nodes that don't meet the perf reference
-        key = f'{self.current_partition.fullname}:perf'
+        key = f'{self.current_partition.fullname}:min_perf'
         if key in self.reference:
             regex = r'\[(\S+)\] GPU\s+\d\(OK\): (\d+) GF/s'
             nids = set(sn.extractall(regex, self.stdout, 1))
@@ -83,7 +84,8 @@ def report_slow_nodes(self):
             # Flag the slow nodes
             for nid in nids:
                 try:
-                    sn.assert_reference(self.perf(nid), ref, lt, ut)
+                    val = self.perf(nid).evaluate()[0]
+                    sn.assert_reference(val, ref, lt, ut).evaluate()
                 except SanityError:
                     perf_report[nid] = self.perf(nid)
 
         return perf_report
diff --git a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
index dd893d7e2b..8dd7491207 100644
--- a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
+++ b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
@@ -96,7 +96,6 @@ def count_successful_burns(self):
             r'^\s*\[[^\]]*\]\s*GPU\s*\d+\(OK\)', self.stdout)
         ), self.num_tasks_assigned)
 
-    @deferrable
     def extract_perf(self, what, nid=None):
         '''Utility to extract performance metrics.'''
 
@@ -110,7 +109,7 @@ def extract_perf(self, what, nid=None):
         patt = (rf'^\s*\[{nid}\]\s*GPU\s+\d+\(\S*\):\s+(?P<perf>\S*)\s+GF\/s'
                 rf'\s+(?P<temp>\S*)\s+Celsius')
-        return sn.extractall(patt, self.stdout, what, float)
+        return sn.extractall(patt, self.stdout, what, float).evaluate()
 
diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index 0768ff0417..a29510b853 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -258,9 +258,17 @@ def performance_function(units):
             raise TypeError('provided units are not in string format')
 
         def _fn(fn):
-            _def_fn = deferrable(fn)
-            setattr(_def_fn, '_rfm_perf_unit', units)
-            return _def_fn
+            @deferrable
+            @functools.wraps(fn)
+            def _perf_fn(*args, **kwargs):
+                try:
+                    return fn(*args, **kwargs).evaluate(), units
+                except AttributeError:
+                    return fn(*args, **kwargs), units
+
+            setattr(_perf_fn, '_rfm_perf_fn', units)
+
+            return _perf_fn
 
         return _fn
@@ -382,12 +390,12 @@ def __init__(cls, name, bases, namespace, **kwargs):
         # Build a set with all the performance functions
         cls._rfm_perf_fns = {
-            k for k,v in namespace.items() if hasattr(v, '_rfm_perf_unit')
+            v for k,v in namespace.items() if hasattr(v, '_rfm_perf_fn')
         }
         for base in (b for b in bases if hasattr(b, '_rfm_perf_fns')):
             cls._rfm_perf_fns = cls._rfm_perf_fns.union(
                 {f for f in getattr(base, '_rfm_perf_fns')
-                 if f not in namespace}
+                 if f.__name__ not in namespace}
             )
 
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 5a382b3f2a..bae0aa0acf 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1484,7 +1484,8 @@ def sanity(self):
     @final
     def performance(self):
         try:
-            self.check_performance()
+            with osext.change_dir(self._stagedir):
+                self.check_performance()
         except PerformanceError:
             if self.strict_check:
                 raise
@@ -1571,27 +1572,38 @@ def check_performance(self):
                     f"{self.__class__.__qualname__})"
                 )
 
+            # Build the performance dict
             if not hasattr(self, '_rfm_perf_report'):
                 self.perf_patterns = {}
                 for fn in self._rfm_perf_fns:
-                    self.perf_patterns[fn.__name__] = fn
+                    self.perf_patterns[fn.__name__] = fn(self)
 
             else:
                 self.perf_patterns = self._rfm_perf_report()
 
+            # Log the performance variables
             self._setup_perf_logging()
-            with osext.change_dir(self._stagedir):
-                '''New perf check here.'''
-                print(self.perf_patterns)
+            for tag, expr in self.perf_patterns.items():
+                value, unit = sn.evaluate(expr)
+                key = '%s:%s' % (self._current_partition.fullname, tag)
+                try:
+                    ref = self.reference[key]
+                    if len(ref) > 3:
+                        raise ReframeSyntaxError(
+                            'reference tuple has more than three elements'
+                        )
 
-            return
+                except KeyError:
+                    ref = (0, None, None)
 
-        if not hasattr(self, 'perf_patterns'):
+                self._perfvalues[key] = (value, *ref, unit)
+                self._perf_logger.log_performance(logging.INFO, tag, value,
+                                                  *ref, unit)
+
+        elif not hasattr(self, 'perf_patterns'):
             return
-
-        self._setup_perf_logging()
-        with osext.change_dir(self._stagedir):
+        else:
+            self._setup_perf_logging()
             # Check if default reference perf values are provided and
             # store all the variables tested in the performance check
             has_default = False
             variables = set()
             for key, ref in self.reference.items():
                 keyparts = key.split(self.reference.scope_separator)
                 system = keyparts[0]
                 varname = keyparts[-1]
                 unit = ref[3]
                 variables.add((varname, unit))
                 if system == '*':
                     has_default = True
                     break
 
             if not has_default:
                 if not variables:
                     # If empty, it means that self.reference was empty, so try
                     # to infer their name from perf_patterns
                     variables = {(name, None)
                                  for name in self.perf_patterns.keys()}
 
                 for var in variables:
                     name, unit = var
                     ref_tuple = (0, None, None, unit)
                     self.reference.update({'*': {name: ref_tuple}})
 
             # We first evaluate and log all performance values and then we
             # check them against the reference. This way we always log them
             # even if they don't meet the reference.
             for tag, expr in self.perf_patterns.items():
                 value = sn.evaluate(expr)
                 key = '%s:%s' % (self._current_partition.fullname, tag)
                 if key not in self.reference:
                     raise SanityError(
                         "tag `%s' not resolved in references for `%s'" %
                         (tag, self._current_partition.fullname))
 
                 self._perfvalues[key] = (value, *self.reference[key])
                 self._perf_logger.log_performance(logging.INFO, tag, value,
                                                   *self.reference[key])
 
-        for key, values in self._perfvalues.items():
-            val, ref, low_thres, high_thres, *_ = values
-
-            # Verify that val is a number
-            if not isinstance(val, numbers.Number):
-                raise SanityError(
-                    "the value extracted for performance variable '%s' "
-                    "is not a number: %s" % (key, val)
-                )
-
-            tag = key.split(':')[-1]
-            try:
-                sn.evaluate(
-                    sn.assert_reference(
-                        val, ref, low_thres, high_thres,
-                        msg=('failed to meet reference: %s={0}, '
-                             'expected {1} (l={2}, u={3})' % tag))
-                )
-            except SanityError as e:
-                raise PerformanceError(e)
+            # Check the performace variables against their references.
+            for key, values in self._perfvalues.items():
+                val, ref, low_thres, high_thres, *_ = values
+
+                # Verify that val is a number
+                if not isinstance(val, numbers.Number):
+                    raise SanityError(
+                        "the value extracted for performance variable '%s' "
+                        "is not a number: %s" % (key, val)
+                    )
+
+                tag = key.split(':')[-1]
+                try:
+                    sn.evaluate(
+                        sn.assert_reference(
+                            val, ref, low_thres, high_thres,
+                            msg=('failed to meet reference: %s={0}, '
+                                 'expected {1} (l={2}, u={3})' % tag))
+                    )
+                except SanityError as e:
+                    raise PerformanceError(e)
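With this patch a decorated performance function no longer evaluates to a bare
value: the `_perf_fn` wrapper pairs the measured value with its units. A rough
sketch of what `check_performance()` now observes for each entry of
`perf_patterns` (the numbers are illustrative only):

    # Each entry holds the deferred expression returned by fn(self)
    value, unit = sn.evaluate(self.perf_patterns['min_perf'])
    # value -> e.g. 4115.0, unit -> 'Gflop/s'
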
From d974764d6eb294cc55e30eabb8ad627687f7e6ca Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Fri, 2 Jul 2021 23:02:39 +0200
Subject: [PATCH 05/34] Port jobreport to new syntax

---
 cscs-checks/system/jobreport/gpu_report.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/cscs-checks/system/jobreport/gpu_report.py b/cscs-checks/system/jobreport/gpu_report.py
index 5967ff59eb..77a6396487 100644
--- a/cscs-checks/system/jobreport/gpu_report.py
+++ b/cscs-checks/system/jobreport/gpu_report.py
@@ -93,7 +93,11 @@ def gpu_usage_sanity(self):
             sn.assert_ge(sn.min(time_reported), self.burn_time)
         ])
 
-    @run_before('performance')
+    @performance_function('nodes')
+    def total_nodes_reported(self):
+        return sn.count(self.nodes_reported)
+
+    @performance_report
     def set_perf_patterns(self):
         '''The number of reported nodes can be used as a perf metric.
 
@@ -103,9 +107,9 @@ def set_perf_patterns(self):
 
         self.reference = {
             '*': {
-                'nodes_reported': (self.num_tasks, self.perf_floor, 0, 'nodes')
+                'nodes_reported': (self.num_tasks, self.perf_floor, 0)
             },
         }
-        self.perf_patterns = {
-            'nodes_reported': sn.count(self.nodes_reported)
+        return {
+            'nodes_reported': self.total_nodes_reported()
         }
From 98432db13ce625d6e20fe4b19603321eabe4b285 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 5 Jul 2021 16:36:17 +0200
Subject: [PATCH 06/34] Fix reference types

---
 reframe/core/pipeline.py | 205 ++++++++++++++++++++-------------------
 1 file changed, 103 insertions(+), 102 deletions(-)

diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index bae0aa0acf..0e37b9c430 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -544,8 +544,9 @@ def pipeline_hooks(cls):
     #:    The measurement unit is required. The user should explicitly
     #:    specify :class:`None` if no unit is available.
     reference = variable(typ.Tuple[object, object, object],
-                         typ.Tuple[object, object, object, object],
-                         field=fields.ScopedDictField, value={})
+                         typ.Dict[str, typ.Dict[
+                             str, typ.Tuple[object, object, object, object]]
+                         ], field=fields.ScopedDictField, value={})
     # FIXME: There is no way currently to express tuples of `float`s or
     # `None`s, so we just use the very generic `object`
@@ -588,7 +589,7 @@ def pipeline_hooks(cls):
     #:     `) as values.
     #:     :class:`None` is also allowed.
     #: :default: :class:`None`
-    perf_patterns = variable(typ.Dict[str, _DeferredExpression])
+    perf_patterns = variable(typ.Dict[str, _DeferredExpression], type(None))
 
     #: List of modules to be loaded before running this test.
     #:
@@ -1484,8 +1485,7 @@ def sanity(self):
     @final
     def performance(self):
         try:
-            with osext.change_dir(self._stagedir):
-                self.check_performance()
+            self.check_performance()
         except PerformanceError:
             if self.strict_check:
                 raise
@@ -1564,108 +1564,109 @@ def check_performance(self):
 
         '''
 
         with osext.change_dir(self._stagedir):
             if hasattr(self, '_rfm_perf_report') or self._rfm_perf_fns:
                 if hasattr(self, 'perf_patterns'):
                     raise ReframeSyntaxError(
                         f"assigning a value to 'perf_patters' conflicts with ",
                         f"using the 'performance_report' decorator (class ",
                         f"{self.__class__.__qualname__})"
                     )
 
                 # Build the performance dict
                 if not hasattr(self, '_rfm_perf_report'):
                     self.perf_patterns = {}
                     for fn in self._rfm_perf_fns:
                         self.perf_patterns[fn.__name__] = fn(self)
 
                 else:
                     self.perf_patterns = self._rfm_perf_report()
 
                 # Log the performance variables
                 self._setup_perf_logging()
                 for tag, expr in self.perf_patterns.items():
                     value, unit = sn.evaluate(expr)
                     key = '%s:%s' % (self._current_partition.fullname, tag)
                     try:
                         ref = self.reference[key]
                         if len(ref) > 3:
                             raise ReframeSyntaxError(
                                 'reference tuple has more than three elements'
                             )
 
                     except KeyError:
                         ref = (0, None, None)
 
                     self._perfvalues[key] = (value, *ref, unit)
                     self._perf_logger.log_performance(logging.INFO, tag,
                                                       value, *ref, unit)
 
             elif not hasattr(self, 'perf_patterns'):
                 return
             else:
                 self._setup_perf_logging()
                 # Check if default reference perf values are provided and
                 # store all the variables tested in the performance check
                 has_default = False
                 variables = set()
                 for key, ref in self.reference.items():
                     keyparts = key.split(self.reference.scope_separator)
                     system = keyparts[0]
                     varname = keyparts[-1]
                     unit = ref[3]
                     variables.add((varname, unit))
                     if system == '*':
                         has_default = True
                         break
 
                 if not has_default:
                     if not variables:
                         # If empty, it means that self.reference was empty, so
                         # try to infer their name from perf_patterns
                         variables = {(name, None)
                                      for name in self.perf_patterns.keys()}
 
                     for var in variables:
                         name, unit = var
                         ref_tuple = (0, None, None, unit)
                         self.reference.update({'*': {name: ref_tuple}})
 
                 # We first evaluate and log all performance values and then we
                 # check them against the reference. This way we always log
                 # them even if they don't meet the reference.
                 for tag, expr in self.perf_patterns.items():
                     value = sn.evaluate(expr)
                     key = '%s:%s' % (self._current_partition.fullname, tag)
                     if key not in self.reference:
                         raise SanityError(
                             "tag `%s' not resolved in references for `%s'" %
                             (tag, self._current_partition.fullname))
 
                     self._perfvalues[key] = (value, *self.reference[key])
                     self._perf_logger.log_performance(logging.INFO, tag,
                                                       value,
                                                       *self.reference[key])
 
-        # Check the performace variables against their references.
-        for key, values in self._perfvalues.items():
-            val, ref, low_thres, high_thres, *_ = values
-
-            # Verify that val is a number
-            if not isinstance(val, numbers.Number):
-                raise SanityError(
-                    "the value extracted for performance variable '%s' "
-                    "is not a number: %s" % (key, val)
-                )
-
-            tag = key.split(':')[-1]
-            try:
-                sn.evaluate(
-                    sn.assert_reference(
-                        val, ref, low_thres, high_thres,
-                        msg=('failed to meet reference: %s={0}, '
-                             'expected {1} (l={2}, u={3})' % tag))
-                )
-            except SanityError as e:
-                raise PerformanceError(e)
+            # Check the performace variables against their references.
+            for key, values in self._perfvalues.items():
+                val, ref, low_thres, high_thres, *_ = values
+
+                # Verify that val is a number
+                if not isinstance(val, numbers.Number):
+                    raise SanityError(
+                        "the value extracted for performance variable '%s' "
+                        "is not a number: %s" % (key, val)
+                    )
+
+                tag = key.split(':')[-1]
+                try:
+                    sn.evaluate(
+                        sn.assert_reference(
+                            val, ref, low_thres, high_thres,
+                            msg=('failed to meet reference: %s={0}, '
+                                 'expected {1} (l={2}, u={3})' % tag))
+                    )
+                except SanityError as e:
+                    raise PerformanceError(e)
From 20633fae50d41328654d84f1ea604465a903734c Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 19 Jul 2021 12:12:31 +0200
Subject: [PATCH 07/34] Add perf_key argument to @perf_function

---
 reframe/core/pipeline.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index e3feca57db..dcaf060b32 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1623,7 +1623,7 @@ def check_performance(self):
             if not hasattr(self, '_rfm_perf_report'):
                 self.perf_patterns = {}
                 for fn in self._rfm_perf_fns:
-                    self.perf_patterns[fn.__name__] = fn(self)
+                    self.perf_patterns[fn._rfm_perf_name] = fn(self)
 
             else:
                 self.perf_patterns = self._rfm_perf_report()
From 0fc17dd6acafc22344ddb115cbbcf3842aff0d15 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 19 Jul 2021 15:37:32 +0200
Subject: [PATCH 08/34] Add make_performance_function method

---
 reframe/core/meta.py     | 18 ++++++++++++------
 reframe/core/pipeline.py |  9 ++++++++-
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index f5650fbd9f..f2576ba19b 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -16,7 +16,7 @@
 import reframe.core.hooks as hooks
 
 from reframe.core.exceptions import ReframeSyntaxError
-from reframe.core.deferrable import deferrable
+from reframe.core.deferrable import deferrable, _DeferredExpression
 
 
 _USER_PIPELINE_STAGES = (
@@ -272,9 +272,9 @@ def performance_function(units):
             @functools.wraps(fn)
             def _perf_fn(*args, **kwargs):
                 ret = fn(*args, **kwargs)
-                try:
+                if isinstance(ret, _DeferredExpression):
                     return ret.evaluate(), units
-                except AttributeError:
+                else:
                     return ret, units
 
             setattr(_perf_fn, '_rfm_perf_fn', units)
@@ -285,17 +285,16 @@ def _perf_fn(*args, **kwargs):
 
             return _fn
 
-        namespace['performance_function'] = performance_function
-
         def performance_report(fn):
             '''Mark a function to generate the dict with the perf report.
 
             It must return an object of type
-            ``typ.Dict[str, _DefferredExpression]``.
+            ``typ.Dict[str, _DeferredExpression]``.
             '''
             setattr(fn, '_rfm_perf_report', True)
             return fn
 
+        namespace['performance_function'] = performance_function
         namespace['performance_report'] = performance_report
         return metacls.MetaNamespace(namespace)
 
     def __new__(metacls, name, bases, namespace, **kwargs):
@@ -309,6 +308,13 @@ class was created or even at the instance level (e.g. doing
         constructed.
         '''
 
+        perf_fn_deco = namespace['performance_function']
+
+        def make_performance_function(self, fn, unit, *args, **kwargs):
+            '''Wrapper to make a performance function inline.'''
+            return perf_fn_deco(unit)(fn)(self, *args, **kwargs)
+
+        namespace['make_performance_function'] = make_performance_function
+
         directives = [
             'parameter', 'variable', 'bind', 'run_before', 'run_after',
             'require_deps', 'required', 'deferrable', 'sanity_function',
             'final', 'performance_function', 'performance_report'
         ]
         for b in directives:
             namespace.pop(b, None)
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index dcaf060b32..6fb6c758aa 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1631,7 +1631,14 @@ def check_performance(self):
             # Log the performance variables
             self._setup_perf_logging()
             for tag, expr in self.perf_patterns.items():
-                value, unit = sn.evaluate(expr)
+                try:
+                    value, unit = sn.evaluate(expr)
+                except TypeError:
+                    raise ReframeSyntaxError(
+                        f'function assigned for performance variable '
+                        f'{tag!r} is not a performance function'
+                    )
+
                 key = '%s:%s' % (self._current_partition.fullname, tag)
                 try:
                     ref = self.reference[key]
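`make_performance_function` applies the `performance_function` decorator
inline and immediately binds the test instance, so a performance variable can
also be built inside a hook. A hedged sketch, assuming a test that defines no
decorated performance functions of its own (the hook and helper names are
illustrative):

    @run_before('performance')
    def set_perf_vars(self):
        def min_flops(test):
            return sn.min(test.extract_perf('perf'))

        self.perf_patterns = {
            'min_perf': self.make_performance_function(min_flops, 'Gflop/s')
        }
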
From c5fa6885f30964e2de99efd4c3b3837ad1b13e8e Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 19 Jul 2021 15:58:27 +0200
Subject: [PATCH 09/34] Make unit type-checking more robust

---
 reframe/core/meta.py     | 14 ++++++++++++--
 reframe/core/pipeline.py |  4 ++++
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index f2576ba19b..afc975ab50 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -264,9 +264,19 @@ def performance_function(units, *, perf_key=None):
             be used as the performance variable name.
             '''
 
-            if not isinstance(units, str):
-                raise TypeError('provided units are not in string format')
+            class perf_units:
+                __slots__ = ('_rfm_perf_units')
 
+                def __init__(self, units):
+                    if not isinstance(units, str):
+                        raise TypeError(
+                            'performance units must be provided in string '
+                            'format'
+                        )
+
+                    self._rfm_perf_units = units
+
+            units = perf_units(units)
             def _fn(fn):
                 @deferrable
                 @functools.wraps(fn)
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 6fb6c758aa..0a47d0212d 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1633,6 +1633,10 @@ def check_performance(self):
             for tag, expr in self.perf_patterns.items():
                 try:
                     value, unit = sn.evaluate(expr)
+                    unit = getattr(unit, '_rfm_perf_units', None)
+                    if not unit:
+                        raise TypeError('Trigger the TypeError below')
+
                 except TypeError:
                     raise ReframeSyntaxError(
                         f'function assigned for performance variable '
From 7607dea19e9c09c32ee53c0471322064a4ab3d92 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 19 Jul 2021 16:31:54 +0200
Subject: [PATCH 10/34] Add signature checking on perf fns

---
 reframe/core/meta.py | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index afc975ab50..b06c4ebd96 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -9,6 +9,7 @@
 
 import functools
 import types
+import inspect
 
 import reframe.core.namespaces as namespaces
 import reframe.core.parameters as parameters
@@ -277,7 +278,20 @@ def __init__(self, units):
                 self._rfm_perf_units = units
 
             units = perf_units(units)
+
             def _fn(fn):
+                # Performance functions must only have a single positional arg
+                # without a default value
+                pos_args = len(
+                    [p for p in inspect.signature(fn).parameters.values()
+                     if p.default is p.empty]
+                )
+
+                if pos_args != 1:
+                    raise ReframeSyntaxError(
+                        f'performance function {fn.__name__} has more than '
+                        f'one argument without a default value'
+                    )
+
                 @deferrable
                 @functools.wraps(fn)
                 def _perf_fn(*args, **kwargs):
@@ -419,7 +434,7 @@ def __init__(cls, name, bases, namespace, **kwargs):
 
         # Build a set with all the performance functions
         cls._rfm_perf_fns = {
-            v for k,v in namespace.items() if hasattr(v, '_rfm_perf_fn')
+            v for k, v in namespace.items() if hasattr(v, '_rfm_perf_fn')
         }
         for base in (b for b in bases if hasattr(b, '_rfm_perf_fns')):
             cls._rfm_perf_fns = cls._rfm_perf_fns.union(
From e0b3ab8d36743c70aba23ca54d9f2cf83e5416ce Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 19 Jul 2021 17:17:10 +0200
Subject: [PATCH 11/34] Remove unused imports

---
 cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py | 1 -
 reframe/core/pipeline.py                                  | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
index 2ecba81eb7..a743cc44da 100644
--- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
+++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
@@ -3,7 +3,6 @@
 #
 # SPDX-License-Identifier: BSD-3-Clause
 
-import os
 
 import reframe as rfm
 import reframe.utility.sanity as sn
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 0a47d0212d..1fca68e5ef 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -586,7 +586,7 @@ def pipeline_hooks(cls):
     reference = variable(typ.Tuple[object, object, object],
                          typ.Dict[str, typ.Dict[
                              str, typ.Tuple[object, object, object, object]]
-                         ], field=fields.ScopedDictField, value={})
+                         ], field=fields.ScopedDictField, value={})
 
     # FIXME: There is no way currently to express tuples of `float`s or
     # `None`s, so we just use the very generic `object`
From 277e73d94bfcd71658b0fa406a3272ed7adabb59 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Tue, 20 Jul 2021 17:56:46 +0200
Subject: [PATCH 12/34] Add trivially callable util

---
 reframe/core/meta.py        |  7 ++-----
 reframe/utility/__init__.py | 21 +++++++++++++++++++++
 unittests/test_utility.py   | 11 +++++++++++
 3 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index b06c4ebd96..4c8fb6b347 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -15,6 +15,7 @@
 import reframe.core.parameters as parameters
 import reframe.core.variables as variables
 import reframe.core.hooks as hooks
+import reframe.utility as utils
 
 from reframe.core.exceptions import ReframeSyntaxError
 from reframe.core.deferrable import deferrable, _DeferredExpression
@@ -282,11 +283,7 @@ def __init__(self, units):
             def _fn(fn):
                 # Performance functions must only have a single positional arg
                 # without a default value
-                pos_args = len(
-                    [p for p in inspect.signature(fn).parameters.values()
-                     if p.default is p.empty]
-                )
-                if pos_args != 1:
+                if not utils.is_trivially_callable(fn, non_def_args=1):
                     raise ReframeSyntaxError(
                         f'performance function {fn.__name__} has more than '
                         f'one argument without a default value'
                     )
diff --git a/reframe/utility/__init__.py b/reframe/utility/__init__.py
index 96f0140ddb..2b8d211dc9 100644
--- a/reframe/utility/__init__.py
+++ b/reframe/utility/__init__.py
@@ -304,6 +304,27 @@ def attrs(obj):
     return ret
 
 
+def is_trivially_callable(fn, *, non_def_args=0):
+    '''Assert that a callable object is trivially callable.
+
+    A trivially callable object is one that can be called without providing
+    any additional arguments to its call method. Member functions with a
+    single argument without a default value may be considered trivially
+    callable, since the ``self`` placeholder is automatically provided when
+    invoked from the instance. The number of allowed arguments without a
+    default value for a callable to be considered trivially callable may be
+    controlled with the ``non_def_args`` argument.
+    '''
+
+    if not callable(fn):
+        raise TypeError('argument is not a callable')
+
+    return len(
+        [p for p in inspect.signature(fn).parameters.values()
+         if p.default is p.empty]
+    ) == non_def_args
+
+
 def _is_builtin_type(cls):
     # NOTE: The set of types is copied from the copy.deepcopy() implementation
     builtin_types = (type(None), int, float, bool, complex, str, tuple,
diff --git a/unittests/test_utility.py b/unittests/test_utility.py
index bfc7737122..a1acde6e30 100644
--- a/unittests/test_utility.py
+++ b/unittests/test_utility.py
@@ -1717,6 +1717,17 @@ def foo():
     assert not util.is_copyable(foo())
 
 
+def test_is_trivially_callable():
+    def foo():
+        pass
+
+    def bar(x, y):
+        pass
+
+    assert util.is_trivially_callable(foo)
+    assert util.is_trivially_callable(bar, non_def_args=2)
+
+
 def test_nodelist_abbrev():
     nid_nodes = [f'nid{n:03}' for n in range(5, 20)]
     cid_nodes = [f'cid{n:03}' for n in range(20)]
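A short illustration of the semantics of the new utility, mirroring the unit
test above (the `Foo` class is illustrative):

    import reframe.utility as util

    class Foo:
        def method(self, x=1):
            pass

    # True: bound method, the only remaining argument has a default
    util.is_trivially_callable(Foo().method)
    # True: on the unbound function only `self` lacks a default
    util.is_trivially_callable(Foo.method, non_def_args=1)
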
From e4f4092960078128c542d5b53a96d2c8c258ed38 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Tue, 20 Jul 2021 18:08:37 +0200
Subject: [PATCH 13/34] Remove unused imports

---
 reframe/core/meta.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index 4c8fb6b347..15c1952fc3 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -9,7 +9,6 @@
 
 import functools
 import types
-import inspect
 
 import reframe.core.namespaces as namespaces
 import reframe.core.parameters as parameters
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 1fca68e5ef..5ac9b281d5 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1614,7 +1614,7 @@ def check_performance(self): if hasattr(self, '_rfm_perf_report') or self._rfm_perf_fns: if hasattr(self, 'perf_patterns'): raise ReframeSyntaxError( - f"assigning a value to 'perf_patters' conflicts with ", + f"assigning a value to 'perf_pattenrs' conflicts with ", f"using the 'performance_report' decorator (class ", f"{self.__class__.__qualname__})" ) @@ -1703,7 +1703,7 @@ def check_performance(self): self._perf_logger.log_performance(logging.INFO, tag, value, *self.reference[key]) - # Check the performace variables against their references. + # Check the performance variables against their references. for key, values in self._perfvalues.items(): val, ref, low_thres, high_thres, *_ = values From cdf69cc15d0da975fef9c7e6efb7eac8ed4f8eef Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Mon, 26 Jul 2021 14:49:18 +0200 Subject: [PATCH 15/34] Allow units in references --- reframe/core/pipeline.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 7e7bcfdb47..e9fadd4725 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1693,10 +1693,20 @@ def check_performance(self): key = '%s:%s' % (self._current_partition.fullname, tag) try: ref = self.reference[key] - if len(ref) > 3: - raise ReframeSyntaxError( - 'reference tuple has more than three elements' - ) + + # If units are also provided in the reference, make + # sure they match with the units provided by the + # performance function. + if len(ref) == 4: + if ref[3] != unit: + raise ReframeSyntaxError( + f'units for {tag!r} in the reference ' + f'{key!r} do not match those specified in ' + f'the performance function ({unit})' + ) + + # Pop the unit from the ref tuple (redundant) + ref = ref[:3] except KeyError: ref = (0, None, None) From 98d43d2f022b935a565f74db622b333a18845183 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Fri, 30 Jul 2021 15:45:46 +0200 Subject: [PATCH 16/34] Rename performance_report to performance_variables --- reframe/core/meta.py | 32 ++++++++++++++++---------------- reframe/core/pipeline.py | 12 ++++++------ 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index d450991889..343eaa45b7 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -277,17 +277,17 @@ def _perf_fn(*args, **kwargs): return _fn - def performance_report(fn): - '''Mark a function to generate the dict with the perf report. + def performance_variables(fn): + '''Mark a function to generate the dict with all the perf vars. It must return an object of type ``typ.Dict[str, _DeferredExpression]``. 
From cdf69cc15d0da975fef9c7e6efb7eac8ed4f8eef Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 26 Jul 2021 14:49:18 +0200
Subject: [PATCH 15/34] Allow units in references

---
 reframe/core/pipeline.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 7e7bcfdb47..e9fadd4725 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1693,10 +1693,20 @@ def check_performance(self):
                 key = '%s:%s' % (self._current_partition.fullname, tag)
                 try:
                     ref = self.reference[key]
-                    if len(ref) > 3:
-                        raise ReframeSyntaxError(
-                            'reference tuple has more than three elements'
-                        )
+
+                    # If units are also provided in the reference, make
+                    # sure they match with the units provided by the
+                    # performance function.
+                    if len(ref) == 4:
+                        if ref[3] != unit:
+                            raise ReframeSyntaxError(
+                                f'units for {tag!r} in the reference '
+                                f'{key!r} do not match those specified in '
+                                f'the performance function ({unit})'
+                            )
+
+                        # Pop the unit from the ref tuple (redundant)
+                        ref = ref[:3]
 
                 except KeyError:
                     ref = (0, None, None)
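With units now allowed back in the reference tuples, the familiar four-element
form keeps working alongside performance functions; the trailing unit is
checked against the one declared with `@performance_function` and then
dropped. An illustrative reference for a test whose performance function
declares `'Gflop/s'`:

    reference = {
        'daint:gpu': {
            # (target, lower_thres, upper_thres, units); the units must
            # match those of the corresponding performance function
            'min_perf': (4115, -0.10, None, 'Gflop/s'),
        }
    }
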
reframe/core/hooks.py | 33 +++---- reframe/core/meta.py | 161 ++++++++++++++++++----------------- reframe/core/namespaces.py | 2 +- unittests/test_parameters.py | 7 +- unittests/test_variables.py | 3 +- 5 files changed, 105 insertions(+), 101 deletions(-) diff --git a/reframe/core/hooks.py b/reframe/core/hooks.py index e87db4d0f1..99aef67809 100644 --- a/reframe/core/hooks.py +++ b/reframe/core/hooks.py @@ -128,25 +128,6 @@ def __repr__(self): class HookRegistry: '''Global hook registry.''' - @classmethod - def create(cls, namespace): - '''Create a hook registry from a class namespace. - - Hook functions have an `_rfm_attach` attribute that specify the stages - of the pipeline where they must be attached. Dependencies will be - resolved first in the post-setup phase if not assigned elsewhere. - ''' - - local_hooks = util.OrderedSet() - for v in namespace.values(): - if hasattr(v, '_rfm_attach'): - local_hooks.add(Hook(v)) - elif hasattr(v, '_rfm_resolve_deps'): - v._rfm_attach = ['post_setup'] - local_hooks.add(Hook(v)) - - return cls(local_hooks) - def __init__(self, hooks=None): self.__hooks = util.OrderedSet() if hooks is not None: @@ -161,6 +142,20 @@ def __getattr__(self, name): def __iter__(self): return iter(self.__hooks) + def add(self, v): + '''Add value to the hook registry if it meets the conditions. + + Hook functions have an `_rfm_attach` attribute that specify the stages + of the pipeline where they must be attached. Dependencies will be + resolved first in the post-setup phase if not assigned elsewhere. + ''' + + if hasattr(v, '_rfm_attach'): + self.__hooks.add(Hook(v)) + elif hasattr(v, '_rfm_resolve_deps'): + v._rfm_attach = ['post_setup'] + self.__hooks.add(Hook(v)) + def update(self, hooks, *, denied_hooks=None): '''Update the hook registry with the hooks from another hook registry. diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 343eaa45b7..de98445d7d 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -31,27 +31,39 @@ class MetaNamespace(namespaces.LocalNamespace): same class body. Overriding a variable with a parameter or the other way around has an undefined behavior. A variable's value may be updated multiple times within the same class body. A parameter's - value may not be updated more than once within the same class body. + value cannot be updated more than once within the same class body. 
From 17a28810876bb588c470acb5c7ebd344677c43fc Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Fri, 30 Jul 2021 19:43:19 +0200
Subject: [PATCH 17/34] Cleanup the metaclass

---
 reframe/core/hooks.py        |  33 +++----
 reframe/core/meta.py         | 161 ++++++++++++++++++-----------------
 reframe/core/namespaces.py   |   2 +-
 unittests/test_parameters.py |   7 +++--
 unittests/test_variables.py  |   3 +-
 5 files changed, 105 insertions(+), 101 deletions(-)

diff --git a/reframe/core/hooks.py b/reframe/core/hooks.py
index e87db4d0f1..99aef67809 100644
--- a/reframe/core/hooks.py
+++ b/reframe/core/hooks.py
@@ -128,25 +128,6 @@ def __repr__(self):
 class HookRegistry:
     '''Global hook registry.'''
 
-    @classmethod
-    def create(cls, namespace):
-        '''Create a hook registry from a class namespace.
-
-        Hook functions have an `_rfm_attach` attribute that specify the stages
-        of the pipeline where they must be attached. Dependencies will be
-        resolved first in the post-setup phase if not assigned elsewhere.
-        '''
-
-        local_hooks = util.OrderedSet()
-        for v in namespace.values():
-            if hasattr(v, '_rfm_attach'):
-                local_hooks.add(Hook(v))
-            elif hasattr(v, '_rfm_resolve_deps'):
-                v._rfm_attach = ['post_setup']
-                local_hooks.add(Hook(v))
-
-        return cls(local_hooks)
-
     def __init__(self, hooks=None):
         self.__hooks = util.OrderedSet()
         if hooks is not None:
@@ -161,6 +142,20 @@ def __getattr__(self, name):
     def __iter__(self):
         return iter(self.__hooks)
 
+    def add(self, v):
+        '''Add value to the hook registry if it meets the conditions.
+
+        Hook functions have an `_rfm_attach` attribute that specifies the
+        stages of the pipeline where they must be attached. Dependencies will
+        be resolved first in the post-setup phase if not assigned elsewhere.
+        '''
+
+        if hasattr(v, '_rfm_attach'):
+            self.__hooks.add(Hook(v))
+        elif hasattr(v, '_rfm_resolve_deps'):
+            v._rfm_attach = ['post_setup']
+            self.__hooks.add(Hook(v))
+
     def update(self, hooks, *, denied_hooks=None):
         '''Update the hook registry with the hooks from another hook registry.
 
diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index 343eaa45b7..de98445d7d 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -31,27 +31,39 @@ class MetaNamespace(namespaces.LocalNamespace):
     same class body. Overriding a variable with a parameter or the other
     way around has an undefined behavior. A variable's value may be
     updated multiple times within the same class body. A parameter's
-    value may not be updated more than once within the same class body.
+    value cannot be updated more than once within the same class body.
     '''
 
     def __setitem__(self, key, value):
         if isinstance(value, variables.TestVar):
             # Insert the attribute in the variable namespace
-            self['_rfm_local_var_space'][key] = value
-            value.__set_name__(self, key)
+            try:
+                self['_rfm_local_var_space'][key] = value
+                value.__set_name__(self, key)
+            except KeyError:
+                raise ReframeSyntaxError(
+                    f'variable {key} is already declared'
+                )
 
-            # Override the regular class attribute (if present)
+            # Override the regular class attribute (if present) and return
             self._namespace.pop(key, None)
+            return
 
         elif isinstance(value, parameters.TestParam):
             # Insert the attribute in the parameter namespace
-            self['_rfm_local_param_space'][key] = value
+            try:
+                self['_rfm_local_param_space'][key] = value
+            except KeyError:
+                raise ReframeSyntaxError(
+                    f'parameter {key} is already declared in this class'
+                )
 
-            # Override the regular class attribute (if present)
+            # Override the regular class attribute (if present) and return
             self._namespace.pop(key, None)
+            return
 
         elif key in self['_rfm_local_param_space']:
-            raise ValueError(
+            raise ReframeSyntaxError(
                 f'cannot override parameter {key!r}'
             )
         else:
@@ -59,6 +71,37 @@ def __setitem__(self, key, value):
             # check from the base namespace.
             self._namespace[key] = value
 
+        # Register functions decorated with the @sanity_function,
+        # @performance_variables or @performance_function decorators.
+        if hasattr(value, '_rfm_sanity_fn'):
+            try:
+                super().__setitem__('_rfm_sanity', value)
+            except KeyError:
+                raise ReframeSyntaxError(
+                    'the @sanity_function decorator can only be used '
+                    'once in the class body'
+                )
+
+        elif hasattr(value, '_rfm_perf_vars'):
+            try:
+                super().__setitem__('_rfm_perf_vars', value)
+            except KeyError:
+                raise ReframeSyntaxError(
+                    'the @performance_variables decorator can only be '
+                    'used once in the class body'
+                )
+
+        elif hasattr(value, '_rfm_perf_fn'):
+            self['_rfm_perf_fns'].add(value)
+
+        # Register the final methods
+        if hasattr(value, '_rfm_final'):
+            self['_rfm_final_methods'].add(key)
+
+        # Register the hooks - if a value does not meet the conditions,
+        # it will simply be ignored
+        self['_rfm_hook_registry'].add(value)
+
     def __getitem__(self, key):
         '''Expose and control access to the local namespaces.
 
@@ -191,6 +234,7 @@ def final(fn):
 
         namespace['bind'] = bind
         namespace['final'] = final
+        namespace['_rfm_final_methods'] = set()
 
         # Hook-related functionality
        def run_before(stage):
@@ -210,6 +254,7 @@ def run_after(stage):
         namespace['run_before'] = run_before
         namespace['run_after'] = run_after
         namespace['require_deps'] = hooks.require_deps
+        namespace['_rfm_hook_registry'] = hooks.HookRegistry()
 
         # Machinery to add a sanity function
         def sanity_function(fn):
@@ -288,6 +333,7 @@ def performance_variables(fn):
 
         namespace['performance_function'] = performance_function
         namespace['performance_variables'] = performance_variables
+        namespace['_rfm_perf_fns'] = set()
         return metacls.MetaNamespace(namespace)
 
     def __new__(metacls, name, bases, namespace, **kwargs):
@@ -338,91 +384,52 @@ def __init__(cls, name, bases, namespace, **kwargs):
         # Update used names set with the local __dict__
         cls._rfm_dir.update(cls.__dict__)
 
-        # Set up the hooks for the pipeline stages based on the _rfm_attach
-        # attribute; all dependencies will be resolved first in the post-setup
-        # phase if not assigned elsewhere
-        hook_reg = hooks.HookRegistry.create(namespace)
-        for base in (b for b in bases if hasattr(b, '_rfm_hook_registry')):
-            hook_reg.update(getattr(base, '_rfm_hook_registry'),
-                            denied_hooks=namespace)
-
-        cls._rfm_hook_registry = hook_reg
+        # Update the hook registry with the bases
+        for base in cls._rfm_bases:
+            cls._rfm_hook_registry.update(
+                base._rfm_hook_registry, denied_hooks=namespace
+            )
 
-        # Gather all the locally defined sanity functions based on the
-        # _rfm_sanity_fn attribute.
-        sn_fn = [v for v in namespace.values() if hasattr(v, '_rfm_sanity_fn')]
-        if sn_fn:
-            cls._rfm_sanity = sn_fn[0]
-            if len(sn_fn) > 1:
-                raise ReframeSyntaxError(
-                    f'{cls.__qualname__!r} defines more than one sanity '
-                    'function in the class body.'
-                )
-
-        else:
-            # Search the bases if no local sanity functions exist.
-            for base in (b for b in bases if hasattr(b, '_rfm_sanity')):
-                cls._rfm_sanity = getattr(base, '_rfm_sanity')
-                if cls._rfm_sanity.__name__ in namespace:
-                    raise ReframeSyntaxError(
-                        f'{cls.__qualname__!r} overrides the candidate '
-                        f'sanity function '
-                        f'{cls._rfm_sanity.__qualname__!r} without '
-                        f'defining an alternative'
-                    )
-
-                break
+        # Search the bases if no local sanity functions exist.
+        if '_rfm_sanity' not in namespace:
+            for base in cls._rfm_bases:
+                if hasattr(base, '_rfm_sanity'):
+                    cls._rfm_sanity = getattr(base, '_rfm_sanity')
+                    if cls._rfm_sanity.__name__ in namespace:
+                        raise ReframeSyntaxError(
+                            f'{cls.__qualname__!r} overrides the candidate '
+                            f'sanity function '
+                            f'{cls._rfm_sanity.__qualname__!r} without '
+                            f'defining an alternative'
+                        )
+
+                    break
 
-        # Gather all the locally defined performance_variables functions based
-        # on the _rfm_perf_vars attribute.
-        perf_report_fn = [
-            v for v in namespace.values() if hasattr(v, '_rfm_perf_vars')
-        ]
-        if perf_report_fn:
-            cls._rfm_perf_vars = perf_report_fn[0]
-            if len(perf_report_fn) > 1:
-                raise ReframeSyntaxError(
-                    f'{cls.__qualname__!r} uses the performance_variables '
-                    f'decorator more than once in the class body.'
-                )
-
-        else:
-            # Search the bases if no local perf report functions exist.
-            for base in (b for b in bases if hasattr(b, '_rfm_perf_vars')):
-                cls._rfm_perf_vars = getattr(base, '_rfm_perf_vars')
-                if cls._rfm_perf_vars.__name__ in namespace:
-                    raise ReframeSyntaxError(
-                        f'{cls.__qualname__!r} overrides the candidate '
-                        f'performance variables function '
-                        f'{cls._rfm_perf_vars.__qualname__!r} without '
-                        f'defining an alternative'
-                    )
-
-                break
+        # Search the bases if no local function uses the
+        # @performance_variables decorator.
+        if '_rfm_perf_vars' not in namespace:
+            for base in cls._rfm_bases:
+                if hasattr(base, '_rfm_perf_vars'):
+                    cls._rfm_perf_vars = getattr(base, '_rfm_perf_vars')
+                    break
 
-        # Build a set with all the performance functions
-        cls._rfm_perf_fns = {
-            v for k, v in namespace.items() if hasattr(v, '_rfm_perf_fn')
-        }
-        for base in (b for b in bases if hasattr(b, '_rfm_perf_fns')):
+        # Update the performance function set with the bases.
+        for base in cls._rfm_bases:
             cls._rfm_perf_fns = cls._rfm_perf_fns.union(
                 {f for f in getattr(base, '_rfm_perf_fns')
                  if f.__name__ not in namespace}
             )
 
-        cls._final_methods = {v.__name__ for v in namespace.values()
-                              if hasattr(v, '_rfm_final')}
-
         # Add the final functions from its parents
-        bases_w_final = [b for b in bases if hasattr(b, '_final_methods')]
-        cls._final_methods.update(*(b._final_methods for b in bases_w_final))
+        cls._rfm_final_methods.update(
+            *(b._rfm_final_methods for b in cls._rfm_bases)
+        )
 
         if getattr(cls, '_rfm_override_final', None):
             return
 
         for v in namespace.values():
-            for b in bases_w_final:
-                if callable(v) and v.__name__ in b._final_methods:
+            for b in cls._rfm_bases:
+                if callable(v) and v.__name__ in b._rfm_final_methods:
                     msg = (f"'{cls.__qualname__}.{v.__name__}' attempts to "
                            f"override final method "
                            f"'{b.__qualname__}.{v.__name__}'; "
                            f"you should use the pipeline hooks instead")
                     raise ReframeSyntaxError(msg)
diff --git a/reframe/core/namespaces.py b/reframe/core/namespaces.py
index eee629a7a0..eb6448a06a 100644
--- a/reframe/core/namespaces.py
+++ b/reframe/core/namespaces.py
@@ -59,7 +59,7 @@ def __repr__(self):
     def _raise_namespace_clash(self, name):
         '''Raise an error if there is a namespace clash.'''
-        raise ValueError(
+        raise KeyError(
             f'{name!r} is already present in the current namespace'
         )
diff --git a/unittests/test_parameters.py b/unittests/test_parameters.py
index fd48df06b4..b447acc3ab 100644
--- a/unittests/test_parameters.py
+++ b/unittests/test_parameters.py
@@ -9,6 +9,7 @@
 
 import reframe as rfm
+from reframe.core.exceptions import ReframeSyntaxError
 
 
 class NoParams(rfm.RunOnlyRegressionTest):
@@ -208,7 +209,7 @@ class Ham(Spam):
 
 def test_double_declare():
-    with pytest.raises(ValueError):
+    with pytest.raises(ReframeSyntaxError):
         class MyTest(rfm.RegressionTest):
             P0 = parameter([1, 2, 3])
             P0 = parameter()
@@ -261,7 +262,7 @@ class Foo(rfm.RegressionMixin):
 
 def test_parameter_override():
-    with pytest.raises(ValueError):
+    with pytest.raises(ReframeSyntaxError):
         # Trigger the check from MetaNamespace
         class MyTest(rfm.RegressionTest):
             p = parameter([1, 2])
@@ -285,7 +286,7 @@ class Foo(rfm.RegressionTest):
 
 def test_override_parameter():
-    with pytest.raises(ValueError):
+    with pytest.raises(ReframeSyntaxError):
         class Foo(rfm.RegressionTest):
             p = parameter([1, 2])
             p = 1
diff --git a/unittests/test_variables.py b/unittests/test_variables.py
index a1c692645a..0372351641 100644
--- a/unittests/test_variables.py
+++ b/unittests/test_variables.py
@@ -8,6 +8,7 @@
 import math
 
 import reframe as rfm
+from reframe.core.exceptions import ReframeSyntaxError
 
 
 @pytest.fixture
@@ -92,7 +93,7 @@ class Eggs(Spam, Ham):
 
 def test_double_declare():
-    with pytest.raises(ValueError):
+    with pytest.raises(ReframeSyntaxError):
         class MyTest(rfm.RegressionTest):
             v0 = variable(int, value=1)
             v0 = variable(float, value=0.5)
From 01a1b780283d122bf4aad61736aac86561f6e993 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Fri, 30 Jul 2021 19:58:37 +0200
Subject: [PATCH 18/34] Update check syntax

---
 cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py | 2 +-
 cscs-checks/system/jobreport/gpu_report.py                | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
index a743cc44da..7d5bac8277 100644
--- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
+++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
@@ -61,7 +61,7 @@ def set_gpu_arch(self):
     def set_num_gpus_per_node(self):
         hooks.set_num_gpus_per_node(self)
 
-    @performance_report
+    @performance_variables
     def report_slow_nodes(self):
         '''Report the base perf metrics and also all the slow nodes.'''
 
diff --git a/cscs-checks/system/jobreport/gpu_report.py b/cscs-checks/system/jobreport/gpu_report.py
index 77a6396487..52f7fc746b 100644
--- a/cscs-checks/system/jobreport/gpu_report.py
+++ b/cscs-checks/system/jobreport/gpu_report.py
@@ -97,7 +97,7 @@ def gpu_usage_sanity(self):
     def total_nodes_reported(self):
         return sn.count(self.nodes_reported)
 
-    @performance_report
+    @performance_variables
     def set_perf_patterns(self):
         '''The number of reported nodes can be used as a perf metric.
From 6f8694d0e0b8fa8de9d66e1066a2e6745318ff71 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Mon, 2 Aug 2021 14:48:36 +0200
Subject: [PATCH 19/34] Transpose nested loop in meta

---
 reframe/core/meta.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index de98445d7d..9896c2a1d9 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -427,12 +427,12 @@ def __init__(cls, name, bases, namespace, **kwargs):
         if getattr(cls, '_rfm_override_final', None):
             return
 
-        for v in namespace.values():
-            for b in cls._rfm_bases:
-                if callable(v) and v.__name__ in b._rfm_final_methods:
-                    msg = (f"'{cls.__qualname__}.{v.__name__}' attempts to "
+        for b in cls._rfm_bases:
+            for key in b._rfm_final_methods:
+                if key in namespace and callable(namespace[key]):
+                    msg = (f"'{cls.__qualname__}.{key}' attempts to "
                            f"override final method "
-                           f"'{b.__qualname__}.{v.__name__}'; "
+                           f"'{b.__qualname__}.{key}'; "
                            f"you should use the pipeline hooks instead")
                     raise ReframeSyntaxError(msg)
pytest.raises(ValueError): + with pytest.raises(ReframeSyntaxError): class MyTest(rfm.RegressionTest): v0 = variable(int, value=1) v0 = variable(float, value=0.5) From 01a1b780283d122bf4aad61736aac86561f6e993 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Fri, 30 Jul 2021 19:58:37 +0200 Subject: [PATCH 18/34] Update check syntax --- cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py | 2 +- cscs-checks/system/jobreport/gpu_report.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py index a743cc44da..7d5bac8277 100644 --- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py +++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py @@ -61,7 +61,7 @@ def set_gpu_arch(self): def set_num_gpus_per_node(self): hooks.set_num_gpus_per_node(self) - @performance_report + @performance_variables def report_slow_nodes(self): '''Report the base perf metrics and also all the slow nodes.''' diff --git a/cscs-checks/system/jobreport/gpu_report.py b/cscs-checks/system/jobreport/gpu_report.py index 77a6396487..52f7fc746b 100644 --- a/cscs-checks/system/jobreport/gpu_report.py +++ b/cscs-checks/system/jobreport/gpu_report.py @@ -97,7 +97,7 @@ def gpu_usage_sanity(self): def total_nodes_reported(self): return sn.count(self.nodes_reported) - @performance_report + @performance_variables def set_perf_patterns(self): '''The number of reported nodes can be used as a perf metric. From 6f8694d0e0b8fa8de9d66e1066a2e6745318ff71 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Mon, 2 Aug 2021 14:48:36 +0200 Subject: [PATCH 19/34] Transpose nested loop in meta --- reframe/core/meta.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index de98445d7d..9896c2a1d9 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -427,12 +427,12 @@ def __init__(cls, name, bases, namespace, **kwargs): if getattr(cls, '_rfm_override_final', None): return - for v in namespace.values(): - for b in cls._rfm_bases: - if callable(v) and v.__name__ in b._rfm_final_methods: - msg = (f"'{cls.__qualname__}.{v.__name__}' attempts to " + for b in cls._rfm_bases: + for key in b._rfm_final_methods: + if key in namespace and callable(namespace[key]): + msg = (f"'{cls.__qualname__}.{key}' attempts to " f"override final method " - f"'{b.__qualname__}.{v.__name__}'; " + f"'{b.__qualname__}.{key}'; " f"you should use the pipeline hooks instead") raise ReframeSyntaxError(msg) From 84deeb0fc1edf51d4b4bb5ba8bb02129ee91fdb0 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Tue, 3 Aug 2021 15:16:01 +0200 Subject: [PATCH 20/34] Prototype test syntax --- cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py | 4 ++-- cscs-checks/system/jobreport/gpu_report.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py index 7d5bac8277..9285585e12 100644 --- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py +++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py @@ -61,7 +61,7 @@ def set_gpu_arch(self): def set_num_gpus_per_node(self): hooks.set_num_gpus_per_node(self) - @performance_variables + @run_before('performance') def report_slow_nodes(self): '''Report the base perf metrics and also all the slow nodes.''' @@ -88,4 +88,4 @@ def report_slow_nodes(self): except SanityError: 
perf_report[nid] = self.perf(nid) - return perf_report + self.perf_variables = perf_report diff --git a/cscs-checks/system/jobreport/gpu_report.py b/cscs-checks/system/jobreport/gpu_report.py index 52f7fc746b..0205ec7bf3 100644 --- a/cscs-checks/system/jobreport/gpu_report.py +++ b/cscs-checks/system/jobreport/gpu_report.py @@ -97,8 +97,8 @@ def gpu_usage_sanity(self): def total_nodes_reported(self): return sn.count(self.nodes_reported) - @performance_variables - def set_perf_patterns(self): + @run_before('performance') + def set_perf_variables(self): '''The number of reported nodes can be used as a perf metric. For now, the low limit can go to zero, but this can be set to a more @@ -110,6 +110,6 @@ def set_perf_patterns(self): 'nodes_reported': (self.num_tasks, self.perf_floor, 0) }, } - return { + self.perf_variables = { 'nodes_reported': self.total_nodes_reported() } From e560cbd25cdb41f647aa28a09c762fa82b153b41 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Tue, 3 Aug 2021 19:01:12 +0200 Subject: [PATCH 21/34] Make perf_variables a variable --- reframe/core/deferrable.py | 6 +-- reframe/core/meta.py | 91 ++------------------------------------ reframe/core/pipeline.py | 71 ++++++++++++++++++++--------- 3 files changed, 57 insertions(+), 111 deletions(-) diff --git a/reframe/core/deferrable.py b/reframe/core/deferrable.py index 1014d6bb7b..a9b3757689 100644 --- a/reframe/core/deferrable.py +++ b/reframe/core/deferrable.py @@ -56,17 +56,17 @@ def evaluate(self): fn_args = [] for arg in self._args: fn_args.append( - arg.evaluate() if isinstance(arg, type(self)) else arg + arg.evaluate() if isinstance(arg, _DeferredExpression) else arg ) fn_kwargs = {} for k, v in self._kwargs.items(): fn_kwargs[k] = ( - v.evaluate() if isinstance(v, type(self)) else v + v.evaluate() if isinstance(v, _DeferredExpression) else v ) ret = self._fn(*fn_args, **fn_kwargs) - if isinstance(ret, type(self)): + while isinstance(ret, _DeferredExpression): ret = ret.evaluate() self._cached = (ret,) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 9896c2a1d9..bdfa4ddf98 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -14,6 +14,7 @@ import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks +import reframe.core.performance as perf import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError @@ -82,16 +83,7 @@ def __setitem__(self, key, value): 'once in the class body' ) - elif hasattr(value, '_rfm_perf_vars'): - try: - super().__setitem__('_rfm_perf_vars', value) - except KeyError: - raise ReframeSyntaxError( - 'the @performance_variables decorator can only be ' - 'used once in the class body' - ) - - elif hasattr(value, '_rfm_perf_fn'): + elif hasattr(value, '_rfm_perf_key'): self['_rfm_perf_fns'].add(value) # Register the final methods @@ -271,68 +263,8 @@ def sanity_function(fn): namespace['sanity_function'] = sanity_function namespace['deferrable'] = deferrable - # Machinery to add performance variables - def performance_function(units, *, perf_key=None): - '''Decorate a function to extract a performance variable. - - The ``units`` argument indicates the units of the performance - variable to be extracted. - The ``perf_key`` optional arg will be used as the name of the - performance variable. If not provided, the function name will - be used as the performance variable name. 
- ''' - - class perf_units: - __slots__ = ('_rfm_perf_units') - - def __init__(self, units): - if not isinstance(units, str): - raise TypeError( - 'performance units must be provided in string ' - 'format' - ) - - self._rfm_perf_units = units - - units = perf_units(units) - - def _fn(fn): - # Performance functions must only have a single positional arg - # without a default value - if not utils.is_trivially_callable(fn, non_def_args=1): - raise ReframeSyntaxError( - f'performance function {fn.__name__} has more than ' - f'one argument without a default value' - ) - - @deferrable - @functools.wraps(fn) - def _perf_fn(*args, **kwargs): - ret = fn(*args, **kwargs) - if isinstance(ret, _DeferredExpression): - return ret.evaluate(), units - else: - return ret, units - - setattr(_perf_fn, '_rfm_perf_fn', units) - _key = perf_key if perf_key else _perf_fn.__name__ - setattr(_perf_fn, '_rfm_perf_name', _key) - - return _perf_fn - - return _fn - - def performance_variables(fn): - '''Mark a function to generate the dict with all the perf vars. - - It must return an object of type - ``typ.Dict[str, _DeferredExpression]``. - ''' - setattr(fn, '_rfm_perf_vars', True) - return fn - - namespace['performance_function'] = performance_function - namespace['performance_variables'] = performance_variables + # Machinery to add performance functions + namespace['performance_function'] = perf.perf_deco namespace['_rfm_perf_fns'] = set() return metacls.MetaNamespace(namespace) @@ -346,14 +278,6 @@ class was created or even at the instance level (e.g. doing constructed. ''' - perf_fn_deco = namespace['performance_function'] - - def make_performance_function(self, fn, unit, *args, **kwargs): - '''Wrapper to make a performance function inline.''' - return perf_fn_deco(unit)(fn)(self, *args, **kwargs) - - namespace['make_performance_function'] = make_performance_function - directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', @@ -405,13 +329,6 @@ def __init__(cls, name, bases, namespace, **kwargs): break - # Search the bases if no local fn used the deco @performance_variables. - if '_rfm_perf_vars' not in namespace: - for base in cls._rfm_bases: - if hasattr(base, '_rfm_perf_vars'): - cls._rfm_perf_vars = getattr(base, '_rfm_perf_vars') - break - # Update the performance function set with the bases. for base in cls._rfm_bases: cls._rfm_perf_fns = cls._rfm_perf_fns.union( diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 69e9b9dce2..b255179ae1 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -36,6 +36,7 @@ from reframe.core.buildsystems import BuildSystemField from reframe.core.containers import ContainerPlatformField from reframe.core.deferrable import _DeferredExpression +from reframe.core.performance import (DeferredPerformanceFunction, perf_deco) from reframe.core.exceptions import (BuildError, DependencyError, PerformanceError, PipelineError, SanityError, SkipTestError, @@ -123,6 +124,24 @@ class RegressionMixin(metaclass=RegressionTestMeta): .. versionadded:: 3.4.2 ''' + @final + def make_performance_function(self, func, units, *args, **kwargs): + '''Wrapper to make a performance function inline. + + If ``func`` is an instance of the :class:`_DeferredExpression` class, + the performance function will be built by extending this deferred + expression into a deferred performance function. Otherwise, a new + deferred performance function will be created from the function + :func:`func`. 
+ ''' + if isinstance(func, _DeferredExpression): + return DeferredPerformanceFunction.build_from_deferred_expr( + func, units + ) + else: + return DeferredPerformanceFunction(func, units, self, + *args, **kwargs) + def __getattr__(self, name): ''' Intercept the AttributeError if the name is a required variable.''' if (name in self._rfm_var_space and @@ -638,6 +657,24 @@ def pipeline_hooks(cls): #: :default: :class:`None` perf_patterns = variable(typ.Dict[str, _DeferredExpression], type(None)) + #: Mapping with the performance variables of the test. + #: + #: Refer to the :doc:`ReFrame Tutorials ` for concrete usage + #: examples. + #: + #: If no performance variables are explicitly provided, ReFrame will + #: populate this field with all the member functions decorated with the + #: :func:`performance_function` decorator. If no performance functions + #: are present in the class, no performance checking or reporting will + #: be carried out. + #: + #: :type: A dictionary with keys of type :class:`str` and values of type + #: :class:`DeferredPerformanceFunction`. A + #: :class:`DeferredPerformanceFunction` object is obtained when a + #: function decorated with the :func:`performance_function` is called. + #: :default: :class:`required` + perf_variables = variable(typ.Dict[str, DeferredPerformanceFunction]) + #: List of modules to be loaded before running this test. #: #: These modules will be loaded during the :func:`setup` phase. @@ -1658,7 +1695,7 @@ def check_performance(self): ''' with osext.change_dir(self._stagedir): - if hasattr(self, '_rfm_perf_vars') or self._rfm_perf_fns: + if hasattr(self, 'perf_variables') or self._rfm_perf_fns: if hasattr(self, 'perf_patterns'): raise ReframeSyntaxError( f"assigning a value to 'perf_pattenrs' conflicts ", @@ -1667,28 +1704,20 @@ def check_performance(self): ) # Build the performance dict - if not hasattr(self, '_rfm_perf_vars'): - self.perf_patterns = {} + if not hasattr(self, 'perf_variables'): + self.perf_variables = {} for fn in self._rfm_perf_fns: - self.perf_patterns[fn._rfm_perf_name] = fn(self) - - else: - self.perf_patterns = self._rfm_perf_vars() + self.perf_variables[fn._rfm_perf_key] = fn(self) # Log the performance variables self._setup_perf_logging() - for tag, expr in self.perf_patterns.items(): + for tag, expr in self.perf_variables.items(): try: - value, unit = sn.evaluate(expr) - unit = getattr(unit, '_rfm_perf_units', None) - if not unit: - raise TypeError('Trigger the TypeError below') - - except TypeError: - raise ReframeSyntaxError( - f'function assigned for performance variable ' - f'{tag!r} is not a performance function' - ) + value = expr.evaluate() + unit = expr.units + except Exception as e: + # Probably makes sense to raise a warning here instead + raise e key = '%s:%s' % (self._current_partition.fullname, tag) try: @@ -1709,11 +1738,11 @@ def check_performance(self): ref = ref[:3] except KeyError: - ref = (0, None, None) + ref = (0, None, None, unit) - self._perfvalues[key] = (value, *ref, unit) + self._perfvalues[key] = (value, *ref) self._perf_logger.log_performance(logging.INFO, tag, value, - *ref, unit) + *ref) elif not hasattr(self, 'perf_patterns'): return From e0db6c5098c4d27c51f2707dd6742bc478b044c4 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Wed, 4 Aug 2021 11:26:59 +0200 Subject: [PATCH 22/34] Give perf_variables a default value --- reframe/core/deferrable.py | 27 ++++++++++++++++ reframe/core/meta.py | 47 +++++++++++++++++++++++++--- reframe/core/pipeline.py | 63 
+++++++++++++++++++------------------- 3 files changed, 102 insertions(+), 35 deletions(-) diff --git a/reframe/core/deferrable.py b/reframe/core/deferrable.py index a9b3757689..5037e24b16 100644 --- a/reframe/core/deferrable.py +++ b/reframe/core/deferrable.py @@ -6,6 +6,8 @@ import builtins import functools +import reframe.utility as utils + def deferrable(func): '''Function decorator for converting a function to a deferred @@ -355,3 +357,28 @@ def __abs__(a): @deferrable def __invert__(a): return ~a + + +class _DeferredPerformanceExpression(_DeferredExpression): + '''Represents a performance function whose evaluation has been deferred. + + It extends the :class:`_DeferredExpression` class by adding the ``unit`` + attribute. This attribute represents the unit of the performance + metric to be extracted by the performance function. + ''' + + def __init__(self, fn, unit, *args, **kwargs): + super().__init__(fn, *args, **kwargs) + self._unit = unit + + @classmethod + def construct_from_deferred_expr(cls, expr, unit): + if not isinstance(expr, _DeferredExpression): + raise TypeError("'expr' argument is not an instance of the " + "_DeferredExpression class") + + return cls(expr._fn, unit, *(expr._args), **(expr._kwargs)) + + @property + def unit(self): + return self._unit diff --git a/reframe/core/meta.py b/reframe/core/meta.py index bdfa4ddf98..4be2ad68fe 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -14,11 +14,11 @@ import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks -import reframe.core.performance as perf import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError -from reframe.core.deferrable import deferrable, _DeferredExpression +from reframe.core.deferrable import (deferrable, _DeferredExpression, + _DeferredPerformanceExpression) class RegressionTestMeta(type): @@ -264,7 +264,46 @@ def sanity_function(fn): namespace['deferrable'] = deferrable # Machinery to add performance functions - namespace['performance_function'] = perf.perf_deco + def performance_function(units, *, perf_key=None): + '''Decorate a function to extract a performance variable. + + The ``units`` argument indicates the units of the performance + variable to be extracted. + The ``perf_key`` optional arg will be used as the name of the + performance variable. If not provided, the function name will + be used as the performance variable name. + ''' + + if not isinstance(units, str): + raise TypeError( + 'performance units must be provided in string format' + ) + + if perf_key and not isinstance(perf_key, str): + raise TypeError("'perf_key' must be a string") + + def _deco_wrapper(func): + if not utils.is_trivially_callable(func, non_def_args=1): + raise ReframeSyntaxError( + f'performance function {fn.__name__!r} has more than ' + f'one argument without a default value' + ) + + @functools.wraps(func) + def _perf_fn(*args, **kwargs): + return _DeferredPerformanceExpression( + func, units, *args, **kwargs + ) + + # Set the perf_key + _perf_key = perf_key if perf_key else func.__name__ + setattr(_perf_fn, '_rfm_perf_key', _perf_key) + + return _perf_fn + + return _deco_wrapper + + namespace['performance_function'] = performance_function namespace['_rfm_perf_fns'] = set() return metacls.MetaNamespace(namespace) @@ -281,7 +320,7 @@ class was created or even at the instance level (e.g. 
doing directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', - 'final', 'performance_function', 'performance_variables' + 'final', 'performance_function' ] for b in directives: namespace.pop(b, None) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index b255179ae1..e5452bc2af 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -35,8 +35,8 @@ from reframe.core.backends import getlauncher, getscheduler from reframe.core.buildsystems import BuildSystemField from reframe.core.containers import ContainerPlatformField -from reframe.core.deferrable import _DeferredExpression -from reframe.core.performance import (DeferredPerformanceFunction, perf_deco) +from reframe.core.deferrable import (_DeferredExpression, + _DeferredPerformanceExpression) from reframe.core.exceptions import (BuildError, DependencyError, PerformanceError, PipelineError, SanityError, SkipTestError, @@ -125,7 +125,7 @@ class RegressionMixin(metaclass=RegressionTestMeta): ''' @final - def make_performance_function(self, func, units, *args, **kwargs): + def make_performance_function(self, func, unit, *args, **kwargs): '''Wrapper to make a performance function inline. If ``func`` is an instance of the :class:`_DeferredExpression` class, @@ -135,12 +135,12 @@ def make_performance_function(self, func, units, *args, **kwargs): :func:`func`. ''' if isinstance(func, _DeferredExpression): - return DeferredPerformanceFunction.build_from_deferred_expr( - func, units + return _DeferredPerformanceExpression.construct_from_deferred_expr( + func, unit ) else: - return DeferredPerformanceFunction(func, units, self, - *args, **kwargs) + return _DeferredPerformanceExpression(func, unit, self, + *args, **kwargs) def __getattr__(self, name): ''' Intercept the AttributeError if the name is a required variable.''' @@ -663,17 +663,18 @@ def pipeline_hooks(cls): #: examples. #: #: If no performance variables are explicitly provided, ReFrame will - #: populate this field with all the member functions decorated with the - #: :func:`performance_function` decorator. If no performance functions - #: are present in the class, no performance checking or reporting will - #: be carried out. + #: populate this field during the test's instantiation with all the member + #: functions decorated with the :func:`performance_function` decorator. If + #: no performance functions are present in the class, no performance + #: checking or reporting will be carried out. #: #: :type: A dictionary with keys of type :class:`str` and values of type - #: :class:`DeferredPerformanceFunction`. A - #: :class:`DeferredPerformanceFunction` object is obtained when a + #: :class:`_DeferredPerformanceExpression`. A + #: :class:`_DeferredPerformanceExpression` object is obtained when a #: function decorated with the :func:`performance_function` is called. - #: :default: :class:`required` - perf_variables = variable(typ.Dict[str, DeferredPerformanceFunction]) + #: :default: ``{}`` + perf_variables = variable(typ.Dict[str, _DeferredPerformanceExpression], + value={}) #: List of modules to be loaded before running this test. #: @@ -681,8 +682,8 @@ def pipeline_hooks(cls): #: #: :type: :class:`List[str]` #: :default: ``[]`` - modules = variable( - typ.List[str], typ.List[typ.Dict[str, object]], value=[]) + modules = variable(typ.List[str], typ.List[typ.Dict[str, object]], + value=[]) #: Environment variables to be set before running this test. 
#: @@ -847,7 +848,7 @@ def __new__(cls, *args, **kwargs): # Build pipeline hook registry and add the pre-init hook cls._rfm_pipeline_hooks = cls._process_hook_registry() - cls._rfm_pipeline_hooks['pre___init__'] = [cls.__pre_init__] + cls._rfm_pipeline_hooks['pre___init__'] = [obj.__pre_init__] # Attach the hooks to the pipeline stages for stage in _PIPELINE_STAGES: @@ -860,6 +861,11 @@ def __pre_init__(self): '''Initialize the test defaults from a pre-init hook.''' self.__deferred_rfm_init.evaluate() + # Build the default performance dict + if not self.perf_variables: + for fn in self._rfm_perf_fns: + self.perf_variables[fn._rfm_perf_key] = fn(self) + def __init__(self): pass @@ -1695,26 +1701,21 @@ def check_performance(self): ''' with osext.change_dir(self._stagedir): - if hasattr(self, 'perf_variables') or self._rfm_perf_fns: + if self.perf_variables or self._rfm_perf_fns: if hasattr(self, 'perf_patterns'): raise ReframeSyntaxError( f"assigning a value to 'perf_pattenrs' conflicts ", - f"with using the 'performance_variables' decorator ", + f"with using the 'performance_function' decorator ", + f"or setting a value to 'perf_variables' " f"(class {self.__class__.__qualname__})" ) - # Build the performance dict - if not hasattr(self, 'perf_variables'): - self.perf_variables = {} - for fn in self._rfm_perf_fns: - self.perf_variables[fn._rfm_perf_key] = fn(self) - # Log the performance variables self._setup_perf_logging() for tag, expr in self.perf_variables.items(): try: value = expr.evaluate() - unit = expr.units + unit = expr.unit except Exception as e: # Probably makes sense to raise a warning here instead raise e @@ -1729,7 +1730,7 @@ def check_performance(self): if len(ref) == 4: if ref[3] != unit: raise ReframeSyntaxError( - f'units for {tag!r} in the reference ' + f'unit for {tag!r} in the reference ' f'{key!r} do not match those specified in ' f'the performance function ({unit})' ) @@ -1738,11 +1739,11 @@ def check_performance(self): ref = ref[:3] except KeyError: - ref = (0, None, None, unit) + ref = (0, None, None) - self._perfvalues[key] = (value, *ref) + self._perfvalues[key] = (value, *ref, unit) self._perf_logger.log_performance(logging.INFO, tag, value, - *ref) + *ref, unit) elif not hasattr(self, 'perf_patterns'): return From 164c59e792489c0eafcb265ccce7ac86dc194757 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Wed, 4 Aug 2021 12:01:12 +0200 Subject: [PATCH 23/34] Raise warning if any perf var fails --- reframe/core/pipeline.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index e5452bc2af..d751606da1 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1717,8 +1717,10 @@ def check_performance(self): value = expr.evaluate() unit = expr.unit except Exception as e: - # Probably makes sense to raise a warning here instead - raise e + logging.getlogger().warning( + f'skipping evaluation of performance variable ' + f'{tag!r} in test {self.name!r}: {e}' + ) key = '%s:%s' % (self._current_partition.fullname, tag) try: @@ -1729,10 +1731,11 @@ def check_performance(self): # performance function. 
if len(ref) == 4: if ref[3] != unit: - raise ReframeSyntaxError( - f'unit for {tag!r} in the reference ' - f'{key!r} do not match those specified in ' - f'the performance function ({unit})' + logging.getlogger().warning( + f'unit for the performance variable ' + f'{tag!r} in the reference {key!r} ' + f'does not match the unit specified ' + f'in the performance function ({unit!r})' ) # Pop the unit from the ref tuple (redundant) From 0d5f4cb2a334771833281d9d1d5a24e1d578666a Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Wed, 4 Aug 2021 14:16:06 +0200 Subject: [PATCH 24/34] Address PR comments --- .../gpu/gpu_burn/gpu_burn_test.py | 51 +++++++++---------- .../microbenchmarks/gpu/gpu_burn/__init__.py | 16 +++--- reframe/core/deferrable.py | 6 +++ reframe/core/meta.py | 24 +++------ reframe/core/pipeline.py | 24 ++++----- reframe/utility/__init__.py | 7 ++- 6 files changed, 58 insertions(+), 70 deletions(-) diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py index 9285585e12..74dd5380d1 100644 --- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py +++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py @@ -6,6 +6,7 @@ import reframe as rfm import reframe.utility.sanity as sn +import reframe.utility.osext as osext from reframe.core.exceptions import SanityError from hpctestlib.microbenchmarks.gpu.gpu_burn import GpuBurn @@ -24,28 +25,28 @@ class gpu_burn_check(GpuBurn): num_tasks = 0 reference = { 'dom:gpu': { - 'min_perf': (4115, -0.10, None), + 'min_perf': (4115, -0.10, None, 'Gflop/s'), }, 'daint:gpu': { - 'min_perf': (4115, -0.10, None), + 'min_perf': (4115, -0.10, None, 'Gflop/s'), }, 'arolla:cn': { - 'min_perf': (5861, -0.10, None), + 'min_perf': (5861, -0.10, None, 'Gflop/s'), }, 'tsa:cn': { - 'min_perf': (5861, -0.10, None), + 'min_perf': (5861, -0.10, None, 'Gflop/s'), }, 'ault:amda100': { - 'min_perf': (15000, -0.10, None), + 'min_perf': (15000, -0.10, None, 'Gflop/s'), }, 'ault:amdv100': { - 'min_perf': (5500, -0.10, None), + 'min_perf': (5500, -0.10, None, 'Gflop/s'), }, 'ault:intelv100': { - 'min_perf': (5500, -0.10, None), + 'min_perf': (5500, -0.10, None, 'Gflop/s'), }, 'ault:amdvega': { - 'min_perf': (3450, -0.10, None), + 'min_perf': (3450, -0.10, None, 'Gflop/s'), }, } @@ -65,27 +66,21 @@ def set_num_gpus_per_node(self): def report_slow_nodes(self): '''Report the base perf metrics and also all the slow nodes.''' - # Dict with the base perf metrics to report - perf_report = { - 'min_perf': self.perf(), - 'max_temp': self.temp(), - } - # Only report the nodes that don't meet the perf reference - key = f'{self.current_partition.fullname}:min_perf' - if key in self.reference: - regex = r'\[(\S+)\] GPU\s+\d\(OK\): (\d+) GF/s' - nids = set(sn.extractall(regex, self.stdout, 1)) + with osext.change_dir(self.stagedir): + key = f'{self.current_partition.fullname}:min_perf' + if key in self.reference: + regex = r'\[(\S+)\] GPU\s+\d\(OK\): (\d+) GF/s' + nids = set(sn.extractall(regex, self.stdout, 1)) - # Get the references - ref, lt, ut = self.reference[key] + # Get the references + ref, lt, ut, *_ = self.reference[key] - # Flag the slow nodes - for nid in nids: - try: - val = self.perf(nid).evaluate()[0] - sn.assert_reference(val, ref, lt, ut).evaluate() - except SanityError: - perf_report[nid] = self.perf(nid) + # Flag the slow nodes + for nid in nids: + try: + val = self.min_perf(nid) + sn.assert_reference(val, ref, lt, ut).evaluate() + except SanityError: + self.perf_variables[nid] = 
val - self.perf_variables = perf_report diff --git a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py index 8dd7491207..1b02384c23 100644 --- a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py +++ b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py @@ -96,12 +96,12 @@ def count_successful_burns(self): r'^\s*\[[^\]]*\]\s*GPU\s*\d+\(OK\)', self.stdout) ), self.num_tasks_assigned) - def extract_perf(self, what, nid=None): + def _extract_perf_metric(self, metric, nid=None): '''Utility to extract performance metrics.''' - if what not in {'perf', 'temp'}: + if metric not in {'perf', 'temp'}: raise ValueError( - f"unsupported value in 'what' argument ({what}!r)" + f"unsupported value in 'metric' argument ({metric}!r)" ) if nid is None: @@ -109,14 +109,14 @@ def extract_perf(self, what, nid=None): patt = (rf'^\s*\[{nid}\]\s*GPU\s+\d+\(\S*\):\s+(?P\S*)\s+GF\/s' rf'\s+(?P\S*)\s+Celsius') - return sn.extractall(patt, self.stdout, what, float).evaluate() + return sn.extractall(patt, self.stdout, metric, float) @performance_function('Gflop/s') - def perf(self, nid=None): + def min_perf(self, nid=None): '''Lowest performance recorded.''' - return sn.min(self.extract_perf('perf', nid)) + return sn.min(self._extract_perf_metric('perf', nid)) @performance_function('degC') - def temp(self, nid=None): + def max_temp(self, nid=None): '''Maximum temperature recorded.''' - return sn.max(self.extract_perf('temp', nid)) + return sn.max(self._extract_perf_metric('temp', nid)) diff --git a/reframe/core/deferrable.py b/reframe/core/deferrable.py index 5037e24b16..47ecab2064 100644 --- a/reframe/core/deferrable.py +++ b/reframe/core/deferrable.py @@ -369,6 +369,12 @@ class _DeferredPerformanceExpression(_DeferredExpression): def __init__(self, fn, unit, *args, **kwargs): super().__init__(fn, *args, **kwargs) + + if not isinstance(unit, str): + raise TypeError( + 'performance units must be a string' + ) + self._unit = unit @classmethod diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 4be2ad68fe..a331b4ecde 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -43,8 +43,8 @@ def __setitem__(self, key, value): value.__set_name__(self, key) except KeyError: raise ReframeSyntaxError( - f'variable {key} is already declared' - ) + f'variable {key!r} is already declared' + ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) @@ -56,8 +56,8 @@ def __setitem__(self, key, value): self['_rfm_local_param_space'][key] = value except KeyError: raise ReframeSyntaxError( - f'parameter {key} is already declared in this class' - ) + f'parameter {key!r} is already declared in this class' + ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) @@ -81,8 +81,7 @@ def __setitem__(self, key, value): raise ReframeSyntaxError( 'the @sanity_function decorator can only be used ' 'once in the class body' - ) - + ) from None elif hasattr(value, '_rfm_perf_key'): self['_rfm_perf_fns'].add(value) @@ -274,11 +273,6 @@ def performance_function(units, *, perf_key=None): be used as the performance variable name. 
'''

-            if not isinstance(units, str):
-                raise TypeError(
-                    'performance units must be provided in string format'
-                )
-
             if perf_key and not isinstance(perf_key, str):
                 raise TypeError("'perf_key' must be a string")

@@ -295,10 +289,8 @@ def _perf_fn(*args, **kwargs):
                         func, units, *args, **kwargs
                     )

-                # Set the perf_key
                 _perf_key = perf_key if perf_key else func.__name__
                 setattr(_perf_fn, '_rfm_perf_key', _perf_key)
-
                 return _perf_fn

             return _deco_wrapper

@@ -370,10 +362,8 @@ def __init__(cls, name, bases, namespace, **kwargs):

         # Update the performance function set with the bases.
         for base in cls._rfm_bases:
-            cls._rfm_perf_fns = cls._rfm_perf_fns.union(
-                {f for f in getattr(base, '_rfm_perf_fns')
-                 if f.__name__ not in namespace}
-            )
+            cls._rfm_perf_fns |= {f for f in getattr(base, '_rfm_perf_fns')
+                                  if f.__name__ not in namespace}

         # Add the final functions from its parents
         cls._rfm_final_methods.update(
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index d751606da1..a92d18589d 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1722,32 +1722,30 @@ def check_performance(self):
                         f'{tag!r} in test {self.name!r}: {e}'
                     )

-                    key = '%s:%s' % (self._current_partition.fullname, tag)
+                    key = f'{self._current_partition.fullname}:{tag}'
                     try:
                         ref = self.reference[key]

-                        # If units are also provided in the reference, make
-                        # sure they match with the units provided by the
-                        # performance function.
+                        # If units are also provided in the reference, raise
+                        # a warning if they do not match the units provided
+                        # by the performance function.
                         if len(ref) == 4:
                             if ref[3] != unit:
                                 logging.getlogger().warning(
                                     f'reference unit ({key!r}) for the '
                                     f'performance variable {tag!r} '
                                     f'does not match the unit specified '
                                     f'in the performance function ({unit!r})'
                                 )

                             # Pop the unit from the ref tuple (redundant)
                             ref = ref[:3]
-
                     except KeyError:
                         ref = (0, None, None)

                     self._perfvalues[key] = (value, *ref, unit)
                     self._perf_logger.log_performance(logging.INFO, tag, value,
                                                       *ref, unit)
-
             elif not hasattr(self, 'perf_patterns'):
                 return
             else:
@@ -1783,11 +1781,11 @@ def check_performance(self):
                 # even if they don't meet the reference.
for tag, expr in self.perf_patterns.items():
                 value = sn.evaluate(expr)
-                key = '%s:%s' % (self._current_partition.fullname, tag)
+                key = f'{self._current_partition.fullname}:{tag}'
                 if key not in self.reference:
                     raise SanityError(
-                        "tag `%s' not resolved in references for `%s'" %
-                        (tag, self._current_partition.fullname))
+                        f'tag {tag!r} not resolved in references for '
+                        f'{self._current_partition.fullname}')

                 self._perfvalues[key] = (value, *self.reference[key])
                 self._perf_logger.log_performance(logging.INFO, tag, value,
@@ -1800,8 +1798,8 @@ def check_performance(self):
             # Verify that val is a number
             if not isinstance(val, numbers.Number):
                 raise SanityError(
                     f'the value extracted for performance variable '
                     f'{key!r} is not a number: {val}'
                 )

             tag = key.split(':')[-1]
diff --git a/reframe/utility/__init__.py b/reframe/utility/__init__.py
index 2b8d211dc9..70e86378ab 100644
--- a/reframe/utility/__init__.py
+++ b/reframe/utility/__init__.py
@@ -319,10 +319,9 @@ def is_trivially_callable(fn, *, non_def_args=0):
     if not callable(fn):
         raise TypeError('argument is not a callable')

-    return len(
-        [p for p in inspect.signature(fn).parameters.values()
-         if p.default is p.empty]
-    ) == non_def_args
+    explicit_args = [p for p in inspect.signature(fn).parameters.values()
+                     if p.default is p.empty]
+    return len(explicit_args) == non_def_args


 def _is_builtin_type(cls):

From bfdd7583dec9d84ee77edba59cc5478ed3c11832 Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Wed, 4 Aug 2021 15:01:32 +0200
Subject: [PATCH 25/34] Remove unused imports

---
 reframe/core/deferrable.py | 2 --
 reframe/core/meta.py       | 3 +--
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/reframe/core/deferrable.py b/reframe/core/deferrable.py
index 47ecab2064..2bddbe1c46 100644
--- a/reframe/core/deferrable.py
+++ b/reframe/core/deferrable.py
@@ -6,8 +6,6 @@
 import builtins
 import functools

-import reframe.utility as utils
-

 def deferrable(func):
     '''Function decorator for converting a function to a deferred
diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index 0f4c88356d..32ab7b9c56 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -17,8 +17,7 @@
 import reframe.utility as utils

 from reframe.core.exceptions import ReframeSyntaxError
-from reframe.core.deferrable import (deferrable, _DeferredExpression,
-                                     _DeferredPerformanceExpression)
+from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression


 class RegressionTestMeta(type):

From 7499b4de72962525b4cc4228f818fe72a223b51a Mon Sep 17 00:00:00 2001
From: Javier Otero
Date: Fri, 6 Aug 2021 18:48:22 +0200
Subject: [PATCH 26/34] Add unit tests

---
 reframe/core/meta.py         |   8 ++-
 reframe/core/pipeline.py     |   1 +
 unittests/test_deferrable.py |  31 ++++++++
 unittests/test_meta.py       |  90 ++++++++++++++++++++++++
 unittests/test_pipeline.py   | 132 ++++++++++++++++++++++++++++++++++-
 unittests/test_utility.py    |   2 +
 6 files changed, 260 insertions(+), 4 deletions(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index 32ab7b9c56..3100055815 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -271,15 +271,17 @@ def performance_function(units, *, perf_key=None):
             performance variable. If not provided, the function name will
             be used as the performance variable name.
''' + if not isinstance(units, str): + raise ReframeSyntaxError('performance units must be a string') if perf_key and not isinstance(perf_key, str): - raise TypeError("'perf_key' must be a string") + raise ReframeSyntaxError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): raise ReframeSyntaxError( - f'performance function {fn.__name__!r} has more than ' - f'one argument without a default value' + f'performance function {func.__name__!r} has more ' + f'than one argument without a default value' ) @functools.wraps(func) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 0b4056f8fb..7aecc36c44 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1721,6 +1721,7 @@ def check_performance(self): f'skipping evaluation of performance variable ' f'{tag!r} in test {self.name!r}: {e}' ) + continue key = f'{self._current_partition.fullname}:{tag}' try: diff --git a/unittests/test_deferrable.py b/unittests/test_deferrable.py index 6149165e2e..ea5ed24170 100644 --- a/unittests/test_deferrable.py +++ b/unittests/test_deferrable.py @@ -16,6 +16,22 @@ def test_defer(): assert isinstance(a, _DeferredExpression) +def test_deferrable_perf(): + from reframe.core.deferrable import _DeferredPerformanceExpression as dpe + + a = sn.defer(3) + b = dpe.construct_from_deferred_expr(a, 'some_unit') + assert b.unit == 'some_unit' + + # Test wrong unit type + with pytest.raises(TypeError): + dpe(lambda x: x, 3) + + # Test not from deferred expr + with pytest.raises(TypeError): + dpe.construct_from_deferred_expr(lambda x: x, 'some_unit') + + def test_evaluate(): a = sn.defer(3) assert 3 == a.evaluate() @@ -23,6 +39,21 @@ def test_evaluate(): assert 3 == sn.evaluate(3) +def test_recursive_evaluate(): + @sn.deferrable + def c(): + @sn.deferrable + def b(): + @sn.deferrable + def a(): + return sn.defer(3) + + return a() + return b() + + assert 3 == c().evaluate() + + def test_depr_warn(monkeypatch): monkeypatch.setattr(warnings, '_RAISE_DEPRECATION_ALWAYS', True) with pytest.warns(ReframeDeprecationWarning): diff --git a/unittests/test_meta.py b/unittests/test_meta.py index 36f7318abd..29d1a39553 100644 --- a/unittests/test_meta.py +++ b/unittests/test_meta.py @@ -53,6 +53,7 @@ class MyTest(MyMeta): sanity_function(ext) v = required final(ext) + performance_function('some_units')(ext) def __init__(self): assert not hasattr(self, 'parameter') @@ -65,6 +66,7 @@ def __init__(self): assert not hasattr(self, 'sanity_function') assert not hasattr(self, 'required') assert not hasattr(self, 'final') + assert not hasattr(self, 'performance_function') MyTest() @@ -254,3 +256,91 @@ class AllowFinalOverride(Base): def foo(self): '''Overriding foo is now allowed.''' + + +def test_callable_attributes(MyMeta): + '''Test issue #2113. + + Setting a callable without the __name__ attribute would crash the + metaclass. 
+ ''' + + class Callable: + def __call__(self): + pass + + class Base(MyMeta): + f = Callable() + + +def test_performance_function(MyMeta): + assert hasattr(MyMeta, '_rfm_perf_fns') + + class Base(MyMeta): + @performance_function('units') + def perf_a(self): + pass + + @performance_function('units') + def perf_b(self): + pass + + def assert_perf_fn_return(self): + assert isinstance( + self.perf_a(), deferrable._DeferredPerformanceExpression + ) + + # Test the return type of the performance functions + Base().assert_perf_fn_return() + + # Test the performance function set has been built correctly + assert len(Base._rfm_perf_fns) == 2 + perf_fns = {'perf_a', 'perf_b'} + for fn in Base._rfm_perf_fns: + assert fn._rfm_perf_key in perf_fns + perf_fns.remove(fn._rfm_perf_key) + + assert not perf_fns + + class Derived(Base): + '''Test perf fn inheritance and perf_key argument.''' + + def perf_a(self): + '''Override perf fn with a non perf fn.''' + + @performance_function('units') + def perf_c(self): + '''Add a new perf fn.''' + + @performance_function('units', perf_key='my_perf_fn') + def perf_d(self): + '''Perf function with custom key.''' + + # Test the performance function set is correct with class inheritance + assert len(Derived._rfm_perf_fns) == 3 + perf_fns = {'perf_b', 'perf_c', 'my_perf_fn'} + for fn in Derived._rfm_perf_fns: + assert fn._rfm_perf_key in perf_fns + perf_fns.remove(fn._rfm_perf_key) + + assert not perf_fns + + +def test_performance_function_errors(MyMeta): + with pytest.raises(ReframeSyntaxError): + class wrong_perf_key_type(MyMeta): + @performance_function('units', perf_key=3) + def perf_fn(self): + pass + + with pytest.raises(ReframeSyntaxError): + class wrong_function_signature(MyMeta): + @performance_function('units') + def perf_fn(self, extra_arg): + pass + + with pytest.raises(ReframeSyntaxError): + class wrong_units(MyMeta): + @performance_function(5) + def perf_fn(self): + pass diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 20030a2ab6..554cb95cbe 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -1201,7 +1201,7 @@ def test_performance_invalid_value(dummytest, sanity_file, _run_sanity(dummytest, *dummy_gpu_exec_ctx) -def test_performance_var_evaluation(dummytest, sanity_file, +def test_perf_patterns_evaluation(dummytest, sanity_file, perf_file, dummy_gpu_exec_ctx): # All performance values must be evaluated, despite the first one # failing To test this, we need an extract function that will have a @@ -1240,6 +1240,136 @@ def extract_perf(patt, tag): assert 'v3' in log_output +@pytest.fixture +def perftest(testsys_system, perf_file, sanity_file): + class MyTest(rfm.RunOnlyRegressionTest): + sourcesdir = None + + @sanity_function + def dummy_sanity(self): + return sn.assert_found(r'success', sanity_file) + + @performance_function('unit') + def value1(self): + pass + + @performance_function('unit') + def value2(self): + pass + + @performance_function('unit', perf_key='value3') + def value_3(self): + pass + + yield MyTest() + + +def test_validate_default_perf_variables(perftest): + assert len(perftest.perf_variables) == 3 + assert 'value1' in perftest.perf_variables + assert 'value2' in perftest.perf_variables + assert 'value3' in perftest.perf_variables + + +def test_perf_vars_without_reference(perftest, sanity_file, + perf_file, dummy_gpu_exec_ctx): + logfile = 'perf.log' + + @sn.deferrable + def extract_perf(patt, tag): + val = sn.evaluate( + sn.extractsingle(patt, perf_file, tag, float) + ) + with open(logfile, 
'a') as fp: + fp.write(f'{tag}={val}') + + return val + + def wrapped_extract_perf(x, *args): + return extract_perf(*args) + + sanity_file.write_text('result = success\n') + perf_file.write_text('perf1 = 1.0\n' + 'perf3 = 3.3\n') + perftest.perf_variables = { + 'value1': perftest.make_performance_function( + extract_perf(r'perf1 = (?P\S+)', 'v1'), 'unit' + ), + 'value3': perftest.make_performance_function( + wrapped_extract_perf, 'unit', r'perf3 = (?P\S+)', 'v3' + ) + } + _run_sanity(perftest, *dummy_gpu_exec_ctx) + + logfile = os.path.join(perftest.stagedir, logfile) + with open(logfile) as fp: + log_output = fp.read() + + assert 'v1' in log_output + assert 'v3' in log_output + + +def test_perf_vars_with_reference(perftest, sanity_file, + perf_file, dummy_gpu_exec_ctx): + logfile = 'perf.log' + + @sn.deferrable + def extract_perf(patt, tag): + val = sn.evaluate( + sn.extractsingle(patt, perf_file, tag, float) + ) + with open(logfile, 'a') as fp: + fp.write(f'{tag}={val}') + + return val + + sanity_file.write_text('result = success\n') + perf_file.write_text('perf1 = 1.0\n') + + # Make the unit in the reference different from the performance function + perftest.reference = { + '*': { + 'value1': (0, None, None, 'unit_') + } + } + perftest.perf_variables = { + 'value1': perftest.make_performance_function( + extract_perf(r'perf1 = (?P\S+)', 'v1'), 'unit' + ), + } + _run_sanity(perftest, *dummy_gpu_exec_ctx) + + logfile = os.path.join(perftest.stagedir, logfile) + with open(logfile) as fp: + log_output = fp.read() + + assert 'v1' in log_output + + +def test_incompat_perf_syntax(perftest, sanity_file, + perf_file, dummy_gpu_exec_ctx): + sanity_file.write_text('result = success\n') + perf_file.write_text('perf1 = 1.0\n') + perftest.perf_patterns = {} + with pytest.raises(ReframeSyntaxError): + _run_sanity(perftest, *dummy_gpu_exec_ctx) + + +def test_perf_function_raises_exception(perftest, sanity_file, + perf_file, dummy_gpu_exec_ctx): + # The lambda takes a single arg, and the make_performance_function + # will inject the self argument too. This triggers an exception + # when the performance function is evaluated and here we test that + # it is caught properly. 
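+    # check_performance() logs the failure as a warning and skips the
+    # variable, so the performance stage itself must still succeed.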
+ + sanity_file.write_text('result = success\n') + perftest.perf_variables = { + 'value': perftest.make_performance_function( + lambda x: x, 'unit', 'random_arg' + ) + } + _run_sanity(perftest, *dummy_gpu_exec_ctx) + @pytest.fixture def container_test(tmp_path): def _container_test(platform, image): diff --git a/unittests/test_utility.py b/unittests/test_utility.py index a1acde6e30..801ca20db4 100644 --- a/unittests/test_utility.py +++ b/unittests/test_utility.py @@ -1726,6 +1726,8 @@ def bar(x, y): assert util.is_trivially_callable(foo) assert util.is_trivially_callable(bar, non_def_args=2) + with pytest.raises(TypeError): + util.is_trivially_callable(1) def test_nodelist_abbrev(): From 6463f80331e20f00691af9839ea7f5360cf36552 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Fri, 6 Aug 2021 18:51:36 +0200 Subject: [PATCH 27/34] Fix PEP complaints --- unittests/test_pipeline.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 554cb95cbe..c58d9b6b36 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -1202,7 +1202,7 @@ def test_performance_invalid_value(dummytest, sanity_file, def test_perf_patterns_evaluation(dummytest, sanity_file, - perf_file, dummy_gpu_exec_ctx): + perf_file, dummy_gpu_exec_ctx): # All performance values must be evaluated, despite the first one # failing To test this, we need an extract function that will have a # side effect when evaluated, whose result we will check after calling @@ -1310,7 +1310,7 @@ def wrapped_extract_perf(x, *args): def test_perf_vars_with_reference(perftest, sanity_file, - perf_file, dummy_gpu_exec_ctx): + perf_file, dummy_gpu_exec_ctx): logfile = 'perf.log' @sn.deferrable @@ -1370,6 +1370,7 @@ def test_perf_function_raises_exception(perftest, sanity_file, } _run_sanity(perftest, *dummy_gpu_exec_ctx) + @pytest.fixture def container_test(tmp_path): def _container_test(platform, image): From 3bcc349028f654f9db0a2e9d7eb4ef727049478d Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Mon, 9 Aug 2021 15:22:58 +0200 Subject: [PATCH 28/34] move make_performance_function into sn module --- reframe/core/pipeline.py | 18 ------------------ reframe/utility/sanity.py | 20 +++++++++++++++++++- unittests/test_pipeline.py | 15 ++++++--------- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 7aecc36c44..cfca62aaa6 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -124,24 +124,6 @@ class RegressionMixin(metaclass=RegressionTestMeta): .. versionadded:: 3.4.2 ''' - @final - def make_performance_function(self, func, unit, *args, **kwargs): - '''Wrapper to make a performance function inline. - - If ``func`` is an instance of the :class:`_DeferredExpression` class, - the performance function will be built by extending this deferred - expression into a deferred performance function. Otherwise, a new - deferred performance function will be created from the function - :func:`func`. 
- ''' - if isinstance(func, _DeferredExpression): - return _DeferredPerformanceExpression.construct_from_deferred_expr( - func, unit - ) - else: - return _DeferredPerformanceExpression(func, unit, self, - *args, **kwargs) - def __getattr__(self, name): ''' Intercept the AttributeError if the name is a required variable.''' if (name in self._rfm_var_space and diff --git a/reframe/utility/sanity.py b/reframe/utility/sanity.py index e6047bcfa2..d3b79c2f35 100644 --- a/reframe/utility/sanity.py +++ b/reframe/utility/sanity.py @@ -14,7 +14,8 @@ import reframe.utility as util import reframe.core.warnings as warn -from reframe.core.deferrable import deferrable, _DeferredExpression +from reframe.core.deferrable import (deferrable, _DeferredExpression, + _DeferredPerformanceExpression) from reframe.core.exceptions import SanityError @@ -40,6 +41,23 @@ def _open(filename, *args, **kwargs): raise SanityError(f'{filename}: {e.strerror}') +def make_performance_function(func, unit, *args, **kwargs): + '''Transform a callable or deferred expr. into a performance function. + + If ``func`` is an instance of the :class:`_DeferredExpression` class, + the performance function will be built by extending this deferred + expression into a deferred performance function. Otherwise, a new + deferred performance function will be created from the function + :func:`func`. + ''' + if isinstance(func, _DeferredExpression): + return _DeferredPerformanceExpression.construct_from_deferred_expr( + func, unit + ) + else: + return _DeferredPerformanceExpression(func, unit, *args, **kwargs) + + # Create an alias decorator def sanity_function(func): warn.user_deprecation_warning( diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index c58d9b6b36..8af3c8bbc8 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -1285,18 +1285,15 @@ def extract_perf(patt, tag): return val - def wrapped_extract_perf(x, *args): - return extract_perf(*args) - sanity_file.write_text('result = success\n') perf_file.write_text('perf1 = 1.0\n' 'perf3 = 3.3\n') perftest.perf_variables = { - 'value1': perftest.make_performance_function( + 'value1': sn.make_performance_function( extract_perf(r'perf1 = (?P\S+)', 'v1'), 'unit' ), - 'value3': perftest.make_performance_function( - wrapped_extract_perf, 'unit', r'perf3 = (?P\S+)', 'v3' + 'value3': sn.make_performance_function( + extract_perf, 'unit', r'perf3 = (?P\S+)', 'v3' ) } _run_sanity(perftest, *dummy_gpu_exec_ctx) @@ -1333,7 +1330,7 @@ def extract_perf(patt, tag): } } perftest.perf_variables = { - 'value1': perftest.make_performance_function( + 'value1': sn.make_performance_function( extract_perf(r'perf1 = (?P\S+)', 'v1'), 'unit' ), } @@ -1364,8 +1361,8 @@ def test_perf_function_raises_exception(perftest, sanity_file, sanity_file.write_text('result = success\n') perftest.perf_variables = { - 'value': perftest.make_performance_function( - lambda x: x, 'unit', 'random_arg' + 'value': sn.make_performance_function( + lambda x: x, 'unit', perftest, 'random_arg' ) } _run_sanity(perftest, *dummy_gpu_exec_ctx) From 439ced6e2197fde6f4bf349696a71399717b8193 Mon Sep 17 00:00:00 2001 From: Javier Otero Date: Thu, 19 Aug 2021 13:05:38 +0200 Subject: [PATCH 29/34] Address PR comments --- .../gpu/gpu_burn/gpu_burn_test.py | 5 +- docs/deferrable_functions_reference.rst | 14 +++++ docs/regression_test_api.rst | 15 ++++++ .../microbenchmarks/gpu/gpu_burn/__init__.py | 2 +- reframe/core/deferrable.py | 20 ++++--- reframe/core/meta.py | 26 ++++++--- reframe/core/parameters.py | 11 
++++
 reframe/core/pipeline.py                      | 48 +++++++++++------
 reframe/utility/sanity.py                     | 24 ++++++---
 unittests/test_deferrable.py                  | 21 ++++++++
 unittests/test_meta.py                        | 53 +++++++++++++------
 unittests/test_parameters.py                  | 10 ++++
 unittests/test_pipeline.py                    | 32 ++++++-----
 13 files changed, 204 insertions(+), 77 deletions(-)

diff --git a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
index 74dd5380d1..3b63fea20f 100644
--- a/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
+++ b/cscs-checks/microbenchmarks/gpu/gpu_burn/gpu_burn_test.py
@@ -79,8 +79,9 @@ def report_slow_nodes(self):
             # Flag the slow nodes
             for nid in nids:
                 try:
-                    val = self.min_perf(nid)
+                    node_perf = self.min_perf(nid)
+                    val = node_perf.evaluate(cache=True)
                     sn.assert_reference(val, ref, lt, ut).evaluate()
                 except SanityError:
-                    self.perf_variables[nid] = val
+                    self.perf_variables[nid] = node_perf

diff --git a/docs/deferrable_functions_reference.rst b/docs/deferrable_functions_reference.rst
index 996820b0fe..2675d4025e 100644
--- a/docs/deferrable_functions_reference.rst
+++ b/docs/deferrable_functions_reference.rst
@@ -11,6 +11,8 @@ Explicit evaluation of deferrable functions
 -------------------------------------------

 Deferrable functions may be evaluated at any time by calling :func:`evaluate` on their return value or by passing the deferred function itself to the :func:`~reframe.utility.sanity.evaluate()` free function.
+These :func:`evaluate` functions take an optional :class:`bool` argument ``cache``, which can be used to cache the evaluation of the deferrable function.
+Hence, if caching is enabled on a given deferrable function, any subsequent calls to :func:`evaluate` will simply return the previously cached results.

 Implicit evaluation of deferrable functions
 -------------------------------------------
@@ -48,6 +50,18 @@ Currently ReFrame provides three broad categories of deferrable functions:
    They include, but are not limited to, functions to iterate over regex matches in a file, extracting and converting values from regex matches, computing statistical information on series of data etc.

+Deferrable performance functions
+--------------------------------
+
+Deferrable performance functions are a special type of deferrable function intended for measuring a given quantity.
+Therefore, this kind of deferrable function has an associated unit that can be used to interpret the return values from these functions.
+The unit of a deferrable performance function can be accessed through the public member :attr:`unit`.
+Regular deferrable functions can be promoted to deferrable performance functions using the :func:`~reframe.utility.sanity.make_performance_function` utility.
+This utility also allows creating performance functions directly from any callable.
+
+Deferrable function utilities
+-----------------------------
+
 Users can write their own deferrable functions as well.
 The page ":doc:`deferrables`" explains in detail how deferrable functions work and how users can write their own.

diff --git a/docs/regression_test_api.rst b/docs/regression_test_api.rst
index 3818559127..a7edf23feb 100644
--- a/docs/regression_test_api.rst
+++ b/docs/regression_test_api.rst
@@ -277,6 +277,21 @@ Built-in functions

    .. versionadded:: 3.7.0

+.. py:decorator:: RegressionMixin.performance_function(unit, *, perf_key=None)
+
+   Decorate a member function as a performance function of the test.
+
+   This decorator converts the decorated method into a performance deferrable function (see :ref:`deferrable-functions`) whose evaluation is deferred to the performance stage of the regression test.
+   The decorated function must take a single argument without a default value (i.e. ``self``) and any number of arguments with default values.
+   A test may decorate multiple member functions as performance functions, where each of the decorated functions must be provided with the units of the performance quantities to be extracted from the test.
+   These performance units must be of type :class:`str`.
+   Any performance function may be overridden in a derived class and multiple bases may define their own performance functions.
+   In the event of a name conflict, the derived class will follow Python's `MRO `_ to choose the appropriate performance function.
+   However, defining more than one performance function with the same name in the same class is disallowed.
+
+   The full set of performance functions of a regression test is stored under :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` as key-value pairs, where, by default, the key is the name of the decorated member function, and the value is the deferred performance function itself.
+   Optionally, the key under which a performance function is stored in :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` can be customised by passing the desired key as the ``perf_key`` argument to this decorator.
+
 .. py:decorator:: RegressionMixin.deferrable(func)

    Converts the decorated method into a deferrable function.
diff --git a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
index 1b02384c23..0955ec3780 100644
--- a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
+++ b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py
@@ -101,7 +101,7 @@ def _extract_perf_metric(self, metric, nid=None):

         if metric not in {'perf', 'temp'}:
             raise ValueError(
-                f"unsupported value in 'metric' argument ({metric}!r)"
+                f"unsupported value in 'metric' argument: {metric!r}"
             )
diff --git a/reframe/core/deferrable.py b/reframe/core/deferrable.py
index 2bddbe1c46..3782812652 100644
--- a/reframe/core/deferrable.py
+++ b/reframe/core/deferrable.py
@@ -44,15 +44,16 @@ def __init__(self, fn, *args, **kwargs):

         # We cache the value of the last evaluation inside a tuple.
         # We don't cache the value directly, because it can be any.
-
-        # NOTE: The cache for the moment is only used by
-        # `__rfm_json_encode__`. Enabling caching in the evaluation is a
-        # reasonable optimization, but might break compatibility, so it needs
-        # to be thought thoroughly and communicated properly in the
-        # documentation.
         self._cached = ()
+        self._return_cached = False
+
+    def evaluate(self, cache=False):
+        # Return the cached value (if any)
+        if self._return_cached and not cache:
+            return self._cached[0]
+        elif cache:
+            self._return_cached = cache

-    def evaluate(self):
         fn_args = []
         for arg in self._args:
             fn_args.append(
@@ -66,10 +67,15 @@ def evaluate(self):
             )

         ret = self._fn(*fn_args, **fn_kwargs)
+
+        # Evaluate the return for as long as a deferred expression returns
+        # another deferred expression.
         while isinstance(ret, _DeferredExpression):
             ret = ret.evaluate()

+        # Cache the results for any subsequent evaluate calls.
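+        # Note that the cache is refreshed on every full evaluation;
+        # `_return_cached` only controls whether later calls short-circuit
+        # to the cached value instead of recomputing it.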
self._cached = (ret,) + return ret def __bool__(self): diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 3100055815..e02b05f40b 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -82,7 +82,13 @@ def __setitem__(self, key, value): 'once in the class body' ) from None elif hasattr(value, '_rfm_perf_key'): - self['_rfm_perf_fns'].add(value) + try: + self['_rfm_perf_fns'][key] = value + except KeyError: + raise ReframeSyntaxError( + f'the performance function {key} has already been ' + f'defined in this class' + ) from None # Register the final methods if hasattr(value, '_rfm_final'): @@ -272,14 +278,14 @@ def performance_function(units, *, perf_key=None): be used as the performance variable name. ''' if not isinstance(units, str): - raise ReframeSyntaxError('performance units must be a string') + raise TypeError('performance units must be a string') if perf_key and not isinstance(perf_key, str): - raise ReframeSyntaxError("'perf_key' must be a string") + raise TypeError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): - raise ReframeSyntaxError( + raise TypeError( f'performance function {func.__name__!r} has more ' f'than one argument without a default value' ) @@ -297,7 +303,7 @@ def _perf_fn(*args, **kwargs): return _deco_wrapper namespace['performance_function'] = performance_function - namespace['_rfm_perf_fns'] = set() + namespace['_rfm_perf_fns'] = namespaces.LocalNamespace() return metacls.MetaNamespace(namespace) def __new__(metacls, name, bases, namespace, **kwargs): @@ -361,10 +367,14 @@ def __init__(cls, name, bases, namespace, **kwargs): break - # Update the performance function set with the bases. + # Update the performance function dict with the bases. for base in cls._rfm_bases: - cls._rfm_perf_fns |= {f for f in getattr(base, '_rfm_perf_fns') - if f.__name__ not in namespace} + for k, v in base._rfm_perf_fns.items(): + if k not in namespace: + try: + cls._rfm_perf_fns[k] = v + except KeyError: + '''Performance function overridden by other class''' # Add the final functions from its parents cls._rfm_final_methods.update( diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py index 6f678269be..b9eb267c3b 100644 --- a/reframe/core/parameters.py +++ b/reframe/core/parameters.py @@ -12,6 +12,7 @@ import itertools import reframe.core.namespaces as namespaces +import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError @@ -45,6 +46,16 @@ def filter_params(x): return x self.values = tuple(values) + + # Validate the filter_param argument + try: + valid = utils.is_trivially_callable(filter_params, non_def_args=1) + except TypeError: + raise TypeError('the provided parameter filter is not a callable') + else: + if not valid: + raise TypeError('filter function must take a single argument') + self.filter_params = filter_params diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 85b991ab88..adf4ccc9da 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -647,22 +647,36 @@ def pipeline_hooks(cls): #: :default: :class:`None` perf_patterns = variable(typ.Dict[str, _DeferredExpression], type(None)) - #: Mapping with the performance variables of the test. + #: The performance variables associated with the test. + #: + #: In this context, a performance variable is a key-value pair, where the + #: key is the desired variable name and the value is the deferred + #: performance expression (i.e. 
the result of a :doc:`deferrable
+    #: performance function`) that computes
+    #: or extracts the performance variable's value.
+    #:
+    #: By default, ReFrame will populate this field during the test's
+    #: instantiation with all the member functions decorated with the
+    #: :func:`performance_function` decorator. If no performance functions
+    #: are present in the class, no performance checking or reporting will be
+    #: carried out.
+    #:
+    #: This mapping may be extended or replaced by other performance variables
+    #: that may be defined in any pipeline hook executing before the
+    #: performance stage. To this end, deferred performance functions can be
+    #: created inline using the utility
+    #: :func:`~reframe.utils.sanity.make_performance_function`.
     #:
     #: Refer to the :doc:`ReFrame Tutorials ` for concrete usage
     #: examples.
     #:
-    #: If no performance variables are explicitly provided, ReFrame will
-    #: populate this field during the test's instantiation with all the member
-    #: functions decorated with the :func:`performance_function` decorator. If
-    #: no performance functions are present in the class, no performance
-    #: checking or reporting will be carried out.
-    #:
-    #: :type: A dictionary with keys of type :class:`str` and values of type
-    #:   :class:`_DeferredPerformanceExpression`. A
-    #:   :class:`_DeferredPerformanceExpression` object is obtained when a
-    #:   function decorated with the :func:`performance_function` is called.
-    #: :default: ``{}``
+    #: :type: A dictionary with keys of type :class:`str` and deferred
+    #:   performance expressions (i.e., the result of a
+    #:   :doc:`deferrable performance function
+    #:   `) as values.
+    #: :default: collection of performance variables associated to each of
+    #:   the member functions decorated with the
+    #:   :func:`performance_function` decorator.
     perf_variables = variable(typ.Dict[str, _DeferredPerformanceExpression],
                               value={})
 
@@ -853,7 +867,7 @@ def __pre_init__(self):
 
         # Build the default performance dict
         if not self.perf_variables:
-            for fn in self._rfm_perf_fns:
+            for fn in self._rfm_perf_fns.values():
                 self.perf_variables[fn._rfm_perf_key] = fn(self)
 
     def __init__(self):
@@ -1696,8 +1710,7 @@ def check_performance(self):
             raise ReframeSyntaxError(
                 f"assigning a value to 'perf_patterns' conflicts ",
                 f"with using the 'performance_function' decorator ",
-                f"or setting a value to 'perf_variables' "
-                f"(class {self.__class__.__qualname__})"
+                f"or setting a value to 'perf_variables'"
             )
 
         # Log the performance variables
@@ -1709,7 +1722,7 @@ def check_performance(self):
             except Exception as e:
                 logging.getlogger().warning(
                     f'skipping evaluation of performance variable '
-                    f'{tag!r} in test {self.name!r}: {e}'
+                    f'{tag!r}: {e}'
                 )
                 continue
 
@@ -1726,7 +1739,8 @@ def check_performance(self):
                         f'reference unit ({key!r}) for the '
                         f'performance variable {tag!r} '
                         f'does not match the unit specified '
-                        f'in the performance function ({unit!r})'
+                        f'in the performance function ({unit!r}): '
+                        f'{unit!r} will be used'
                     )
 
                     # Pop the unit from the ref tuple (redundant)
diff --git a/reframe/utility/sanity.py b/reframe/utility/sanity.py
index d3b79c2f35..7d7e20b61a 100644
--- a/reframe/utility/sanity.py
+++ b/reframe/utility/sanity.py
@@ -42,13 +42,14 @@ def _open(filename, *args, **kwargs):
 
 
 def make_performance_function(func, unit, *args, **kwargs):
-    '''Transform a callable or deferred expr. into a performance function.
+    '''Convert a callable or deferred expression into a performance function.
- If ``func`` is an instance of the :class:`_DeferredExpression` class, - the performance function will be built by extending this deferred - expression into a deferred performance function. Otherwise, a new - deferred performance function will be created from the function - :func:`func`. + If ``func`` is a deferred expression, the performance function will be + built by extending this deferred expression into a deferred performance + expression. Otherwise, a new deferred performance expression will be + created from the function :func:`func`. + + ..versionadded:: 3.8.0 ''' if isinstance(func, _DeferredExpression): return _DeferredPerformanceExpression.construct_from_deferred_expr( @@ -904,15 +905,22 @@ def defer(x): return x -def evaluate(expr): +def evaluate(expr, cache=False): '''Evaluate a deferred expression. If ``expr`` is not a deferred expression, it will be returned as is. + If ``expr`` is a deferred expression and ``cache`` is ``True``, the + results of the deferred expression will be cached and subsequent calls + to ``evaluate`` on this deferred expression will simply return the + previously cached result. .. versionadded:: 2.21 + + .. versionchanged:: 3.4 + Added support for evaluation caching. ''' if isinstance(expr, _DeferredExpression): - return expr.evaluate() + return expr.evaluate(cache=cache) else: return expr diff --git a/unittests/test_deferrable.py b/unittests/test_deferrable.py index ea5ed24170..734a927f66 100644 --- a/unittests/test_deferrable.py +++ b/unittests/test_deferrable.py @@ -54,6 +54,27 @@ def a(): assert 3 == c().evaluate() +def test_evaluate_cached(): + # A dummy mutable + my_list = [1] + + @sn.deferrable + def my_expr(): + return my_list[0] + + expr = my_expr() + assert expr.evaluate() == 1 + my_list = [2] + assert expr.evaluate(cache=True) == 2 + my_list = [3] + assert expr.evaluate() == 2 + + # Test that using cache=True updates the previously cached result + assert expr.evaluate(cache=True) == 3 + my_list = [4] + assert expr.evaluate() == 3 + + def test_depr_warn(monkeypatch): monkeypatch.setattr(warnings, '_RAISE_DEPRECATION_ALWAYS', True) with pytest.warns(ReframeDeprecationWarning): diff --git a/unittests/test_meta.py b/unittests/test_meta.py index 29d1a39553..7b9f12106c 100644 --- a/unittests/test_meta.py +++ b/unittests/test_meta.py @@ -293,14 +293,9 @@ def assert_perf_fn_return(self): # Test the return type of the performance functions Base().assert_perf_fn_return() - # Test the performance function set has been built correctly - assert len(Base._rfm_perf_fns) == 2 - perf_fns = {'perf_a', 'perf_b'} - for fn in Base._rfm_perf_fns: - assert fn._rfm_perf_key in perf_fns - perf_fns.remove(fn._rfm_perf_key) - - assert not perf_fns + # Test the performance function dict has been built correctly + perf_dict = {fn for fn in Base._rfm_perf_fns} + assert perf_dict == {'perf_a', 'perf_b'} class Derived(Base): '''Test perf fn inheritance and perf_key argument.''' @@ -317,29 +312,53 @@ def perf_d(self): '''Perf function with custom key.''' # Test the performance function set is correct with class inheritance - assert len(Derived._rfm_perf_fns) == 3 - perf_fns = {'perf_b', 'perf_c', 'my_perf_fn'} - for fn in Derived._rfm_perf_fns: - assert fn._rfm_perf_key in perf_fns - perf_fns.remove(fn._rfm_perf_key) + perf_dict = {fn._rfm_perf_key for fn in Derived._rfm_perf_fns.values()} + assert perf_dict == {'perf_b', 'perf_c', 'my_perf_fn'} - assert not perf_fns + # Test multiple inheritance and name conflict resolution + class ClashingBase(MyMeta): + 
@performance_function('units', perf_key='clash') + def perf_a(self): + return 'A' + class Join(ClashingBase, Base): + '''Test that we follow MRO's order.''' -def test_performance_function_errors(MyMeta): + class JoinAndOverride(ClashingBase, Base): + @performance_function('units') + def perf_a(self): + return 'B' + + assert Join._rfm_perf_fns['perf_a']('self').evaluate() == 'A' + assert JoinAndOverride._rfm_perf_fns['perf_a']('self').evaluate() == 'B' + + +def test_double_define_performance_function(MyMeta): with pytest.raises(ReframeSyntaxError): + class Foo(MyMeta): + @performance_function('unit') + def foo(self): + pass + + @performance_function('unit') + def foo(self): + '''This doesn't make sense, so we raise an error''' + + +def test_performance_function_errors(MyMeta): + with pytest.raises(TypeError): class wrong_perf_key_type(MyMeta): @performance_function('units', perf_key=3) def perf_fn(self): pass - with pytest.raises(ReframeSyntaxError): + with pytest.raises(TypeError): class wrong_function_signature(MyMeta): @performance_function('units') def perf_fn(self, extra_arg): pass - with pytest.raises(ReframeSyntaxError): + with pytest.raises(TypeError): class wrong_units(MyMeta): @performance_function(5) def perf_fn(self): diff --git a/unittests/test_parameters.py b/unittests/test_parameters.py index dfc5556a06..f5e31ff300 100644 --- a/unittests/test_parameters.py +++ b/unittests/test_parameters.py @@ -79,6 +79,16 @@ class MyTest(ExtendParams): assert MyTest.param_space['P2'] == ('f', 'g',) +def test_wrong_filter(): + with pytest.raises(TypeError): + class MyTest(ExtendParams): + P1 = parameter(inherit_params=True, filter_params='not callable') + + with pytest.raises(TypeError): + class MyTest(ExtendParams): + P1 = parameter(inherit_params=True, filter_params=lambda x, y: []) + + def test_is_abstract_test(): class MyTest(Abstract): pass diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 26ddc390a1..0070832189 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -1317,6 +1317,9 @@ def extract_perf(patt, tag): def test_perf_vars_with_reference(perftest, sanity_file, perf_file, dummy_gpu_exec_ctx): + # This test also checks that a performance function that raises an + # exception is simply skipped. + logfile = 'perf.log' @sn.deferrable @@ -1329,6 +1332,14 @@ def extract_perf(patt, tag): return val + def dummy_perf(x): + # Dummy function to check that a performance variable is simply + # skipped when the wrong number of arguments are passed to it. + with open(logfile, 'a') as fp: + fp.write('v2') + + return 1 + sanity_file.write_text('result = success\n') perf_file.write_text('perf1 = 1.0\n') @@ -1342,6 +1353,9 @@ def extract_perf(patt, tag): 'value1': sn.make_performance_function( extract_perf(r'perf1 = (?P\S+)', 'v1'), 'unit' ), + 'value2':sn.make_performance_function( + dummy_perf, 'other_units', perftest, 'extra_arg' + ), } _run_sanity(perftest, *dummy_gpu_exec_ctx) @@ -1350,7 +1364,7 @@ def extract_perf(patt, tag): log_output = fp.read() assert 'v1' in log_output - + assert 'v2' not in log_output def test_incompat_perf_syntax(perftest, sanity_file, perf_file, dummy_gpu_exec_ctx): @@ -1361,22 +1375,6 @@ def test_incompat_perf_syntax(perftest, sanity_file, _run_sanity(perftest, *dummy_gpu_exec_ctx) -def test_perf_function_raises_exception(perftest, sanity_file, - perf_file, dummy_gpu_exec_ctx): - # The lambda takes a single arg, and the make_performance_function - # will inject the self argument too. 
This triggers an exception - # when the performance function is evaluated and here we test that - # it is caught properly. - - sanity_file.write_text('result = success\n') - perftest.perf_variables = { - 'value': sn.make_performance_function( - lambda x: x, 'unit', perftest, 'random_arg' - ) - } - _run_sanity(perftest, *dummy_gpu_exec_ctx) - - @pytest.fixture def container_test(tmp_path): def _container_test(platform, image): From 87f4583388979e2aecbd0c7dc1b88287d803e828 Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Thu, 19 Aug 2021 13:30:48 +0200 Subject: [PATCH 30/34] Update docs --- docs/deferrable_functions_reference.rst | 2 +- reframe/core/pipeline.py | 4 ++-- reframe/utility/sanity.py | 11 ++++++++--- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/docs/deferrable_functions_reference.rst b/docs/deferrable_functions_reference.rst index 2675d4025e..e9dc49433f 100644 --- a/docs/deferrable_functions_reference.rst +++ b/docs/deferrable_functions_reference.rst @@ -11,7 +11,7 @@ Explicit evaluation of deferrable functions ------------------------------------------- Deferrable functions may be evaluated at any time by calling :func:`evaluate` on their return value or by passing the deferred function itself to the :func:`~reframe.utility.sanity.evaluate()` free function. -These :func:`evaluate` functions take an optional :class:`bool` argument ``cache``, which can be use to cache the evaluation of the deferrable function. +These :func:`evaluate` functions take an optional :class:`bool` argument ``cache``, which can be used to cache the evaluation of the deferrable function. Hence, if caching is enabled on a given deferrable function, any subsequent calls to :func:`evaluate` will simply return the previously cached results. Implicit evaluation of deferrable functions diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index adf4ccc9da..12144ac663 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -665,7 +665,7 @@ def pipeline_hooks(cls): #: that may be defined in any pipeline hook executing before the #: performance stage. To this end, deferred performance functions can be #: created inline using the utility - #: :func:`~reframe.utils.sanity.make_performance_function`. + #: :func:`~reframe.utility.sanity.make_performance_function`. #: #: Refer to the :doc:`ReFrame Tutorials ` for concrete usage #: examples. @@ -674,7 +674,7 @@ def pipeline_hooks(cls): #: performance expressions (i.e., the result of a #: :doc:`deferrable performance function #: `) as values. - #: :default: collection of performance variables associated to each of + #: :default: Collection of performance variables associated to each of #: the member functions decorated with the #: :func:`performance_function` decorator. perf_variables = variable(typ.Dict[str, _DeferredPerformanceExpression], diff --git a/reframe/utility/sanity.py b/reframe/utility/sanity.py index 7d7e20b61a..f697d97dbd 100644 --- a/reframe/utility/sanity.py +++ b/reframe/utility/sanity.py @@ -47,9 +47,14 @@ def make_performance_function(func, unit, *args, **kwargs): If ``func`` is a deferred expression, the performance function will be built by extending this deferred expression into a deferred performance expression. Otherwise, a new deferred performance expression will be - created from the function :func:`func`. - - ..versionadded:: 3.8.0 + created from the function :func:`func`. 
The argument ``unit`` is the unit + associated with the deferrable performance expression, and ``*args`` and + ``**kwargs`` are the arguments to be captured by this deferred expression. + See + :doc:`deferrable functions reference ` + for further information on deferrable functions. + + .. versionadded:: 3.8.0 ''' if isinstance(func, _DeferredExpression): return _DeferredPerformanceExpression.construct_from_deferred_expr( From 00dfb53c2f1d09a6723156a9e7cff2bb03b11c00 Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Thu, 19 Aug 2021 13:39:26 +0200 Subject: [PATCH 31/34] Fix PEP complaints --- hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py | 2 +- unittests/test_pipeline.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py index 0955ec3780..1844540851 100644 --- a/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py +++ b/hpctestlib/microbenchmarks/gpu/gpu_burn/__init__.py @@ -101,7 +101,7 @@ def _extract_perf_metric(self, metric, nid=None): if metric not in {'perf', 'temp'}: raise ValueError( - f"unsupported value in 'metric' argument: {metric!r}" + f"unsupported value in 'metric' argument: {metric!r}" ) if nid is None: diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 0070832189..fa5942aa38 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -1353,7 +1353,7 @@ def dummy_perf(x): 'value1': sn.make_performance_function( extract_perf(r'perf1 = (?P\S+)', 'v1'), 'unit' ), - 'value2':sn.make_performance_function( + 'value2': sn.make_performance_function( dummy_perf, 'other_units', perftest, 'extra_arg' ), } @@ -1366,6 +1366,7 @@ def dummy_perf(x): assert 'v1' in log_output assert 'v2' not in log_output + def test_incompat_perf_syntax(perftest, sanity_file, perf_file, dummy_gpu_exec_ctx): sanity_file.write_text('result = success\n') From 5b195118ab20ac4fc05cd66239c9037dadfe928a Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Mon, 23 Aug 2021 12:18:28 +0200 Subject: [PATCH 32/34] Update docs with PR comments --- docs/deferrable_functions_reference.rst | 18 ++++++++++++------ docs/regression_test_api.rst | 4 ++-- reframe/core/deferrable.py | 1 - reframe/core/meta.py | 2 +- reframe/core/parameters.py | 4 +++- reframe/core/pipeline.py | 9 +++++---- reframe/utility/__init__.py | 22 +++++++++++++++------- reframe/utility/sanity.py | 14 ++++++++++---- 8 files changed, 48 insertions(+), 26 deletions(-) diff --git a/docs/deferrable_functions_reference.rst b/docs/deferrable_functions_reference.rst index e9dc49433f..4a5d27cc53 100644 --- a/docs/deferrable_functions_reference.rst +++ b/docs/deferrable_functions_reference.rst @@ -7,6 +7,9 @@ Deferrable Functions Reference *Deferrable functions* are the functions whose execution may be postponed to a later time after they are called. The key characteristic of these functions is that they store their arguments when they are called, and the execution itself does not occur until the function is evaluated either explicitly or implicitly. +ReFrame provides an ample set of deferrable utilities and it also allows users to write their own deferrable functions when needed. +Please refer to ":doc:`deferrables`" for a hands-on explanation on how deferrable functions work and how to create custom deferrable functions. 
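
A minimal sketch of the cached-evaluation semantics introduced in this series, assuming only the interfaces these patches touch (``@sn.deferrable``, ``sn.evaluate`` and the new ``cache`` argument); the toy function and data are invented for illustration:

import reframe.utility.sanity as sn

data = [1]

@sn.deferrable
def read_data():
    # Nothing runs here until the expression is evaluated
    return data[0]

expr = read_data()                       # build the deferred expression
assert sn.evaluate(expr) == 1            # plain evaluation; no caching
data[0] = 2
assert expr.evaluate(cache=True) == 2    # evaluate and cache the result
data[0] = 3
assert expr.evaluate() == 2              # the cached result is returned
assert expr.evaluate(cache=True) == 3    # re-evaluate and re-cache
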
 
 Explicit evaluation of deferrable functions
 -------------------------------------------
 
@@ -14,6 +17,9 @@ Deferrable functions may be evaluated at any time by calling :func:`evaluate` on
 These :func:`evaluate` functions take an optional :class:`bool` argument ``cache``, which can be used to cache the evaluation of the deferrable function.
 Hence, if caching is enabled on a given deferrable function, any subsequent calls to :func:`evaluate` will simply return the previously cached results.
 
+.. versionchanged:: 3.8.0
+   Support cached evaluation.
+
 Implicit evaluation of deferrable functions
 -------------------------------------------
 
@@ -50,8 +56,10 @@ Currently ReFrame provides three broad categories of deferrable functions:
   They include, but are not limited to, functions to iterate over regex matches in a file, extracting and converting values from regex matches, computing statistical information on series of data etc.
 
+
 .. _deferrable-performance-functions:
+
 Deferrable performance functions
---------------------------------
+................................
 
 Deferrable performance functions are a special type of deferrable functions which are intended for measuring a given quantity.
 Therefore, this kind of deferrable functions have an associated unit that can be used to interpret the return values from these functions.
@@ -59,12 +67,10 @@ The unit of a deferrable performance function can be accessed through the public
 Regular deferrable functions can be promoted to deferrable performance functions using the :func:`~reframe.utility.sanity.make_performance_function` utility.
 Also, this utility allows to create performance functions directly from any callable.
 
-Deferrable function utilities
------------------------------
-
-Users can write their own deferrable functions as well.
-The page ":doc:`deferrables`" explains in detail how deferrable functions work and how users can write their own.
+.. versionadded:: 3.8.0
 
+List of deferrable functions and utilities
+------------------------------------------
 
 .. py:decorator:: reframe.utility.sanity.deferrable(func)
 
diff --git a/docs/regression_test_api.rst b/docs/regression_test_api.rst
index 2447a81eb2..478d6cb5d3 100644
--- a/docs/regression_test_api.rst
+++ b/docs/regression_test_api.rst
@@ -326,7 +326,7 @@ Built-in functions
 
    Decorate a member function as a performance function of the test.
 
-   This decorator converts the decorated method into a performance deferrable function (see :ref:`deferrable-functions`) whose evaluation is deferred to the performance stage of the regression test.
+   This decorator converts the decorated method into a performance deferrable function (see :ref:`deferrable-performance-functions`) whose evaluation is deferred to the performance stage of the regression test.
    The decorated function must take a single argument without a default value (i.e. ``self``) and any number of arguments with default values.
    A test may decorate multiple member functions as performance functions, where each of the decorated functions must be provided with the units of the performance quantities to be extracted from the test.
    These performance units must be of type :class:`str`.
@@ -334,7 +334,7 @@ Built-in functions
    In the event of a name conflict, the derived class will follow Python's `MRO `_ to choose the appropriate performance function.
    However, defining more than one performance function with the same name in the same class is disallowed.
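
A minimal sketch of how a test might use this decorator, based only on the usage shown in these patches (``performance_function`` is injected into the class body by the metaclass changes above); the class name, regexes and units are invented for illustration, and the usual test boilerplate is elided:

import reframe as rfm
import reframe.utility.sanity as sn


class example_perf_test(rfm.RunOnlyRegressionTest):
    # ... valid_systems, executable and sanity checking elided ...

    @performance_function('Gflop/s')
    def flops(self):
        # Stored as self.perf_variables['flops'] by default
        return sn.extractsingle(r'perf:\s+(\S+)', self.stdout, 1, float)

    @performance_function('s', perf_key='elapsed')
    def time(self):
        # 'perf_key' overrides the default key, so this one is stored
        # as self.perf_variables['elapsed']
        return sn.extractsingle(r'time:\s+(\S+)', self.stdout, 1, float)
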
- The full set of performance functions of a regression test is stored under :attr:`~reframe.core.pipeline.RegerssionTest.perf_variables` as key-value pairs, where, by default, the key is the name of the decorated member function, and the value is the deferred performance function itself. + The full set of performance functions of a regression test is stored under :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` as key-value pairs, where, by default, the key is the name of the decorated member function, and the value is the deferred performance function itself. Optionally, the key under which a performance function is stored in :attr:`~reframe.core.pipeline.RegressionTest.perf_variables` can be customised by passing the desired key as the ``perf_key`` argument to this decorator. .. py:decorator:: RegressionMixin.deferrable(func) diff --git a/reframe/core/deferrable.py b/reframe/core/deferrable.py index 3782812652..b81bd9d823 100644 --- a/reframe/core/deferrable.py +++ b/reframe/core/deferrable.py @@ -75,7 +75,6 @@ def evaluate(self, cache=False): # Cache the results for any subsequent evaluate calls. self._cached = (ret,) - return ret def __bool__(self): diff --git a/reframe/core/meta.py b/reframe/core/meta.py index e02b05f40b..d6815f14a0 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -86,7 +86,7 @@ def __setitem__(self, key, value): self['_rfm_perf_fns'][key] = value except KeyError: raise ReframeSyntaxError( - f'the performance function {key} has already been ' + f'the performance function {key!r} has already been ' f'defined in this class' ) from None diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py index 995afd15a0..6a6d9376b3 100644 --- a/reframe/core/parameters.py +++ b/reframe/core/parameters.py @@ -51,7 +51,9 @@ def filter_params(x): try: valid = utils.is_trivially_callable(filter_params, non_def_args=1) except TypeError: - raise TypeError('the provided parameter filter is not a callable') + raise TypeError( + 'the provided parameter filter is not a callable' + ) from None else: if not valid: raise TypeError('filter function must take a single argument') diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 12144ac663..4732f33ac2 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -651,8 +651,8 @@ def pipeline_hooks(cls): #: #: In this context, a performance variable is a key-value pair, where the #: key is the desired variable name and the value is the deferred - #: performance expression (i.e. the result of a :doc:`deferrable - #: performance function`) that computes + #: performance expression (i.e. the result of a :ref:`deferrable + #: performance `) that computes #: or extracts the performance variable's value. #: #: By default, ReFrame will populate this field during the test's @@ -675,8 +675,9 @@ def pipeline_hooks(cls): #: :doc:`deferrable performance function #: `) as values. #: :default: Collection of performance variables associated to each of - #: the member functions decorated with the - #: :func:`performance_function` decorator. + #: the member functions decorated with the :func:`@performance_function + #: ` + #: decorator. 
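
A minimal sketch of the hook-based extension described in the docstring above, assuming the documented ``make_performance_function`` signature; the test class, hook name, regex and variable names are invented for illustration:

import reframe as rfm
import reframe.utility.sanity as sn


class example_bw_test(rfm.RegressionTest):
    # ... test definition elided ...

    @rfm.run_before('performance')
    def add_copy_bandwidth(self):
        # Promote a plain deferred expression to a performance function;
        # the second argument is the unit attached to the new variable.
        self.perf_variables['copy_bw'] = sn.make_performance_function(
            sn.extractsingle(r'copy:\s+(\S+)\s+MB/s', self.stdout, 1, float),
            'MB/s'
        )
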
 perf_variables = variable(typ.Dict[str, _DeferredPerformanceExpression],
                               value={})
 
diff --git a/reframe/utility/__init__.py b/reframe/utility/__init__.py
index 70e86378ab..a4b6fc1d09 100644
--- a/reframe/utility/__init__.py
+++ b/reframe/utility/__init__.py
@@ -307,13 +307,21 @@ def attrs(obj):
 
 def is_trivially_callable(fn, *, non_def_args=0):
     '''Assert that a callable object is trivially callable.
 
-    A trivially callable object is one that can be called without providing
-    any additional arguments to its call method. Member functions with a single
-    argument without a default value may be considered trivially callable,
-    since the ``self`` placeholder is automatically provided when invoked from
-    the instance. The number of allowed arguments without a default value for a
-    callable to be considered trivially callable may be controlled with the
-    ``non_def_args`` argument.
+    An object is trivially callable when it can be invoked by providing just
+    an expected number of non-default arguments to its call method. For
+    example, (non-static) member functions expect a single argument without a
+    default value, which will be passed as ``cls`` or ``self`` during
+    invocation depending on whether the function is a classmethod or not,
+    respectively.
+    On the other hand, member functions that are static methods are not passed
+    any values by default when invoked. Therefore, these functions can only be
+    trivially callable when their call method expects no arguments by default.
+
+    :param fn: A callable to be tested if it is trivially callable.
+    :param non_def_args: The number of non-default arguments the callable
+        ``fn`` expects when invoked.
+    :return: This function returns ``True`` if the expected number of
+        arguments matches the value of ``non_def_args``. Otherwise, it returns
+        ``False``.
 
     '''
     if not callable(fn):
diff --git a/reframe/utility/sanity.py b/reframe/utility/sanity.py
index f697d97dbd..e20696edad 100644
--- a/reframe/utility/sanity.py
+++ b/reframe/utility/sanity.py
@@ -916,13 +916,19 @@ def evaluate(expr, cache=False):
     If ``expr`` is not a deferred expression, it will be returned as is.
     If ``expr`` is a deferred expression and ``cache`` is ``True``, the
     results of the deferred expression will be cached and subsequent calls
-    to ``evaluate`` on this deferred expression will simply return the
-    previously cached result.
+    to :func:`evaluate` on this deferred expression (when ``cache=False``)
+    will simply return the previously cached result.
+
+    .. note::
+        When the ``cache`` argument is passed as ``True``, a deferred
+        expression will always be evaluated and its results will be re-cached.
+        This may replace any other results that may have been cached in
+        previous evaluations.
 
     .. versionadded:: 2.21
 
-    .. versionchanged:: 3.4
-        Added support for evaluation caching.
+    .. versionchanged:: 3.8.0
+        Add support for evaluation caching.
''' if isinstance(expr, _DeferredExpression): return expr.evaluate(cache=cache) From b7f926dda40f4d0439c5a6c0ab575f4b37541e50 Mon Sep 17 00:00:00 2001 From: Javier Otero <71280927+jjotero@users.noreply.github.com> Date: Mon, 23 Aug 2021 12:36:18 +0200 Subject: [PATCH 33/34] Update perf_variables docstring --- reframe/core/pipeline.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 4732f33ac2..80aaf0c64d 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -652,7 +652,7 @@ def pipeline_hooks(cls): #: In this context, a performance variable is a key-value pair, where the #: key is the desired variable name and the value is the deferred #: performance expression (i.e. the result of a :ref:`deferrable - #: performance `) that computes + #: performance function`) that computes #: or extracts the performance variable's value. #: #: By default, ReFrame will populate this field during the test's @@ -671,9 +671,8 @@ def pipeline_hooks(cls): #: examples. #: #: :type: A dictionary with keys of type :class:`str` and deferred - #: performance expressions (i.e., the result of a - #: :doc:`deferrable performance function - #: `) as values. + #: performance expressions as values (see + #: :ref:`deferrable-performance-functions`). #: :default: Collection of performance variables associated to each of #: the member functions decorated with the :func:`@performance_function #: ` From 06813e981899f53fcb1d96b26af1497151d3a3fc Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Mon, 23 Aug 2021 14:05:43 +0200 Subject: [PATCH 34/34] Docs fine tuning --- docs/deferrable_functions_reference.rst | 11 ++++++++--- docs/regression_test_api.rst | 2 +- reframe/core/pipeline.py | 9 ++++++--- reframe/utility/__init__.py | 6 +++--- reframe/utility/sanity.py | 6 +++++- 5 files changed, 23 insertions(+), 11 deletions(-) diff --git a/docs/deferrable_functions_reference.rst b/docs/deferrable_functions_reference.rst index 4a5d27cc53..1dab287888 100644 --- a/docs/deferrable_functions_reference.rst +++ b/docs/deferrable_functions_reference.rst @@ -10,6 +10,7 @@ The key characteristic of these functions is that they store their arguments whe ReFrame provides an ample set of deferrable utilities and it also allows users to write their own deferrable functions when needed. Please refer to ":doc:`deferrables`" for a hands-on explanation on how deferrable functions work and how to create custom deferrable functions. + Explicit evaluation of deferrable functions ------------------------------------------- @@ -18,7 +19,8 @@ These :func:`evaluate` functions take an optional :class:`bool` argument ``cache Hence, if caching is enabled on a given deferrable function, any subsequent calls to :func:`evaluate` will simply return the previously cached results. .. versionchanged:: 3.8.0 - Support cached evaluation. + Support of cached evaluation is added. + Implicit evaluation of deferrable functions ------------------------------------------- @@ -58,8 +60,12 @@ Currently ReFrame provides three broad categories of deferrable functions: .. _deferrable-performance-functions: + +-------------------------------- Deferrable performance functions -................................ +-------------------------------- + +.. versionadded:: 3.8.0 Deferrable performance functions are a special type of deferrable functions which are intended for measuring a given quantity. 
 Therefore, this kind of deferrable functions have an associated unit that can be used to interpret the return values from these functions.
@@ -67,7 +73,6 @@ The unit of a deferrable performance function can be accessed through the public
 Regular deferrable functions can be promoted to deferrable performance functions using the :func:`~reframe.utility.sanity.make_performance_function` utility.
 Also, this utility allows to create performance functions directly from any callable.
 
-.. versionadded:: 3.8.0
 
 List of deferrable functions and utilities
 ------------------------------------------
diff --git a/docs/regression_test_api.rst b/docs/regression_test_api.rst
index 478d6cb5d3..2390e3954a 100644
--- a/docs/regression_test_api.rst
+++ b/docs/regression_test_api.rst
@@ -326,7 +326,7 @@ Built-in functions
 
    Decorate a member function as a performance function of the test.
 
-   This decorator converts the decorated method into a performance deferrable function (see :ref:`deferrable-performance-functions`) whose evaluation is deferred to the performance stage of the regression test.
+   This decorator converts the decorated method into a performance deferrable function (see ":ref:`deferrable-performance-functions`" for more details) whose evaluation is deferred to the performance stage of the regression test.
    The decorated function must take a single argument without a default value (i.e. ``self``) and any number of arguments with default values.
    A test may decorate multiple member functions as performance functions, where each of the decorated functions must be provided with the units of the performance quantities to be extracted from the test.
    These performance units must be of type :class:`str`.
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 80aaf0c64d..e87659f4a3 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -657,9 +657,10 @@ def pipeline_hooks(cls):
     #:
     #: By default, ReFrame will populate this field during the test's
     #: instantiation with all the member functions decorated with the
-    #: :func:`performance_function` decorator. If no performance functions
-    #: are present in the class, no performance checking or reporting will be
-    #: carried out.
+    #: :func:`@performance_function
+    #: ` decorator.
+    #: If no performance functions are present in the class, no performance
+    #: checking or reporting will be carried out.
     #:
     #: This mapping may be extended or replaced by other performance variables
     #: that may be defined in any pipeline hook executing before the
diff --git a/reframe/utility/__init__.py b/reframe/utility/__init__.py
index a4b6fc1d09..44dadba318 100644
--- a/reframe/utility/__init__.py
+++ b/reframe/utility/__init__.py
@@ -305,7 +305,7 @@ def attrs(obj):
 
 
 def is_trivially_callable(fn, *, non_def_args=0):
-    '''Assert that a callable object is trivially callable.
+    '''Check that a callable object is trivially callable.
 
     An object is trivially callable when it can be invoked by providing just
     an expected number of non-default arguments to its call method. For
     example, (non-static) member functions expect a single argument without a
@@ -319,9 +319,9 @@ def is_trivially_callable(fn, *, non_def_args=0):
 
     :param fn: A callable to be tested if it is trivially callable.
     :param non_def_args: The number of non-default arguments the callable
        ``fn`` expects when invoked.
- :return: This function returns ``True`` if the expected number of + :return: This function returns :obj:`True` if the expected number of arguments matches the value of ``non_def_args``. Otherwise, it returns - ``False``. + :obj:`False`. ''' if not callable(fn): diff --git a/reframe/utility/sanity.py b/reframe/utility/sanity.py index e20696edad..8cfd4607c1 100644 --- a/reframe/utility/sanity.py +++ b/reframe/utility/sanity.py @@ -919,6 +919,9 @@ def evaluate(expr, cache=False): to :func:`evaluate` on this deferred expression (when ``cache=False``) will simply return the previously cached result. + :param expr: The expression to be evaluated. + :param cache: Cache the result of this evaluation. + .. note:: When the ``cache`` argument is passed as ``True``, a deferred expression will always be evaluated and its results will be re-cached. @@ -928,8 +931,9 @@ def evaluate(expr, cache=False): .. versionadded:: 2.21 .. versionchanged:: 3.8.0 - Add support for evaluation caching. + The ``cache`` argument is added. ''' + if isinstance(expr, _DeferredExpression): return expr.evaluate(cache=cache) else: