From f666f3e11d8e633e8205553e1aa1ccebaddb7b57 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Mon, 22 Nov 2021 20:52:19 +0100 Subject: [PATCH 01/62] Rename `fullname` to `variant_name` --- reframe/core/fixtures.py | 2 +- reframe/core/meta.py | 4 ++-- reframe/core/pipeline.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py index 078e990850..e1f206e7d0 100644 --- a/reframe/core/fixtures.py +++ b/reframe/core/fixtures.py @@ -462,7 +462,7 @@ def scope(self): def get_name(self, variant_num=None): '''Utility to retrieve the full name of a given fixture variant.''' - return self.cls.fullname(variant_num) + return self.cls.variant_name(variant_num) @property def action(self): diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 87471dbdf7..95ef014a48 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -823,8 +823,8 @@ def is_abstract(cls): ''' return cls.num_variants == 0 - def fullname(cls, variant_num=None): - '''Return the full name of a test for a given test variant number. + def variant_name(cls, variant_num=None): + '''Return the name of a test variant for a given variant number. This function returns a unique name for each of the provided variant numbers. If no ``variant_num`` is provided, this function returns the diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index acd85fafb6..d83f6455f9 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -903,7 +903,7 @@ def __init_subclass__(cls, *, special=False, pin_prefix=False, @deferrable def __rfm_init__(self, *args, prefix=None, **kwargs): if not hasattr(self, 'name'): - self.name = type(self).fullname(self.variant_num) + self.name = type(self).variant_name(self.variant_num) # Add the parameters from the parameterized_test decorator. 
if args or kwargs: From 4fea3187b4f4b0b42f1fdba850144d2ee6784106 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Mon, 22 Nov 2021 21:04:11 +0100 Subject: [PATCH 02/62] Remove `get_name` --- reframe/core/fixtures.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py index e1f206e7d0..f7be071c08 100644 --- a/reframe/core/fixtures.py +++ b/reframe/core/fixtures.py @@ -152,7 +152,7 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): cls = fixture.cls scope = fixture.scope - fname = fixture.get_name(variant_num) + fname = fixture.cls.variant_name(variant_num) variables = fixture.variables reg_names = [] self._registry.setdefault(cls, dict()) @@ -460,10 +460,6 @@ def scope(self): '''The fixture scope.''' return self._scope - def get_name(self, variant_num=None): - '''Utility to retrieve the full name of a given fixture variant.''' - return self.cls.variant_name(variant_num) - @property def action(self): '''Action specified on this fixture.''' From f2a04024a5b62d5d36f349f3be09bb1f5064ec03 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Tue, 23 Nov 2021 17:47:54 +0100 Subject: [PATCH 03/62] Use `fixt` instead of `fix` for variables referring to fixtures --- reframe/core/meta.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 95ef014a48..766b720e56 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -783,12 +783,12 @@ class MyTest(rfm.RegressionTest): rdepth = kwargs.get('_current_depth', 0) if recurse and (max_depth is None or rdepth < max_depth): - for fix, variant in ret['fixtures'].items(): + for fixt, variant in ret['fixtures'].items(): if len(variant) > 1: continue - fcls = cls.fixture_space[fix].cls - ret['fixtures'][fix] = fcls.get_variant_info( + fcls = cls.fixture_space[fixt].cls + ret['fixtures'][fixt] = fcls.get_variant_info( variant[0], recurse=recurse, max_depth=max_depth, _current_depth=rdepth+1 ) From 4420cc3114eb66018cfc3f31d64d1a68c5538368 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Tue, 23 Nov 2021 22:45:55 +0100 Subject: [PATCH 04/62] Deprecate setting the name attribute --- reframe/core/fixtures.py | 5 ++-- reframe/core/meta.py | 8 +++--- reframe/core/pipeline.py | 31 ++++++++++++++++++++---- reframe/frontend/loader.py | 6 ++--- unittests/resources/checks/hellocheck.py | 1 - unittests/test_cli.py | 2 +- 6 files changed, 37 insertions(+), 16 deletions(-) diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py index f7be071c08..899e55f4c2 100644 --- a/reframe/core/fixtures.py +++ b/reframe/core/fixtures.py @@ -282,7 +282,6 @@ def instantiate_all(self): # alongside the other variables specified during the fixture's # declaration. fixtvars = { - 'name': name, 'valid_prog_environs': penv, 'valid_systems': part, **variables @@ -290,8 +289,8 @@ def instantiate_all(self): try: # Instantiate the fixture - inst = cls(variant_num=varnum, variables=fixtvars, - is_fixture=True) + inst = cls(variant_num=varnum, fixt_name=name, + variables=fixtvars) except Exception: exc_info = sys.exc_info() getlogger().warning( diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 766b720e56..270d4a77d0 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -485,6 +485,7 @@ def __call__(cls, *args, **kwargs): # respective points in the parameter and fixture spaces. 
variant_num = kwargs.pop('variant_num', None) param_index, fixt_index = cls._map_variant_num(variant_num) + fixt_name = kwargs.pop('fixt_name', None) # Intercept variables to be set before initialization variables = kwargs.pop('variables', {}) @@ -507,7 +508,8 @@ def __call__(cls, *args, **kwargs): obj._rfm_fixt_variant = fixt_index # Flag the instance as fixture - if is_fixture: + if fixt_name: + obj._rfm_unique_name = fixt_name obj._rfm_is_fixture = True # Set the variables passed to the constructor @@ -841,7 +843,7 @@ def variant_name(cls, variant_num=None): if _use_compact_names(): if cls.num_variants > 1: - name += f'@{variant_num}' + name += f'%{variant_num}' else: pid, fid = cls._map_variant_num(variant_num) @@ -851,6 +853,6 @@ def variant_name(cls, variant_num=None): for v in cls.param_space[pid].values()) if len(cls.fixture_space) > 1: - name += f'@{fid}' + name += f'%{fid}' return name diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index d83f6455f9..694830be1f 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -220,7 +220,7 @@ def pipeline_hooks(cls): #: of every test parameter: ``TestClassName__``. #: Any non-alphanumeric value in a parameter's representation is #: converted to ``_``. - name = variable(typ.Str[r'[^\/]+']) + # name = variable(typ.Str[r'[^\/]+']) #: List of programming environments supported by this test. #: @@ -902,18 +902,18 @@ def __init_subclass__(cls, *, special=False, pin_prefix=False, @deferrable def __rfm_init__(self, *args, prefix=None, **kwargs): - if not hasattr(self, 'name'): - self.name = type(self).variant_name(self.variant_num) + if not self.is_fixture(): + self._rfm_unique_name = type(self).variant_name(self.variant_num) # Add the parameters from the parameterized_test decorator. if args or kwargs: arg_names = map(lambda x: util.toalphanum(str(x)), itertools.chain(args, kwargs.values())) - self.name += '_' + '_'.join(arg_names) + self._rfm_unique_name += '_' + '_'.join(arg_names) # Pass if descr is a required variable. if not hasattr(self, 'descr'): - self.descr = self.name + self.descr = self._rfm_unique_name self._perfvalues = {} @@ -1025,8 +1025,29 @@ def __getattr__(self, name): f'{type(self).__qualname__!r} object has no attribute {name!r}' ) + def __setattr__(self, name, value): + if name == 'name': + user_deprecation_warning( + 'setting the name of the test is deprecated; see for XXX for details' + ) + self._rfm_unique_name = value + else: + super().__setattr__(name, value) + # Export read-only views to interesting fields + @property + def unique_name(self): + '''Return the unique name of this test. + + .. 
versionadded:: 3.10.0
+        '''
+        return self._rfm_unique_name
+
+    @property
+    def name(self):
+        return self._rfm_unique_name
+
     @property
     def current_environ(self):
         '''The programming environment that the regression test is currently
diff --git a/reframe/frontend/loader.py b/reframe/frontend/loader.py
index 3fb94f2ea4..97d46907d8 100644
--- a/reframe/frontend/loader.py
+++ b/reframe/frontend/loader.py
@@ -215,13 +215,13 @@ def load_from_module(self, module):
 
             testfile = module.__file__
             try:
-                conflicted = self._loaded[c.name]
+                conflicted = self._loaded[c.unique_name]
             except KeyError:
-                self._loaded[c.name] = testfile
+                self._loaded[c.unique_name] = testfile
                 tests.append(c)
             else:
                 raise NameConflictError(
-                    f'test {c.name!r} from {testfile!r} '
+                    f'test {c.unique_name!r} from {testfile!r} '
                     f'is already defined in {conflicted!r}'
                 )
 
diff --git a/unittests/resources/checks/hellocheck.py b/unittests/resources/checks/hellocheck.py
index 187238c448..3ecab4086f 100644
--- a/unittests/resources/checks/hellocheck.py
+++ b/unittests/resources/checks/hellocheck.py
@@ -10,7 +10,6 @@
 @rfm.simple_test
 class HelloTest(rfm.RegressionTest):
     def __init__(self):
-        self.name = 'hellocheck'
         self.descr = 'C Hello World test'
 
         # All available systems are supported
diff --git a/unittests/test_cli.py b/unittests/test_cli.py
index 0f5a6015bc..71077d9e93 100644
--- a/unittests/test_cli.py
+++ b/unittests/test_cli.py
@@ -556,7 +556,7 @@ def test_filtering_multiple_criteria(run_reframe):
     returncode, stdout, stderr = run_reframe(
         checkpath=['unittests/resources/checks'],
         action='list',
-        more_options=['-t', 'foo', '-n', 'hellocheck']
+        more_options=['-t', 'foo', '-n', 'HelloTest']
     )
     assert 'Traceback' not in stdout
     assert 'Traceback' not in stderr

From 67b250ae027111315485d96f35e1143281e9192f Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Wed, 24 Nov 2021 10:33:38 +0100
Subject: [PATCH 05/62] WIP: Unique and display names

---
 reframe/core/pipeline.py | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 694830be1f..7d037490be 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1038,15 +1038,31 @@ def __setattr__(self, name, value):
     @property
     def unique_name(self):
-        '''Return the unique name of this test.
+        '''The unique name of this test.
 
         .. versionadded:: 3.10.0
         '''
         return self._rfm_unique_name
 
+    @property
+    def display_name(self):
+        '''A human-readable version of the name of this test.
+
+        This name contains a string representation of the various parameters
+        of this specific test variant.
+
+        .. versionadded:: 3.10.0
+
+        .. note::
+            The display name may not be unique.
+
+        '''
+        return self.unique_name
+
     @property
     def name(self):
-        return self._rfm_unique_name
+        # For backward compatibility
+        return self.unique_name
 
     @property
     def current_environ(self):
         '''The programming environment that the regression test is currently

From 88571ab4635d2081b4a11715ff79449d6621b144 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Wed, 24 Nov 2021 23:40:05 +0100
Subject: [PATCH 06/62] WIP: Add support for display names

---
 reframe/core/pipeline.py | 22 +++++++++++++++++++++-
 reframe/frontend/cli.py  |  2 +-
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 7d037490be..719aa72d51 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1057,7 +1057,27 @@ def display_name(self):
             The display name may not be unique.
 
''' - return self.unique_name + def _format_params(info, prefix=' %'): + name = '' + for p, v in info['params'].items(): + name += f'{prefix}{p}={v}' + + for f, v in info['fixtures'].items(): + if isinstance(v, tuple): + # This is join fixture + continue + + name += _format_params(v, f'{prefix}{f}.') + + return name + + if hasattr(self, '_rfm_display_name'): + return self._rfm_display_name + + cls = type(self) + variant_info = cls.get_variant_info(self.variant_num, recurse=True) + self._rfm_display_name = cls.__name__ + _format_params(variant_info) + return self._rfm_display_name @property def name(self): diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 4a9fd734f2..9b75c91d94 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -57,7 +57,7 @@ def fmt_deps(): location = inspect.getfile(type(check)) if not detailed: - return f'- {check.name} (found in {location!r})' + return f'- {check.display_name} (found in {location!r})' if check.num_tasks > 0: node_alloc_scheme = (f'standard ({check.num_tasks} task(s) -- ' From 3b29ad15f6fee7d9e56e756ac08c34966d9566e5 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sun, 28 Nov 2021 22:29:30 +0100 Subject: [PATCH 07/62] WIP: Custom parameter formatting --- hpctestlib/sciapps/amber/nve.py | 2 +- reframe/core/meta.py | 3 +- reframe/core/parameters.py | 60 ++++++++++++++++++++++++++------- reframe/core/pipeline.py | 17 ++++++++-- reframe/frontend/cli.py | 2 +- reframe/utility/__init__.py | 9 +++-- 6 files changed, 72 insertions(+), 21 deletions(-) diff --git a/hpctestlib/sciapps/amber/nve.py b/hpctestlib/sciapps/amber/nve.py index 1cac8fd983..b3081013a1 100644 --- a/hpctestlib/sciapps/amber/nve.py +++ b/hpctestlib/sciapps/amber/nve.py @@ -86,7 +86,7 @@ class amber_nve_check(rfm.RunOnlyRegressionTest, pin_prefix=True): ('FactorIX_production_NVE', -234188.0, 1.0E-04), ('JAC_production_NVE_4fs', -44810.0, 1.0E-03), ('JAC_production_NVE', -58138.0, 5.0E-04) - ]) + ], fmt=lambda x: x[0]) # Parameter encoding the variant of the test. # diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 270d4a77d0..36f0f8e745 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -843,7 +843,8 @@ def variant_name(cls, variant_num=None): if _use_compact_names(): if cls.num_variants > 1: - name += f'%{variant_num}' + width = utils.count_digits(cls.num_variants) + name += f'_{variant_num:0{width}}' else: pid, fid = cls._map_variant_num(variant_num) diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py index 4f089b91bb..6f02248901 100644 --- a/reframe/core/parameters.py +++ b/reframe/core/parameters.py @@ -15,6 +15,25 @@ from reframe.core.exceptions import ReframeSyntaxError +class _ParamMetadata: + '''Store metadata for parameters. + + Currently this structure holds only the custom display function, but in + the future could contain other metadata such as type information or source + code information. + + ''' + + __slots__ = ('__fmt_fn',) + + def __init__(self, fmt_fn): + self.__fmt_fn = fmt_fn + + @property + def format(self): + return self.__fmt_fn + + class TestParam: '''Regression test paramter class. 
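
The `fmt` argument validated in the hunk below is a new, optional argument of
`parameter()` that controls how a parameter value is rendered in a test's
display name. A minimal sketch of the intended use, in the spirit of the
`amber_nve_check` change above (the test below and its values are
illustrative only, not part of this series):

    import reframe as rfm

    class sample_bench(rfm.RunOnlyRegressionTest):
        # Each value is a (case, energy, tolerance) tuple; formatting only
        # the case name keeps the generated display name short, instead of
        # embedding the repr() of the whole tuple.
        bench = parameter([('JAC_production_NVE', -58138.0, 5.0E-04),
                           ('Cellulose_production_NVE', -443246.0, 5.0E-04)],
                          fmt=lambda x: x[0])
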
@@ -28,7 +47,7 @@ class TestParam: ''' def __init__(self, values=None, - inherit_params=False, filter_params=None): + inherit_params=False, filter_params=None, fmt=None): if values is None: values = [] @@ -46,19 +65,26 @@ def filter_params(x): self.values = tuple(values) - # Validate the filter_param argument - try: - valid = utils.is_trivially_callable(filter_params, non_def_args=1) - except TypeError: - raise TypeError( - 'the provided parameter filter is not a callable' - ) from None - else: - if not valid: - raise TypeError('filter function must take a single argument') + # Validate and set the filter_params function + if (not callable(filter_params) or + not utils.is_trivially_callable(filter_params, non_def_args=1)): + raise TypeError("'filter_params' argument must be a callable " + "accepting a single argument") self.filter_params = filter_params + # Validate and set the alternative function + if fmt is None: + def fmt(x): + return x + + if (not callable(fmt) or + not utils.is_trivially_callable(fmt, non_def_args=1)): + raise TypeError("'fmt' argument must be a callable " + "accepting a single argument") + + self.metadata = _ParamMetadata(fmt) + class ParamSpace(namespaces.Namespace): '''Regression test parameter space @@ -109,7 +135,6 @@ def join(self, other, cls): if (key in self.params and self.params[key] != () and other.params[key] != ()): - raise ReframeSyntaxError( f'parameter space conflict: ' f'parameter {key!r} is defined in more than ' @@ -119,6 +144,7 @@ def join(self, other, cls): self.params[key] = ( other.params.get(key, ()) + self.params.get(key, ()) ) + self.params_meta[key] = other.params_meta[key] def extend(self, cls): '''Extend the parameter space with the local parameter space.''' @@ -138,6 +164,8 @@ def extend(self, cls): f"(parameter {name!r})" ) from None + self.params_meta[name] = p.metadata + # Clear the local param space local_param_space.clear() @@ -198,6 +226,14 @@ def __iter__(self): def params(self): return self._namespace + @property + def params_meta(self): + '''Metadata of parameters''' + if not hasattr(self, '_params_meta'): + self._params_meta = {} + + return self._params_meta + def __len__(self): '''Returns the number of all possible parameter combinations. diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 719aa72d51..1754b3bf2e 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -910,10 +910,11 @@ def __rfm_init__(self, *args, prefix=None, **kwargs): arg_names = map(lambda x: util.toalphanum(str(x)), itertools.chain(args, kwargs.values())) self._rfm_unique_name += '_' + '_'.join(arg_names) + self._rfm_old_style_params = True # Pass if descr is a required variable. if not hasattr(self, 'descr'): - self.descr = self._rfm_unique_name + self.descr = self.display_name self._perfvalues = {} @@ -1057,10 +1058,18 @@ def display_name(self): The display name may not be unique. 
''' + cls = type(self) + def _format_params(info, prefix=' %'): name = '' for p, v in info['params'].items(): - name += f'{prefix}{p}={v}' + try: + format_fn = cls.param_space.params_meta[p].format + except KeyError: + print(cls.fixture_space) + def format_fn(x): return x + + name += f'{prefix}{p}={format_fn(v)}' for f, v in info['fixtures'].items(): if isinstance(v, tuple): @@ -1071,10 +1080,12 @@ def _format_params(info, prefix=' %'): return name + if hasattr(self, '_rfm_old_style_params'): + return self.unique_name + if hasattr(self, '_rfm_display_name'): return self._rfm_display_name - cls = type(self) variant_info = cls.get_variant_info(self.variant_num, recurse=True) self._rfm_display_name = cls.__name__ + _format_params(variant_info) return self._rfm_display_name diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 9b75c91d94..d4247c0d6e 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -57,7 +57,7 @@ def fmt_deps(): location = inspect.getfile(type(check)) if not detailed: - return f'- {check.display_name} (found in {location!r})' + return f'- {check.unique_name} [{check.display_name}]' if check.num_tasks > 0: node_alloc_scheme = (f'standard ({check.num_tasks} task(s) -- ' diff --git a/reframe/utility/__init__.py b/reframe/utility/__init__.py index 577a0b364f..d81937ec9b 100644 --- a/reframe/utility/__init__.py +++ b/reframe/utility/__init__.py @@ -783,8 +783,11 @@ def _parse_node(nodename): return basename, width, nodeid -def _count_digits(n): - '''Count digits of a decimal number.''' +def count_digits(n): + '''Count the digits of a decimal number. + + :meta private: + ''' num_digits = 1 while n > 10: @@ -841,7 +844,7 @@ def __str__(self): abbrev.append(f'{self.name}{s_start}') else: last = start + delta*(size-1) - digits_last = _count_digits(last) + digits_last = count_digits(last) pad = self.width - digits_last nd_range = self.name if pad > 0: From e50d2420daf314350886078dcae7fe0127f98bbf Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Mon, 29 Nov 2021 23:09:40 +0100 Subject: [PATCH 08/62] Fix bug in get_variant_info() --- reframe/core/meta.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 36f0f8e745..e53d384b0f 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -778,8 +778,8 @@ class MyTest(rfm.RegressionTest): pid, fid = cls._map_variant_num(variant_num) ret = dict() - ret['params'] = cls.param_space[pid] - ret['fixtures'] = cls.fixture_space[fid] + ret['params'] = cls.param_space[pid] if pid is not None else {} + ret['fixtures'] = cls.fixture_space[fid] if fid is not None else {} # Get current recursion level rdepth = kwargs.get('_current_depth', 0) From 86796b63390e8c551ba662f6cf787b17d9e8ba83 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Mon, 29 Nov 2021 23:23:24 +0100 Subject: [PATCH 09/62] Remove stale print --- reframe/core/pipeline.py | 1 - 1 file changed, 1 deletion(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 1754b3bf2e..d0257982a0 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1066,7 +1066,6 @@ def _format_params(info, prefix=' %'): try: format_fn = cls.param_space.params_meta[p].format except KeyError: - print(cls.fixture_space) def format_fn(x): return x name += f'{prefix}{p}={format_fn(v)}' From 7253e5d85d565dc27dbfa34addefb2aecfbcff83 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 1 Dec 2021 18:59:44 +0100 Subject: [PATCH 10/62] Refactor parameter space to 
hold parameter objects --- reframe/core/parameters.py | 113 ++++++++++++++++------------------- reframe/core/pipeline.py | 2 +- unittests/test_parameters.py | 27 +++++---- 3 files changed, 68 insertions(+), 74 deletions(-) diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py index 6f02248901..fd5acf38f8 100644 --- a/reframe/core/parameters.py +++ b/reframe/core/parameters.py @@ -15,25 +15,6 @@ from reframe.core.exceptions import ReframeSyntaxError -class _ParamMetadata: - '''Store metadata for parameters. - - Currently this structure holds only the custom display function, but in - the future could contain other metadata such as type information or source - code information. - - ''' - - __slots__ = ('__fmt_fn',) - - def __init__(self, fmt_fn): - self.__fmt_fn = fmt_fn - - @property - def format(self): - return self.__fmt_fn - - class TestParam: '''Regression test paramter class. @@ -51,15 +32,14 @@ def __init__(self, values=None, if values is None: values = [] - # By default, filter out all the parameter values defined in the - # base classes. if not inherit_params: + # By default, filter out all the parameter values defined in the + # base classes. def filter_params(x): return () - - # If inherit_params==True, inherit all the parameter values from the - # base classes as default behaviour. elif filter_params is None: + # If inherit_params==True, inherit all the parameter values from + # the base classes as default behaviour. def filter_params(x): return x @@ -83,7 +63,33 @@ def fmt(x): raise TypeError("'fmt' argument must be a callable " "accepting a single argument") - self.metadata = _ParamMetadata(fmt) + self.__fmt_fn = fmt + + @property + def format(self): + return self.__fmt_fn + + def update(self, other): + '''Update this parameter from another one. + + The values from the other parameter will be filtered according to the + filter function of this one and prepended to this parameter's values. + ''' + + try: + filt_vals = self.filter_params(other.values) + except Exception: + raise + else: + try: + self.values = tuple(filt_vals) + self.values + except TypeError: + raise ReframeSyntaxError( + f"'filter_param' must return an iterable" + ) from None + + def is_abstract(self): + return len(self.values) == 0 class ParamSpace(namespaces.Namespace): @@ -109,7 +115,7 @@ def __init__(self, target_cls=None, illegal_names=None): # Store all param combinations to allow random access. self.__param_combinations = tuple( itertools.product( - *(copy.deepcopy(p) for p in self.params.values()) + *(copy.deepcopy(p.values) for p in self.params.values()) ) ) @@ -128,43 +134,30 @@ def join(self, other, cls): :param other: instance of the ParamSpace class. :param cls: the target class. 
''' - for key in other.params: + for name in other.params: # With multiple inheritance, a single parameter # could be doubly defined and lead to repeated # values - if (key in self.params and - self.params[key] != () and - other.params[key] != ()): + if self.defines(name) and other.defines(name): raise ReframeSyntaxError( f'parameter space conflict: ' - f'parameter {key!r} is defined in more than ' + f'parameter {name!r} is defined in more than ' f'one base class of class {cls.__qualname__!r}' ) - self.params[key] = ( - other.params.get(key, ()) + self.params.get(key, ()) - ) - self.params_meta[key] = other.params_meta[key] + if not self.defines(name): + # If we do not define the parameter, take it from other + self.params[name] = other.params[name] def extend(self, cls): '''Extend the parameter space with the local parameter space.''' local_param_space = getattr(cls, self.local_namespace_name, dict()) for name, p in local_param_space.items(): - try: - filt_vals = p.filter_params(self.params.get(name, ())) - except Exception: - raise - else: - try: - self.params[name] = (tuple(filt_vals) + p.values) - except TypeError: - raise ReframeSyntaxError( - f"'filter_param' must return an iterable " - f"(parameter {name!r})" - ) from None + if name in self.params: + p.update(self.params[name]) - self.params_meta[name] = p.metadata + self.params[name] = p # Clear the local param space local_param_space.clear() @@ -212,6 +205,18 @@ def inject(self, obj, cls=None, params_index=None): for key in self.params: setattr(obj, key, None) + @property + def params(self): + return self._namespace + + def defines(self, name): + '''Return True if parameter is defined. + + A parameter is defined if it exists in the namespace and it is not + abstract. + ''' + return name in self.params and not self.params[name].is_abstract() + def __iter__(self): '''Create a generator object to iterate over the parameter space @@ -222,18 +227,6 @@ def __iter__(self): ''' yield from self.__param_combinations - @property - def params(self): - return self._namespace - - @property - def params_meta(self): - '''Metadata of parameters''' - if not hasattr(self, '_params_meta'): - self._params_meta = {} - - return self._params_meta - def __len__(self): '''Returns the number of all possible parameter combinations. 
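
For reference, the merge semantics of the new `TestParam.update()` can be
exercised in isolation. A small sketch assuming only the `TestParam` class as
defined in the hunk above (the parameter values are made up):

    base = TestParam(values=['a', 'b'])
    derived = TestParam(values=['c'], inherit_params=True,
                        filter_params=lambda x: x[1:])
    derived.update(base)    # filtered base values are prepended
    assert derived.values == ('b', 'c')
    assert not derived.is_abstract()   # abstract == no values left
    assert TestParam().is_abstract()   # default: inherited values dropped
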
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index d0257982a0..f8953b7081 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1064,7 +1064,7 @@ def _format_params(info, prefix=' %'): name = '' for p, v in info['params'].items(): try: - format_fn = cls.param_space.params_meta[p].format + format_fn = cls.param_space[p].format except KeyError: def format_fn(x): return x diff --git a/unittests/test_parameters.py b/unittests/test_parameters.py index f5c5452269..b7adc41ad1 100644 --- a/unittests/test_parameters.py +++ b/unittests/test_parameters.py @@ -42,32 +42,33 @@ def test_params_are_present(): class MyTest(TwoParams): pass - assert MyTest.param_space['P0'] == ('a',) - assert MyTest.param_space['P1'] == ('b',) + assert MyTest.param_space['P0'].values == ('a',) + assert MyTest.param_space['P1'].values == ('b',) def test_abstract_param(): class MyTest(Abstract): pass - assert MyTest.param_space['P0'] == () - assert MyTest.param_space['P1'] == ('b',) + print(MyTest.param_space) + assert MyTest.param_space['P0'].values == () + assert MyTest.param_space['P1'].values == ('b',) def test_param_override(): class MyTest(TwoParams): P1 = parameter(['-']) - assert MyTest.param_space['P0'] == ('a',) - assert MyTest.param_space['P1'] == ('-',) + assert MyTest.param_space['P0'].values == ('a',) + assert MyTest.param_space['P1'].values == ('-',) def test_param_inheritance(): class MyTest(TwoParams): P1 = parameter(['c'], inherit_params=True) - assert MyTest.param_space['P0'] == ('a',) - assert MyTest.param_space['P1'] == ('b', 'c',) + assert MyTest.param_space['P0'].values == ('a',) + assert MyTest.param_space['P1'].values == ('b', 'c',) def test_filter_params(): @@ -77,9 +78,9 @@ class MyTest(ExtendParams): P1 = parameter(inherit_params=True, filter_params=lambda x: list(x[2:])) - assert MyTest.param_space['P0'] == ('a',) - assert MyTest.param_space['P1'] == ('d', 'e',) - assert MyTest.param_space['P2'] == ('f', 'g',) + assert MyTest.param_space['P0'].values == ('a',) + assert MyTest.param_space['P1'].values == ('d', 'e',) + assert MyTest.param_space['P2'].values == ('f', 'g',) def test_wrong_filter(): @@ -312,7 +313,7 @@ class Foo(rfm.RegressionTest): p = 4 p = parameter([1, 2]) - assert Foo.p == (1, 2,) + assert Foo.p.values == (1, 2,) def test_override_parameter(): @@ -353,7 +354,7 @@ def test_class_attr_access(): class MyTest(rfm.RegressionTest): p = parameter([1, 2, 3]) - assert MyTest.p == (1, 2, 3,) + assert MyTest.p.values == (1, 2, 3,) with pytest.raises(ReframeSyntaxError, match='cannot override parameter'): MyTest.p = (4, 5,) From 64fc60d5802cfa772952a437613ef283a0c119b8 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 2 Dec 2021 00:18:38 +0100 Subject: [PATCH 11/62] Support custom formatting of parameterized fixture values --- reframe/core/fixtures.py | 4 ++-- reframe/core/meta.py | 14 ++++++++++---- reframe/core/parameters.py | 20 +++++++++++++------- reframe/core/pipeline.py | 20 +++++++++----------- unittests/test_parameters.py | 22 +++++++++++----------- 5 files changed, 45 insertions(+), 35 deletions(-) diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py index 899e55f4c2..6119a1036f 100644 --- a/reframe/core/fixtures.py +++ b/reframe/core/fixtures.py @@ -508,7 +508,7 @@ def variables(self): class FixtureSpace(namespaces.Namespace): - ''' Regression test fixture space. + '''Regression test fixture space. 
The fixture space is first built by joining the available fixture spaces in the base classes, and later extended by the locally defined fixtures @@ -681,7 +681,7 @@ def __getitem__(self, key): underlying fixture object with that name. ''' if isinstance(key, int): - ret = dict() + ret = {} f_ids = self.__variant_combinations[key] for i, f in enumerate(self.fixtures): ret[f] = f_ids[i] diff --git a/reframe/core/meta.py b/reframe/core/meta.py index e53d384b0f..759532026b 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -774,16 +774,17 @@ class MyTest(rfm.RegressionTest): # 'f1': 0, # } # } + ''' pid, fid = cls._map_variant_num(variant_num) - ret = dict() - ret['params'] = cls.param_space[pid] if pid is not None else {} - ret['fixtures'] = cls.fixture_space[fid] if fid is not None else {} + ret = { + 'params': cls.param_space[pid] if pid is not None else {}, + 'fixtures': cls.fixture_space[fid] if fid is not None else {} + } # Get current recursion level rdepth = kwargs.get('_current_depth', 0) - if recurse and (max_depth is None or rdepth < max_depth): for fixt, variant in ret['fixtures'].items(): if len(variant) > 1: @@ -797,6 +798,11 @@ class MyTest(rfm.RegressionTest): return ret + @property + def raw_params(cls): + '''Expose the raw parameters.''' + return cls.param_space.params + @property def param_space(cls): '''Expose the parameter space.''' diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py index fd5acf38f8..72522362e9 100644 --- a/reframe/core/parameters.py +++ b/reframe/core/parameters.py @@ -249,22 +249,28 @@ def __len__(self): def __getitem__(self, key): '''Access an element in the parameter space. - If the key is an integer, this function will retrieve a given point in - the parameter space. If the key is a parameter name, it will instead - return all the values assigned to that parameter. + If the key is an integer, this will be interpreted as a point in the + parameter space and this function will return a mapping of the + parameter names and their corresponding values. If the key is a + parameter name, it will instead return all the values assigned to that + parameter. If the key is an integer, this function will raise an :class:`IndexError` if the key is out of bounds. + ''' if isinstance(key, int): - ret = dict() + ret = {} val = self.__param_combinations[key] - for i, key in enumerate(self.params): - ret[key] = val[i] + for i, name in enumerate(self.params): + ret[name] = val[i] return ret - return self.params.get(key, ()) + try: + return self.params[key].values + except KeyError: + return () def is_empty(self): return self.params == {} diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index f8953b7081..7742c67e94 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1029,7 +1029,8 @@ def __getattr__(self, name): def __setattr__(self, name, value): if name == 'name': user_deprecation_warning( - 'setting the name of the test is deprecated; see for XXX for details' + 'setting the name of the test is deprecated; see for XXX for details', + from_version='3.10.0' ) self._rfm_unique_name = value else: @@ -1058,16 +1059,10 @@ def display_name(self): The display name may not be unique. 
''' - cls = type(self) - - def _format_params(info, prefix=' %'): + def _format_params(cls, info, prefix=' %'): name = '' for p, v in info['params'].items(): - try: - format_fn = cls.param_space[p].format - except KeyError: - def format_fn(x): return x - + format_fn = cls.raw_params[p].format name += f'{prefix}{p}={format_fn(v)}' for f, v in info['fixtures'].items(): @@ -1075,7 +1070,8 @@ def format_fn(x): return x # This is join fixture continue - name += _format_params(v, f'{prefix}{f}.') + fixt_cls = cls.fixture_space[f].cls + name += _format_params(fixt_cls, v, f'{prefix}{f}.') return name @@ -1085,8 +1081,10 @@ def format_fn(x): return x if hasattr(self, '_rfm_display_name'): return self._rfm_display_name + cls = type(self) + basename = cls.__name__ variant_info = cls.get_variant_info(self.variant_num, recurse=True) - self._rfm_display_name = cls.__name__ + _format_params(variant_info) + self._rfm_display_name = basename + _format_params(cls, variant_info) return self._rfm_display_name @property diff --git a/unittests/test_parameters.py b/unittests/test_parameters.py index b7adc41ad1..e33585f373 100644 --- a/unittests/test_parameters.py +++ b/unittests/test_parameters.py @@ -42,8 +42,8 @@ def test_params_are_present(): class MyTest(TwoParams): pass - assert MyTest.param_space['P0'].values == ('a',) - assert MyTest.param_space['P1'].values == ('b',) + assert MyTest.param_space['P0'] == ('a',) + assert MyTest.param_space['P1'] == ('b',) def test_abstract_param(): @@ -51,24 +51,24 @@ class MyTest(Abstract): pass print(MyTest.param_space) - assert MyTest.param_space['P0'].values == () - assert MyTest.param_space['P1'].values == ('b',) + assert MyTest.param_space['P0'] == () + assert MyTest.param_space['P1'] == ('b',) def test_param_override(): class MyTest(TwoParams): P1 = parameter(['-']) - assert MyTest.param_space['P0'].values == ('a',) - assert MyTest.param_space['P1'].values == ('-',) + assert MyTest.param_space['P0'] == ('a',) + assert MyTest.param_space['P1'] == ('-',) def test_param_inheritance(): class MyTest(TwoParams): P1 = parameter(['c'], inherit_params=True) - assert MyTest.param_space['P0'].values == ('a',) - assert MyTest.param_space['P1'].values == ('b', 'c',) + assert MyTest.param_space['P0'] == ('a',) + assert MyTest.param_space['P1'] == ('b', 'c',) def test_filter_params(): @@ -78,9 +78,9 @@ class MyTest(ExtendParams): P1 = parameter(inherit_params=True, filter_params=lambda x: list(x[2:])) - assert MyTest.param_space['P0'].values == ('a',) - assert MyTest.param_space['P1'].values == ('d', 'e',) - assert MyTest.param_space['P2'].values == ('f', 'g',) + assert MyTest.param_space['P0'] == ('a',) + assert MyTest.param_space['P1'] == ('d', 'e',) + assert MyTest.param_space['P2'] == ('f', 'g',) def test_wrong_filter(): From 888b0ba4aee2c84b392c9e3ba0cc074c304910bb Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 2 Dec 2021 23:49:53 +0100 Subject: [PATCH 12/62] Print full dependency trees --- reframe/frontend/cli.py | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index d4247c0d6e..7117f8e473 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -117,8 +117,7 @@ def list_checks(testcases, printer, detailed=False): deps[t.check.name].append((t, t.deps)) checks = set( - t.check for t in testcases - if detailed or not t.check.is_fixture() + t.check for t in testcases if not t.check.is_fixture() or detailed ) printer.info( 
'\n'.join(format_check(c, deps[c.name], detailed) for c in checks) @@ -126,6 +125,39 @@ def list_checks(testcases, printer, detailed=False): printer.info(f'Found {len(checks)} check(s)\n') +def list_checks2(testcases, printer, detailed=False): + printer.info('[List of matched checks]') + + def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): + if lines is None: + lines = [] + + if printed is None: + printed = set() + + adj = u.deps + for v in adj: + if v.check.name not in printed: + dep_lines(v, prefix=prefix + 2*' ', depth=depth+1, + lines=lines, printed=printed) + + printed.add(v.check.name) + + if depth: + lines.append( + f'{prefix}^{u.check.display_name} [{u.check.unique_name}]' + ) + + return lines + + # We need the leaf test cases to be printed at the leftmost + testcases = list(t for t in testcases if t.in_degree == 0) + for t in testcases: + printer.info(f'- {t.check.display_name} [{t.check.unique_name}]') + for l in reversed(dep_lines(t, prefix=' ')): + printer.info(l) + + def list_tags(testcases, printer): printer.info('[List of unique tags]') tags = set() @@ -930,7 +962,7 @@ def _case_failed(t): # Act on checks if options.list or options.list_detailed: - list_checks(testcases, printer, options.list_detailed) + list_checks2(testcases, printer, options.list_detailed) sys.exit(0) if options.list_tags: From ea2b0e7cf4078f2742d668affb0493dd8ba39a8c Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Fri, 3 Dec 2021 22:22:27 +0100 Subject: [PATCH 13/62] Adapt naming scheme of fixtures --- reframe/core/fixtures.py | 75 +++++++++++++++++++++++--------------- reframe/core/meta.py | 20 +++++----- reframe/core/pipeline.py | 16 +++++++- reframe/frontend/cli.py | 7 +++- unittests/test_fixtures.py | 5 ++- unittests/test_meta.py | 6 +-- 6 files changed, 82 insertions(+), 47 deletions(-) diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py index 6119a1036f..5e622e62d2 100644 --- a/reframe/core/fixtures.py +++ b/reframe/core/fixtures.py @@ -33,28 +33,46 @@ class FixtureData: This data is required to instantiate the fixture. 
''' - def __init__(self, variant_num, envs, parts, variables, scope): - self.data = (variant_num, envs, parts, variables, scope,) + __slots__ = ('__data',) + + def __init__(self, variant, envs, parts, variables, scope, scope_enc): + self.__data = (variant, envs, parts, variables, scope, scope_enc) + + @property + def data(self): + return self.__data @property def variant_num(self): - return self.data[0] + return self.__data[0] @property def environments(self): - return self.data[1] + return self.__data[1] @property def partitions(self): - return self.data[2] + return self.__data[2] @property def variables(self): - return self.data[3] + return self.__data[3] @property def scope(self): - return self.data[4] + return self.__data[4] + + @property + def scope_enc(self): + return self.__data[5] + + def mashup(self): + s = f'{self.variant_num}/{self.scope_enc}' + if self.variables: + s += '/' + '&'.join(f'{k}={self.variables[k]}' + for k in sorted(self.variables)) + + return sha256(s.encode('utf-8')).hexdigest()[:8] class FixtureRegistry: @@ -164,7 +182,7 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): in sorted(variables.items())) ) if self._hash: - vname = '%' + sha256(vname.encode('utf-8')).hexdigest()[:8] + vname = '_' + sha256(vname.encode('utf-8')).hexdigest()[:8] fname += vname @@ -178,8 +196,6 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): # Register the fixture if scope == 'session': # The name is mangled with the system name - name = f'{fname}~{self._sys_name}' - # Select a valid environment supported by a partition for part in valid_partitions: valid_envs = self._filter_valid_environs(part, prog_envs) @@ -189,14 +205,14 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): return [] # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, [valid_envs[0]], [part], variables, scope - ) + fixt_data = FixtureData(variant_num, [valid_envs[0]], [part], + variables, scope, self._sys_name) + name = f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) elif scope == 'partition': for part in valid_partitions: # The mangled name contains the full partition name - name = f'{fname}~{part}' # Select an environment supported by the partition valid_envs = self._filter_valid_environs(part, prog_envs) @@ -204,31 +220,30 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): continue # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, [valid_envs[0]], [part], variables, scope - ) + fixt_data = FixtureData(variant_num, [valid_envs[0]], [part], + variables, scope, part) + name = f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) elif scope == 'environment': for part in valid_partitions: for env in self._filter_valid_environs(part, prog_envs): # The mangled name contains the full part and env names - ext = f'{part}+{env}' - name = f'{fname}~{ext}' - # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, [env], [part], variables, scope - ) + fixt_data = FixtureData(variant_num, [env], [part], + variables, scope, f'{part}+{env}') + name = f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) elif scope == 'test': # The mangled name contains the parent test name. 
- name = f'{fname}~{parent_name}' # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, list(prog_envs), list(valid_partitions), - variables, scope - ) + fixt_data = FixtureData(variant_num, list(prog_envs), + list(valid_partitions), + variables, scope, parent_name) + name = f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) return reg_names @@ -276,7 +291,7 @@ def instantiate_all(self): ret = [] for cls, variants in self._registry.items(): for name, args in variants.items(): - varnum, penv, part, variables, _ = args.data + varnum, penv, part, variables, *_ = args.data # Set the fixture name and stolen env and part from the parent, # alongside the other variables specified during the fixture's @@ -290,7 +305,7 @@ def instantiate_all(self): try: # Instantiate the fixture inst = cls(variant_num=varnum, fixt_name=name, - variables=fixtvars) + fixt_data=args, fixt_vars=fixtvars) except Exception: exc_info = sys.exc_info() getlogger().warning( diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 759532026b..879139aec0 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -486,14 +486,15 @@ def __call__(cls, *args, **kwargs): variant_num = kwargs.pop('variant_num', None) param_index, fixt_index = cls._map_variant_num(variant_num) fixt_name = kwargs.pop('fixt_name', None) + fixt_data = kwargs.pop('fixt_data', None) # Intercept variables to be set before initialization - variables = kwargs.pop('variables', {}) - if not isinstance(variables, collections.abc.Mapping): - raise TypeError("'variables' argument must be a mapping") + fixt_vars = kwargs.pop('fixt_vars', {}) + if not isinstance(fixt_vars, collections.abc.Mapping): + raise TypeError("'fixt_vars' argument must be a mapping") # Intercept is_fixture argument to flag an instance as a fixture - is_fixture = kwargs.pop('is_fixture', False) + # is_fixture = kwargs.pop('is_fixture', False) obj = cls.__new__(cls, *args, **kwargs) @@ -510,10 +511,11 @@ def __call__(cls, *args, **kwargs): # Flag the instance as fixture if fixt_name: obj._rfm_unique_name = fixt_name + obj._rfm_fixt_data = fixt_data obj._rfm_is_fixture = True # Set the variables passed to the constructor - for k, v in variables.items(): + for k, v in fixt_vars.items(): if k in cls.var_space: setattr(obj, k, v) @@ -786,12 +788,12 @@ class MyTest(rfm.RegressionTest): # Get current recursion level rdepth = kwargs.get('_current_depth', 0) if recurse and (max_depth is None or rdepth < max_depth): - for fixt, variant in ret['fixtures'].items(): + for fname, variant in ret['fixtures'].items(): if len(variant) > 1: continue - fcls = cls.fixture_space[fixt].cls - ret['fixtures'][fixt] = fcls.get_variant_info( + fixt = cls.fixture_space[fname] + ret['fixtures'][fname] = fixt.cls.get_variant_info( variant[0], recurse=recurse, max_depth=max_depth, _current_depth=rdepth+1 ) @@ -860,6 +862,6 @@ def variant_name(cls, variant_num=None): for v in cls.param_space[pid].values()) if len(cls.fixture_space) > 1: - name += f'%{fid}' + name += f'_{fid}' return name diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 7742c67e94..7c821f76dd 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -1070,8 +1070,12 @@ def _format_params(cls, info, prefix=' %'): # This is join fixture continue - fixt_cls = cls.fixture_space[f].cls - name += _format_params(fixt_cls, v, f'{prefix}{f}.') + fixt = cls.fixture_space[f] + name += _format_params(fixt.cls, v, f'{prefix}{f}.') + + # Append any 
variables set for the fixtures + for var, val in fixt.variables.items(): + name += f'{prefix}{f}.{var}={val}' return name @@ -1085,6 +1089,14 @@ def _format_params(cls, info, prefix=' %'): basename = cls.__name__ variant_info = cls.get_variant_info(self.variant_num, recurse=True) self._rfm_display_name = basename + _format_params(cls, variant_info) + if self.is_fixture(): + # Add the variable info and scope + fixt_data = self._rfm_fixt_data + suffix = ''.join(f' %{k}={v}' for k, + v in fixt_data.variables.items()) + suffix += f' ~{fixt_data.scope_enc}' + self._rfm_display_name += suffix + return self._rfm_display_name @property diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 7117f8e473..bf53b7dba6 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -152,10 +152,15 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): # We need the leaf test cases to be printed at the leftmost testcases = list(t for t in testcases if t.in_degree == 0) + num_checks = 0 for t in testcases: printer.info(f'- {t.check.display_name} [{t.check.unique_name}]') - for l in reversed(dep_lines(t, prefix=' ')): + num_checks += 1 + for l in reversed(dep_lines(t, prefix=' ')): printer.info(l) + num_checks += 1 + + printer.info(f'Found {num_checks} check(s)\n') def list_tags(testcases, printer): diff --git a/unittests/test_fixtures.py b/unittests/test_fixtures.py index 470fc0b608..53b8d12ab3 100644 --- a/unittests/test_fixtures.py +++ b/unittests/test_fixtures.py @@ -259,13 +259,14 @@ class Foo(rfm.RegressionTest): def test_fixture_data(): '''Test the structure that holds the raw fixture data in the registry.''' - d = fixtures.FixtureData(1, 2, 3, 4, 5) - assert d.data == (1, 2, 3, 4, 5) + d = fixtures.FixtureData(1, 2, 3, 4, 5, 'foo') + assert d.data == (1, 2, 3, 4, 5, 'foo') assert d.variant_num == 1 assert d.environments == 2 assert d.partitions == 3 assert d.variables == 4 assert d.scope == 5 + assert d.scope_enc == 'foo' @pytest.fixture diff --git a/unittests/test_meta.py b/unittests/test_meta.py index 0b250f17dd..8f753958dd 100644 --- a/unittests/test_meta.py +++ b/unittests/test_meta.py @@ -381,13 +381,13 @@ class Foo(MyMeta): v = variable(int, value=1) assert Foo().v == 1 - assert Foo(variables={'v': 10}).v == 10 + assert Foo(fixt_vars={'v': 10}).v == 10 # Non-variables are silently ignored - assert not hasattr(Foo(variables={'vv': 10}), 'vv') + assert not hasattr(Foo(fixt_vars={'vv': 10}), 'vv') with pytest.raises(TypeError): - Foo(variables='not a mapping') + Foo(fixt_vars='not a mapping') def test_variants(MyMeta): From bb57f8b23408961225cf676e967d325d386fea89 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 8 Dec 2021 21:16:22 +0100 Subject: [PATCH 14/62] Fix test counts when listing checks --- reframe/core/pipeline.py | 17 +++++++++++++++-- reframe/frontend/cli.py | 13 ++++++++----- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 7c821f76dd..a675404e09 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -20,6 +20,7 @@ import numbers import os import shutil +import warnings import reframe.core.environments as env import reframe.core.fields as fields @@ -221,6 +222,14 @@ def pipeline_hooks(cls): #: Any non-alphanumeric value in a parameter's representation is #: converted to ``_``. 
# name = variable(typ.Str[r'[^\/]+']) + name = variable( + field=fields.DeprecatedField, + target_field=fields.TypedField(typ.Str[r'[^\/]+']), + message=("'name' is deprecated: " + "use either 'unique_name' or 'display_name': " + "note that setting the name of the test is now disallowed"), + from_version='3.10.0' + ) #: List of programming environments supported by this test. #: @@ -916,6 +925,10 @@ def __rfm_init__(self, *args, prefix=None, **kwargs): if not hasattr(self, 'descr'): self.descr = self.display_name + with warnings.catch_warnings(): + if not hasattr(self, 'name'): + self.name = self._rfm_unique_name + self._perfvalues = {} # Static directories of the regression check @@ -1026,7 +1039,7 @@ def __getattr__(self, name): f'{type(self).__qualname__!r} object has no attribute {name!r}' ) - def __setattr__(self, name, value): + def r__setattr__(self, name, value): if name == 'name': user_deprecation_warning( 'setting the name of the test is deprecated; see for XXX for details', @@ -1100,7 +1113,7 @@ def _format_params(cls, info, prefix=' %'): return self._rfm_display_name @property - def name(self): + def r_name(self): # For backward compatibility return self.unique_name diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index bf53b7dba6..3024c16676 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -127,6 +127,7 @@ def list_checks(testcases, printer, detailed=False): def list_checks2(testcases, printer, detailed=False): printer.info('[List of matched checks]') + unique_checks = set() def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): if lines is None: @@ -141,7 +142,9 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): dep_lines(v, prefix=prefix + 2*' ', depth=depth+1, lines=lines, printed=printed) - printed.add(v.check.name) + printed.add(v.check.unique_name) + if not v.check.is_fixture(): + unique_checks.add(v.check.unique_name) if depth: lines.append( @@ -152,15 +155,15 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): # We need the leaf test cases to be printed at the leftmost testcases = list(t for t in testcases if t.in_degree == 0) - num_checks = 0 for t in testcases: printer.info(f'- {t.check.display_name} [{t.check.unique_name}]') - num_checks += 1 + if not t.check.is_fixture(): + unique_checks.add(t.check.unique_name) + for l in reversed(dep_lines(t, prefix=' ')): printer.info(l) - num_checks += 1 - printer.info(f'Found {num_checks} check(s)\n') + printer.info(f'Found {len(unique_checks)} check(s)\n') def list_tags(testcases, printer): From f84b010e528df2170dd390946e03a23a51733ac9 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 8 Dec 2021 21:37:24 +0100 Subject: [PATCH 15/62] Revert changes in pipeline.py --- reframe/core/pipeline.py | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index a675404e09..7c821f76dd 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -20,7 +20,6 @@ import numbers import os import shutil -import warnings import reframe.core.environments as env import reframe.core.fields as fields @@ -222,14 +221,6 @@ def pipeline_hooks(cls): #: Any non-alphanumeric value in a parameter's representation is #: converted to ``_``. 
# name = variable(typ.Str[r'[^\/]+']) - name = variable( - field=fields.DeprecatedField, - target_field=fields.TypedField(typ.Str[r'[^\/]+']), - message=("'name' is deprecated: " - "use either 'unique_name' or 'display_name': " - "note that setting the name of the test is now disallowed"), - from_version='3.10.0' - ) #: List of programming environments supported by this test. #: @@ -925,10 +916,6 @@ def __rfm_init__(self, *args, prefix=None, **kwargs): if not hasattr(self, 'descr'): self.descr = self.display_name - with warnings.catch_warnings(): - if not hasattr(self, 'name'): - self.name = self._rfm_unique_name - self._perfvalues = {} # Static directories of the regression check @@ -1039,7 +1026,7 @@ def __getattr__(self, name): f'{type(self).__qualname__!r} object has no attribute {name!r}' ) - def r__setattr__(self, name, value): + def __setattr__(self, name, value): if name == 'name': user_deprecation_warning( 'setting the name of the test is deprecated; see for XXX for details', @@ -1113,7 +1100,7 @@ def _format_params(cls, info, prefix=' %'): return self._rfm_display_name @property - def r_name(self): + def name(self): # For backward compatibility return self.unique_name From caca8cb2c8c7079e9e2ca9bbe19e497b53cc8de4 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 9 Dec 2021 00:24:39 +0100 Subject: [PATCH 16/62] Fix deprecation warnings --- reframe/core/meta.py | 2 +- reframe/core/pipeline.py | 14 +- reframe/frontend/loader.py | 4 +- unittests/resources/checks/frontend_checks.py | 217 +++++++----------- .../checks_unlisted/kbd_interrupt.py | 10 +- unittests/test_cli.py | 24 +- unittests/test_dependencies.py | 16 +- unittests/test_pipeline.py | 154 ++++++------- unittests/test_policies.py | 139 +++++++---- 9 files changed, 289 insertions(+), 291 deletions(-) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 879139aec0..cf6fdea674 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -845,7 +845,7 @@ def variant_name(cls, variant_num=None): :meta private: ''' - name = cls.__qualname__ + name = cls.__name__ if variant_num is None: return name diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 7c821f76dd..3162493454 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -9,7 +9,8 @@ __all__ = [ 'CompileOnlyRegressionTest', 'RegressionTest', 'RunOnlyRegressionTest', - 'DEPEND_BY_ENV', 'DEPEND_EXACT', 'DEPEND_FULLY', 'final', 'RegressionMixin' + 'DEPEND_BY_ENV', 'DEPEND_EXACT', 'DEPEND_FULLY', 'final', 'RegressionMixin', + 'make_test' ] @@ -116,6 +117,13 @@ def _wrapped(*args, **kwargs): _RFM_TEST_KIND_RUN = 2 +def make_test(name, bases, body, **kwargs): + namespace = RegressionTestMeta.__prepare__(name, bases, **kwargs) + namespace.update(body) + cls = RegressionTestMeta(name, bases, namespace, **kwargs) + return cls + + class RegressionMixin(metaclass=RegressionTestMeta): '''Base mixin class for regression tests. 
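
The `make_test()` entry point added above drives `RegressionTestMeta`
programmatically: the body mapping is merged into the prepared class
namespace, so the resulting class behaves like one written with a regular
`class` statement. A minimal usage sketch (the test itself is illustrative;
`sn.assert_true(1)` mirrors the simplified sanity checks used in
`frontend_checks.py` later in this series):

    import reframe as rfm
    import reframe.utility.sanity as sn
    from reframe.core.pipeline import make_test

    HelloTest = make_test(
        'HelloTest', (rfm.RunOnlyRegressionTest,),
        {
            'valid_systems': ['*'],
            'valid_prog_environs': ['*'],
            'executable': 'echo hello',
            'sanity_patterns': sn.assert_true(1)
        }
    )
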
@@ -2312,10 +2320,10 @@ def __eq__(self, other): if not isinstance(other, RegressionTest): return NotImplemented - return self.name == other.name + return self.unique_name == other.unique_name def __hash__(self): - return hash(self.name) + return hash(self.unique_name) def __rfm_json_decode__(self, json): # 'tags' are decoded as list, so we convert them to a set diff --git a/reframe/frontend/loader.py b/reframe/frontend/loader.py index 97d46907d8..f70ec99781 100644 --- a/reframe/frontend/loader.py +++ b/reframe/frontend/loader.py @@ -100,8 +100,8 @@ def _validate_check(self, check): getlogger().warning( f'{checkfile}: {attr!r} is not copyable; ' f'not copyable attributes are not ' - f'allowed inside the __init__() method; ' - f'consider setting them in a pipeline hook instead' + f'allowed inside the __init__() method or post-init hooks; ' + f'consider setting them in another pipeline hook instead' ) return False diff --git a/unittests/resources/checks/frontend_checks.py b/unittests/resources/checks/frontend_checks.py index a7ef7b7d0b..516ba66774 100644 --- a/unittests/resources/checks/frontend_checks.py +++ b/unittests/resources/checks/frontend_checks.py @@ -18,21 +18,18 @@ class BaseFrontendCheck(rfm.RunOnlyRegressionTest): - def __init__(self): - self.local = True - self.executable = 'echo hello && echo perf: 10 Gflop/s' - self.sanity_patterns = sn.assert_found('hello', self.stdout) - self.tags = {type(self).__name__} - self.maintainers = ['VK'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo hello && echo perf: 10 Gflop/s' + local = True + + @sanity_function + def validate_output(self): + return sn.assert_found('hello', self.stdout) @rfm.simple_test class BadSetupCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - @run_after('setup') def raise_error(self): raise ReframeError('Setup failure') @@ -40,12 +37,6 @@ def raise_error(self): @rfm.simple_test class BadSetupCheckEarly(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.local = False - @run_before('setup') def raise_error_early(self): raise ReframeError('Setup failure') @@ -53,54 +44,39 @@ def raise_error_early(self): @rfm.simple_test class NoSystemCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = [] - self.valid_prog_environs = ['*'] + valid_systems = [] @rfm.simple_test class NoPrgEnvCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = [] + valid_prog_environs = [] @rfm.simple_test class SanityFailureCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sanity_patterns = sn.assert_found('foo', self.stdout) + @sanity_function + def validate_output(self): + return sn.assert_found('foo', self.stdout) @rfm.simple_test class PerformanceFailureCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.perf_patterns = { - 'perf': sn.extractsingle(r'perf: (\d+)', self.stdout, 1, int) - } - self.reference = { - '*': { - 'perf': (20, -0.1, 0.1, 'Gflop/s') - } + reference = { + '*': { + 'perf': (20, -0.1, 0.1, 'Gflop/s') } + } + + @performance_function('Gflop/s') + def perf(self): + return sn.extractsingle(r'perf: (\d+)', self.stdout, 1, int) @rfm.simple_test class 
CustomPerformanceFailureCheck(BaseFrontendCheck, special=True):
     '''Simulate a performance check that completely ignores logging'''

-    def __init__(self):
-        super().__init__()
-        self.valid_systems = ['*']
-        self.valid_prog_environs = ['*']
-        self.strict_check = False
+    strict_check = False

     def check_performance(self):
         raise PerformanceError('performance failure')
@@ -109,12 +85,8 @@ def check_performance(self):
 class KeyboardInterruptCheck(BaseFrontendCheck, special=True):
     '''Simulate keyboard interrupt during test's execution.'''

-    def __init__(self, phase='wait'):
-        super().__init__()
-        self.executable = 'sleep 1'
-        self.valid_systems = ['*']
-        self.valid_prog_environs = ['*']
-        self.phase = phase
+    executable = 'sleep 1'
+    phase = variable(str)

     @run_before('setup')
     def raise_before_setup(self):
@@ -126,98 +98,83 @@ def run_wait(self):
         if self.phase == 'wait':
             raise KeyboardInterrupt
         else:
-            super().wait()
+            return super().run_wait()


 class SystemExitCheck(BaseFrontendCheck, special=True):
     '''Simulate system exit from within a check.'''

-    def __init__(self):
-        super().__init__()
-        self.valid_systems = ['*']
-        self.valid_prog_environs = ['*']
-
     def run_wait(self):
         # We do our nasty stuff in wait() to make things more complicated
         sys.exit(1)


 @rfm.simple_test
-class CleanupFailTest(rfm.RunOnlyRegressionTest):
-    def __init__(self):
-        self.valid_systems = ['*']
-        self.valid_prog_environs = ['*']
-        self.sourcesdir = None
-        self.executable = 'echo foo'
-        self.sanity_patterns = sn.assert_found(r'foo', self.stdout)
-
+class CleanupFailTest(BaseFrontendCheck):
     @run_before('cleanup')
     def fail(self):
         # Make this test fail on purpose
         raise Exception


-class SleepCheck(BaseFrontendCheck):
-    _next_id = 0
-
-    def __init__(self, sleep_time):
-        super().__init__()
-        self.name = '%s_%s' % (self.name, SleepCheck._next_id)
-        self.sourcesdir = None
-        self.sleep_time = sleep_time
-        self.executable = 'python3'
+class SleepCheck(rfm.RunOnlyRegressionTest, special=True):
+    sleep_time = variable(float, int)
+    poll_fail = variable(str, type(None), value=None)
+    print_timestamp = (
+        'python3 -c "import time; print(time.time(), flush=True)"'
+    )
+    executable = 'python3'
+    prerun_cmds = [print_timestamp]
+    postrun_cmds = [print_timestamp]
+    sanity_patterns = sn.assert_true(1)
+    valid_systems = ['*']
+    valid_prog_environs = ['*']
+
+    @run_before('run')
+    def set_sleep_time(self):
         self.executable_opts = [
-            '-c "from time import sleep; sleep(%s)"' % sleep_time
+            f'-c "import time; time.sleep({self.sleep_time})"'
         ]
-        print_timestamp = (
-            "python3 -c \"from datetime import datetime; "
-            "print(datetime.today().strftime('%s.%f'), flush=True)\"")
-        self.prerun_cmds = [print_timestamp]
-        self.postrun_cmds = [print_timestamp]
-        self.sanity_patterns = sn.assert_found(r'.*', self.stdout)
-        self.valid_systems = ['*']
-        self.valid_prog_environs = ['*']
-        SleepCheck._next_id += 1
-
-
-class SleepCheckPollFail(SleepCheck, special=True):
-    '''Emulate a test failing in the polling phase.'''

     def run_complete(self):
-        raise ValueError
+        if self.poll_fail == 'early':
+            # Emulate a test failing in the polling phase.
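+            # (the runner calls run_complete() on every poll, so raising
+            # here makes the test fail on the very first poll)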
+ raise ValueError + elif self.poll_fail == 'late' and self.job.finished(): + # Emulate a test failing in the polling phase after the test has + # finished + raise ValueError + return super().run_complete() -class SleepCheckPollFailLate(SleepCheck, special=True): - '''Emulate a test failing in the polling phase - after the test has finished.''' - def run_complete(self): - if self._job.finished(): - raise ValueError +class RetriesCheck(rfm.RunOnlyRegressionTest): + filename = variable(str) + num_runs = variable(int) + local = True + valid_systems = ['*'] + valid_prog_environs = ['*'] -class RetriesCheck(BaseFrontendCheck): - def __init__(self, run_to_pass, filename): - super().__init__() - self.sourcesdir = None - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.prerun_cmds = ['current_run=$(cat %s)' % filename] - self.executable = 'echo $current_run' - self.postrun_cmds = ['((current_run++))', - 'echo $current_run > %s' % filename] - self.sanity_patterns = sn.assert_found('%d' % run_to_pass, self.stdout) + @run_before('run') + def set_exec(self): + self.executable = f''' +current_run=$(cat {self.filename}) +echo $current_run +((current_run++)) +echo $current_run > {self.filename}''' + + @sanity_function + def validate(self): + return sn.assert_found(str(self.num_runs), self.stdout) class SelfKillCheck(rfm.RunOnlyRegressionTest, special=True): - def __init__(self): - self.local = True - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.executable = 'echo hello' - self.sanity_patterns = sn.assert_found('hello', self.stdout) - self.tags = {type(self).__name__} - self.maintainers = ['TM'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + local = True + executable = 'echo' + sanity_patterns = sn.assert_true(1) def run(self): super().run() @@ -226,13 +183,12 @@ def run(self): class CompileFailureCheck(rfm.RegressionTest): - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sanity_patterns = sn.assert_found(r'hello', self.stdout) - self.sourcesdir = None - self.sourcepath = 'x.c' - self.prebuild_cmds = ['echo foo > x.c'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + sanity_patterns = sn.assert_true(1) + sourcesdir = None + sourcepath = 'x.c' + prebuild_cmds = ['echo foo > x.c'] # The following tests do not validate and should not be loaded @@ -241,23 +197,26 @@ def __init__(self): class TestWithGenerator(rfm.RunOnlyRegressionTest): '''This test is invalid in ReFrame and the loader must not load it''' - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + @run_after('init') + def post_init(self): def foo(): yield True - self.sanity_patterns = sn.defer(foo()) + self.x = foo() @rfm.simple_test class TestWithFileObject(rfm.RunOnlyRegressionTest): '''This test is invalid in ReFrame and the loader must not load it''' - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + + @run_after('init') + def file_handler(self): with open(__file__) as fp: pass diff --git a/unittests/resources/checks_unlisted/kbd_interrupt.py b/unittests/resources/checks_unlisted/kbd_interrupt.py index 7cc86d0f06..3ebd9ef402 100644 --- a/unittests/resources/checks_unlisted/kbd_interrupt.py +++ b/unittests/resources/checks_unlisted/kbd_interrupt.py @@ -15,12 +15,10 @@ @rfm.simple_test class KeyboardInterruptCheck(rfm.RunOnlyRegressionTest): - def __init__(self): - 
self.local = True - self.executable = 'sleep 1' - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.tags = {self.name} + local = True + executable = 'sleep 1' + valid_systems = ['*'] + valid_prog_environs = ['*'] @run_before('setup') def raise_keyboard_interrupt(self): diff --git a/unittests/test_cli.py b/unittests/test_cli.py index 71077d9e93..c7ee1039fd 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -244,7 +244,7 @@ def test_check_submit_success(run_reframe, remote_exec_ctx): def test_check_failure(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'BadSetupCheck'] + more_options=['-n', 'BadSetupCheck$'] ) assert 'FAILED' in stdout assert returncode != 0 @@ -253,7 +253,7 @@ def test_check_failure(run_reframe): def test_check_setup_failure(run_reframe): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'BadSetupCheckEarly'], + more_options=['-n', 'BadSetupCheckEarly'], local=False, ) @@ -268,7 +268,7 @@ def test_check_kbd_interrupt(run_reframe): checkpath=[ 'unittests/resources/checks_unlisted/kbd_interrupt.py' ], - more_options=['-t', 'KeyboardInterruptCheck'], + more_options=['-n', 'KeyboardInterruptCheck'], local=False, ) assert 'Traceback' not in stdout @@ -280,7 +280,7 @@ def test_check_kbd_interrupt(run_reframe): def test_check_sanity_failure(run_reframe, tmp_path): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck'] + more_options=['-n', 'SanityFailureCheck'] ) assert 'FAILED' in stdout @@ -297,7 +297,7 @@ def test_check_sanity_failure(run_reframe, tmp_path): def test_dont_restage(run_reframe, tmp_path): run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck'] + more_options=['-n', 'SanityFailureCheck'] ) # Place a random file in the test's stage directory and rerun with @@ -307,7 +307,7 @@ def test_dont_restage(run_reframe, tmp_path): (stagedir / 'foobar').touch() returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck', + more_options=['-n', 'SanityFailureCheck', '--dont-restage', '--max-retries=1'] ) assert os.path.exists(stagedir / 'foobar') @@ -340,7 +340,7 @@ def test_checkpath_symlink(run_reframe, tmp_path): def test_performance_check_failure(run_reframe, tmp_path, perflogdir): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'PerformanceFailureCheck'] + more_options=['-n', 'PerformanceFailureCheck'] ) assert 'FAILED' in stdout @@ -360,7 +360,7 @@ def test_perflogdir_from_env(run_reframe, tmp_path, monkeypatch): monkeypatch.setenv('FOODIR', str(tmp_path / 'perflogs')) returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'PerformanceFailureCheck'], + more_options=['-n', 'PerformanceFailureCheck'], perflogdir='$FOODIR' ) assert returncode == 1 @@ -373,7 +373,7 @@ def test_perflogdir_from_env(run_reframe, tmp_path, monkeypatch): def test_performance_report(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'PerformanceFailureCheck', '--performance-report'] + more_options=['-n', 'PerformanceFailureCheck', 
'--performance-report'] ) assert r'PERFORMANCE REPORT' in stdout assert r'perf: 10 Gflop/s' in stdout @@ -382,7 +382,7 @@ def test_performance_report(run_reframe): def test_skip_system_check_option(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['--skip-system-check', '-t', 'NoSystemCheck'] + more_options=['--skip-system-check', '-n', 'NoSystemCheck'] ) assert 'PASSED' in stdout assert returncode == 0 @@ -391,7 +391,7 @@ def test_skip_system_check_option(run_reframe): def test_skip_prgenv_check_option(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['--skip-prgenv-check', '-t', 'NoPrgEnvCheck'] + more_options=['--skip-prgenv-check', '-n', 'NoPrgEnvCheck'] ) assert 'PASSED' in stdout assert returncode == 0 @@ -717,7 +717,7 @@ def test_overwrite_module_path(run_reframe, user_exec_ctx): def test_failure_stats(run_reframe): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck', '--failure-stats'] + more_options=['-n', 'SanityFailureCheck', '--failure-stats'] ) assert r'FAILURE STATISTICS' in stdout assert r'sanity 1 [SanityFailureCheck' in stdout diff --git a/unittests/test_dependencies.py b/unittests/test_dependencies.py index 706f20f34c..54512dc8ea 100644 --- a/unittests/test_dependencies.py +++ b/unittests/test_dependencies.py @@ -519,16 +519,14 @@ def test_build_deps_empty(default_exec_ctx): @pytest.fixture def make_test(): - class MyTest(rfm.RegressionTest): - def __init__(self, name): - self.name = name - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.executable = 'echo' - self.executable_opts = [name] - def _make_test(name): - return MyTest(name) + class _Test(rfm.RegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo' + executable_opts = [name] + + return rfm.make_test(name, (_Test,), {})() return _make_test diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index d76da6be5a..afe9c3a14d 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -140,22 +140,20 @@ def _container_exec_ctx(platform): def test_eq(): - class T0(rfm.RegressionTest): - def __init__(self): - self.name = 'T0' - - class T1(rfm.RegressionTest): - def __init__(self): - self.name = 'T0' - - t0, t1 = T0(), T1() - assert t0 == t1 - assert hash(t0) == hash(t1) + T0 = rfm.make_test('T0', (rfm.RegressionTest,), {}) + T1 = rfm.make_test('T1', (rfm.RegressionTest,), {}) + T2 = rfm.make_test('T1', (rfm.RegressionTest,), {}) - t1.name = 'T1' + t0, t1, t2 = T0(), T1(), T2() assert t0 != t1 assert hash(t0) != hash(t1) + # T1 and T2 are different classes but have the same name, so the + # corresponding tests should compare equal + assert T1 is not T2 + assert t1 == t2 + assert hash(t1) == hash(t2) + def test_environ_setup(hellotest, local_exec_ctx): # Use test environment for the regression check @@ -524,11 +522,7 @@ def __init__(self): def test_extra_resources(HelloTest, testsys_system): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.local = True + local = True @run_after('setup') def set_resources(self): @@ -612,7 +606,6 @@ def test_setup_hooks(HelloTest, local_exec_ctx): class MyTest(HelloTest): def __init__(self): 
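            # `count` is updated by the hooks below to track their invocation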
super().__init__() - self.name = type(self).__name__ self.executable = os.path.join('.', self.name) self.count = 0 @@ -634,11 +627,7 @@ def postfoo(self): def test_compile_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.count = 0 + count = variable(int, value=0) @run_before('compile') def setflags(self): @@ -659,11 +648,6 @@ def check_executable(self): def test_run_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - @run_before('run') def setflags(self): self.postrun_cmds = ['echo hello > greetings.txt'] @@ -681,11 +665,7 @@ def check_executable(self): def test_multiple_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_after('setup') def x(self): @@ -707,11 +687,7 @@ def z(self): def test_stacked_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_before('setup') @run_after('setup') @@ -733,11 +709,7 @@ class MyTest(rfm.RunOnlyRegressionTest, HelloTest): def test_inherited_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class BaseTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_after('setup') def x(self): @@ -818,11 +790,7 @@ def test_inherited_hooks_order(weird_mro_test, local_exec_ctx): def test_inherited_hooks_from_instantiated_tests(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class T0(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_after('setup') def x(self): @@ -847,12 +815,8 @@ def y(self): def test_overriden_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class BaseTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 - self.foo = 0 + var = variable(int, value=0) + foo = variable(int, value=0) @run_after('setup') def x(self): @@ -881,12 +845,8 @@ def y(self): def test_disabled_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class BaseTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 - self.foo = 0 + var = variable(int, value=0) + foo = variable(int, value=0) @run_after('setup') def x(self): @@ -914,18 +874,12 @@ def test_require_deps(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class T0(HelloTest): - def __init__(self): - super().__init__() - 
self.name = type(self).__name__
-            self.executable = os.path.join('.', self.name)
-            self.x = 1
+        x = variable(int, value=1)

     @test_util.custom_prefix('unittests/resources/checks')
     class T1(HelloTest):
-        def __init__(self):
-            super().__init__()
-            self.name = type(self).__name__
-            self.executable = os.path.join('.', self.name)
+        @run_after('init')
+        def setdeps(self):
             self.depends_on('T0')

         @require_deps
@@ -959,7 +913,7 @@ def __init__(self, a, b):

     test = MyTest(1, 2)
     assert os.path.abspath(os.path.dirname(__file__)) == test.prefix
-    assert 'test_regression_test_name.<locals>.MyTest_1_2' == test.name
+    assert 'MyTest_1_2' == test.name


 def test_strange_test_names():
@@ -976,7 +930,7 @@ def __init__(self, a, b):
         self.b = b

     test = MyTest('(a*b+c)/12', C(33))
-    assert ('test_strange_test_names.<locals>.MyTest__a_b_c__12_C_33_' ==
+    assert ('MyTest__a_b_c__12_C_33_' ==
             test.name)


@@ -991,7 +945,7 @@ def __init__(self):
         super().__init__(1, 2)

     test = MyTest()
-    assert 'test_name_user_inheritance.<locals>.MyTest' == test.name
+    assert 'MyTest' == test.name


 def test_name_runonly_test():
@@ -1002,7 +956,7 @@ def __init__(self, a, b):

     test = MyTest(1, 2)
     assert os.path.abspath(os.path.dirname(__file__)) == test.prefix
-    assert 'test_name_runonly_test.<locals>.MyTest_1_2' == test.name
+    assert 'MyTest_1_2' == test.name


 def test_name_compileonly_test():
@@ -1013,7 +967,7 @@ def __init__(self, a, b):

     test = MyTest(1, 2)
     assert os.path.abspath(os.path.dirname(__file__)) == test.prefix
-    assert 'test_name_compileonly_test.<locals>.MyTest_1_2' == test.name
+    assert 'MyTest_1_2' == test.name


 def test_trap_job_errors_without_sanity_patterns(local_exec_ctx):
@@ -1430,18 +1384,22 @@ def container_test(tmp_path):
     def _container_test(platform, image):
         @test_util.custom_prefix(tmp_path)
         class ContainerTest(rfm.RunOnlyRegressionTest):
-            def __init__(self):
-                self.name = 'container_test'
-                self.valid_prog_environs = ['*']
-                self.valid_systems = ['*']
+            valid_prog_environs = ['*']
+            valid_systems = ['*']
+            prerun_cmds = ['touch foo']
+
+            @run_after('init')
+            def setup_container_platf(self):
                 self.container_platform = platform
                 self.container_platform.image = image
                 self.container_platform.command = (
                     f"bash -c 'cd {_STAGEDIR_MOUNT}; pwd; ls; "
                     f"cat /etc/os-release'"
                 )
-                self.prerun_cmds = ['touch foo']
-                self.sanity_patterns = sn.all([
+
+            @sanity_function
+            def assert_os_release(self):
+                return sn.all([
                     sn.assert_found(rf'^{_STAGEDIR_MOUNT}', self.stdout),
                     sn.assert_found(r'^foo', self.stdout),
                     sn.assert_found(
@@ -1536,3 +1494,39 @@ def access_topo(self):

     # This test should run to completion without problems
     _run(EchoTest(), *local_exec_ctx)
+
+
+def test_make_test_without_builtins(local_exec_ctx):
+    hello_cls = rfm.make_test(
+        'HelloTest', (rfm.RunOnlyRegressionTest,),
+        {
+            'valid_systems': ['*'],
+            'valid_prog_environs': ['*'],
+            'executable': 'echo',
+            'sanity_patterns': sn.assert_true(1)
+        }
+    )
+
+    assert hello_cls.__name__ == 'HelloTest'
+    _run(hello_cls(), *local_exec_ctx)
+
+
+def test_make_test_with_builtins(local_exec_ctx):
+    class _X(rfm.RunOnlyRegressionTest):
+        valid_systems = ['*']
+        valid_prog_environs = ['*']
+        executable = 'echo'
+        message = variable(str)
+
+        @run_before('run')
+        def set_message(self):
+            self.executable_opts = [self.message]
+
+        @sanity_function
+        def validate(self):
+            return sn.assert_found(self.message, self.stdout)
+
+    hello_cls = rfm.make_test('HelloTest', (_X,), {})
+    hello_cls.setvar('message', 'hello')
+    assert hello_cls.__name__ == 'HelloTest'
+    _run(hello_cls(), *local_exec_ctx)
diff --git 
a/unittests/test_policies.py b/unittests/test_policies.py index 5baf9ff94a..4ba5580107 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -12,6 +12,7 @@ import sys import time +import reframe as rfm import reframe.core.runtime as rt import reframe.frontend.dependencies as dependencies import reframe.frontend.executors as executors @@ -35,11 +36,9 @@ CompileFailureCheck, KeyboardInterruptCheck, RetriesCheck, - SelfKillCheck, SleepCheck, - SleepCheckPollFail, - SleepCheckPollFailLate, - SystemExitCheck, + SelfKillCheck, + SystemExitCheck ) @@ -80,6 +79,36 @@ def testsys_exec_ctx(make_exec_ctx_g): yield from make_exec_ctx_g(system='testsys:gpu') +def make_check(cls, *, alt_name=None, **vars): + if alt_name: + cls = rfm.make_test(alt_name, (cls,), {}) + + for k, v in vars.items(): + cls.setvar(k, v) + + return cls() + + +def make_kbd_check(phase='wait'): + return make_check(KeyboardInterruptCheck, phase=phase) + + +@pytest.fixture +def make_sleep_check(): + test_id = 0 + + def _make_check(sleep_time, poll_fail=None): + nonlocal test_id + test = make_check(SleepCheck, + sleep_time=sleep_time, + poll_fail=poll_fail, + alt_name=f'SleepCheck_{test_id}') + test_id += 1 + return test + + return _make_check + + @pytest.fixture(params=[policies.SerialExecutionPolicy, policies.AsynchronousExecutionPolicy]) def make_runner(request): @@ -143,7 +172,7 @@ class _T1(rfm.RunOnlyRegressionTest): sanity_patterns = sn.assert_true(1) def __init__(self): - self.depends_on(_T0.__qualname__) + self.depends_on('_T0') cases = executors.generate_testcases([_T0(), _T1()]) depgraph, _ = dependencies.build_deps(cases) @@ -403,7 +432,7 @@ def test_force_local_execution(make_runner, make_cases, testsys_exec_ctx): def test_kbd_interrupt_within_test(make_runner, make_cases, common_exec_ctx): runner = make_runner() with pytest.raises(KeyboardInterrupt): - runner.runall(make_cases([KeyboardInterruptCheck()])) + runner.runall(make_cases([make_kbd_check()])) stats = runner.stats assert 1 == len(stats.failed()) @@ -447,14 +476,15 @@ def test_pass_in_retries(make_runner, make_cases, tmp_path, common_exec_ctx): tmpfile = tmp_path / 'out.txt' tmpfile.write_text('0\n') runner = make_runner(max_retries=3) - pass_run_no = 2 - runner.runall(make_cases([RetriesCheck(pass_run_no, tmpfile)])) + runner.runall(make_cases([ + make_check(RetriesCheck, filename=str(tmpfile), num_runs=2) + ])) # Ensure that the test passed after retries in run `pass_run_no` assert 1 == runner.stats.num_cases() assert_runall(runner) assert 1 == len(runner.stats.failed(run=0)) - assert pass_run_no == rt.runtime().current_run + assert 2 == rt.runtime().current_run assert 0 == len(runner.stats.failed()) @@ -589,12 +619,14 @@ def _read_timestamps(tasks): return begin_stamps, end_stamps -def test_concurrency_unlimited(async_runner, make_cases, make_exec_ctx): +def test_concurrency_unlimited(async_runner, make_cases, + make_sleep_check, make_exec_ctx): num_checks = 3 make_exec_ctx(options=max_jobs_opts(num_checks)) runner, monitor = async_runner - runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)])) + runner.runall(make_cases([make_sleep_check(.5) + for i in range(num_checks)])) # Ensure that all tests were run and without failures. 
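    # (each make_sleep_check(.5) call builds a uniquely named SleepCheck
    # variant, so three distinct test cases enter the runner)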
    assert num_checks == runner.stats.num_cases()
@@ -615,13 +647,15 @@ def test_concurrency_unlimited(async_runner, make_cases, make_exec_ctx):
         pytest.skip('the system seems too much loaded.')


-def test_concurrency_limited(async_runner, make_cases, make_exec_ctx):
+def test_concurrency_limited(async_runner, make_cases,
+                             make_sleep_check, make_exec_ctx):
     # The number of checks must be <= 2*max_jobs.
     num_checks, max_jobs = 5, 3
     make_exec_ctx(options=max_jobs_opts(max_jobs))

     runner, monitor = async_runner
-    runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)]))
+    runner.runall(make_cases([make_sleep_check(.5)
+                              for i in range(num_checks)]))

     # Ensure that all tests were run and without failures.
     assert num_checks == runner.stats.num_cases()
@@ -642,13 +676,13 @@
                              end_stamps[:-max_jobs]))
     assert all(begin_after_end)

-    # NOTE: to ensure that these remaining jobs were also run
-    # in parallel one could do the command hereafter; however, it would
-    # require to substantially increase the sleep time (in SleepCheck),
-    # because of the delays in rescheduling (1s, 2s, 3s, 1s, 2s,...).
-    # We currently prefer not to do this last concurrency test to avoid an
-    # important prolongation of the unit test execution time.
-    # self.assertTrue(self.begin_stamps[-1] < self.end_stamps[max_jobs])
+    # NOTE: to ensure that these remaining jobs were also run in parallel one
+    # could do the command hereafter; however, it would require to
+    # substantially increase the sleep time, because of the delays in
+    # rescheduling (1s, 2s, 3s, 1s, 2s,...). We currently prefer not to do
+    # this last concurrency test to avoid an important prolongation of the
+    # unit test execution time:
+    #     self.assertTrue(self.begin_stamps[-1] < self.end_stamps[max_jobs])

     # Warn if the first #max_jobs jobs were not run in parallel; the
     # corresponding strict check would be:
@@ -657,12 +691,14 @@
         pytest.skip('the system seems too loaded.')


-def test_concurrency_none(async_runner, make_cases, make_exec_ctx):
+def test_concurrency_none(async_runner, make_cases,
+                          make_sleep_check, make_exec_ctx):
     num_checks = 3
     make_exec_ctx(options=max_jobs_opts(1))

     runner, monitor = async_runner
-    runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)]))
+    runner.runall(make_cases([make_sleep_check(.5)
+                              for i in range(num_checks)]))

     # Ensure that all tests were run and without failures.
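    # (with max_jobs=1 the asynchronous policy cannot overlap jobs, so the
    # sleep checks are expected to run one after the other)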
assert num_checks == runner.stats.num_cases() @@ -698,20 +734,20 @@ def assert_interrupted_run(runner): def test_kbd_interrupt_in_wait_with_concurrency(async_runner, make_cases, - make_exec_ctx): + make_sleep_check, make_exec_ctx): make_exec_ctx(options=max_jobs_opts(4)) runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ - KeyboardInterruptCheck(), SleepCheck(10), - SleepCheck(10), SleepCheck(10) + make_kbd_check(), make_sleep_check(10), + make_sleep_check(10), make_sleep_check(10) ])) assert_interrupted_run(runner) def test_kbd_interrupt_in_wait_with_limited_concurrency( - async_runner, make_cases, make_exec_ctx + async_runner, make_cases, make_sleep_check, make_exec_ctx ): # The general idea for this test is to allow enough time for all the # four checks to be submitted and at the same time we need the @@ -722,82 +758,86 @@ def test_kbd_interrupt_in_wait_with_limited_concurrency( runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ - KeyboardInterruptCheck(), SleepCheck(10), - SleepCheck(10), SleepCheck(10) + make_kbd_check(), make_sleep_check(10), + make_sleep_check(10), make_sleep_check(10) ])) assert_interrupted_run(runner) -def test_kbd_interrupt_in_setup_with_concurrency(async_runner, make_cases, - make_exec_ctx): +def test_kbd_interrupt_in_setup_with_concurrency( + async_runner, make_cases, make_sleep_check, make_exec_ctx +): make_exec_ctx(options=max_jobs_opts(4)) runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ - SleepCheck(1), SleepCheck(1), SleepCheck(1), - KeyboardInterruptCheck(phase='setup') + make_sleep_check(1), make_sleep_check(1), make_sleep_check(1), + make_kbd_check(phase='setup') ])) assert_interrupted_run(runner) def test_kbd_interrupt_in_setup_with_limited_concurrency( - async_runner, make_cases, make_exec_ctx + async_runner, make_sleep_check, make_cases, make_exec_ctx ): make_exec_ctx(options=max_jobs_opts(2)) runner, _ = async_runner with pytest.raises(KeyboardInterrupt): runner.runall(make_cases([ - SleepCheck(1), SleepCheck(1), SleepCheck(1), - KeyboardInterruptCheck(phase='setup') + make_sleep_check(1), make_sleep_check(1), make_sleep_check(1), + make_kbd_check(phase='setup') ])) assert_interrupted_run(runner) def test_run_complete_fails_main_loop(async_runner, make_cases, - make_exec_ctx): + make_sleep_check, make_exec_ctx): make_exec_ctx(options=max_jobs_opts(1)) runner, _ = async_runner num_checks = 3 - runner.runall(make_cases([SleepCheckPollFail(10), - SleepCheck(0.1), SleepCheckPollFail(10)])) + runner.runall(make_cases([make_sleep_check(10, poll_fail='early'), + make_sleep_check(0.1), + make_sleep_check(10, poll_fail='early')])) assert_runall(runner) stats = runner.stats assert stats.num_cases() == num_checks assert len(stats.failed()) == 2 - # Verify that the succeeded test is the SleepCheck + # Verify that the succeeded test is a SleepCheck for t in stats.tasks(): if not t.failed: - assert isinstance(t.check, SleepCheck) + assert t.check.name.startswith('SleepCheck') def test_run_complete_fails_busy_loop(async_runner, make_cases, - make_exec_ctx): + make_sleep_check, make_exec_ctx): make_exec_ctx(options=max_jobs_opts(1)) runner, _ = async_runner num_checks = 3 - runner.runall(make_cases([SleepCheckPollFailLate(1), - SleepCheck(0.1), SleepCheckPollFailLate(0.5)])) + runner.runall(make_cases([make_sleep_check(1, poll_fail='late'), + make_sleep_check(0.1), + make_sleep_check(0.5, poll_fail='late')])) assert_runall(runner) stats = 
runner.stats assert stats.num_cases() == num_checks assert len(stats.failed()) == 2 - # Verify that the succeeded test is the SleepCheck + # Verify that the succeeded test is a SleepCheck for t in stats.tasks(): if not t.failed: - assert isinstance(t.check, SleepCheck) + assert t.check.name.startswith('SleepCheck') def test_compile_fail_reschedule_main_loop(async_runner, make_cases, - make_exec_ctx): + make_sleep_check, make_exec_ctx): make_exec_ctx(options=max_jobs_opts(1)) runner, _ = async_runner num_checks = 2 - runner.runall(make_cases([SleepCheckPollFail(.1), CompileFailureCheck()])) + runner.runall(make_cases([make_sleep_check(.1, poll_fail='early'), + CompileFailureCheck()])) stats = runner.stats assert num_checks == stats.num_cases() @@ -806,12 +846,13 @@ def test_compile_fail_reschedule_main_loop(async_runner, make_cases, def test_compile_fail_reschedule_busy_loop(async_runner, make_cases, - make_exec_ctx): + make_sleep_check, make_exec_ctx): make_exec_ctx(options=max_jobs_opts(1)) runner, _ = async_runner num_checks = 2 runner.runall( - make_cases([SleepCheckPollFailLate(1.5), CompileFailureCheck()]) + make_cases([make_sleep_check(1.5, poll_fail='late'), + CompileFailureCheck()]) ) stats = runner.stats assert num_checks == stats.num_cases() From 789deef196ef2c547b22c7808da06f0c34c34957 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sat, 11 Dec 2021 00:49:29 +0100 Subject: [PATCH 17/62] Modernize reframe tests in unit tests --- .../resources/checks/bad/invalid_check.py | 5 +- unittests/resources/checks/emptycheck.py | 5 +- unittests/resources/checks/hellocheck.py | 41 ++-- unittests/resources/checks/hellocheck_make.py | 28 +-- .../checks_unlisted/deprecated_test.py | 11 +- .../resources/checks_unlisted/deps_complex.py | 128 +++++------ .../resources/checks_unlisted/deps_simple.py | 13 +- unittests/test_pipeline.py | 202 +++++++++--------- 8 files changed, 217 insertions(+), 216 deletions(-) diff --git a/unittests/resources/checks/bad/invalid_check.py b/unittests/resources/checks/bad/invalid_check.py index ec4a20c2f5..de42f35cd6 100644 --- a/unittests/resources/checks/bad/invalid_check.py +++ b/unittests/resources/checks/bad/invalid_check.py @@ -8,9 +8,8 @@ @rfm.simple_test class SomeTest(rfm.RegressionTest): - def __init__(self): - self.valid_systems = [] - self.valid_prog_environs = [] + valid_systems = [] + valid_prog_environs = [] class NotATest: diff --git a/unittests/resources/checks/emptycheck.py b/unittests/resources/checks/emptycheck.py index a57a4b2c44..00b0e7c864 100644 --- a/unittests/resources/checks/emptycheck.py +++ b/unittests/resources/checks/emptycheck.py @@ -8,6 +8,5 @@ @rfm.simple_test class EmptyTest(rfm.RegressionTest): - def __init__(self): - self.valid_systems = [] - self.valid_prog_environs = [] + valid_systems = [] + valid_prog_environs = [] diff --git a/unittests/resources/checks/hellocheck.py b/unittests/resources/checks/hellocheck.py index 3ecab4086f..e2da5c7929 100644 --- a/unittests/resources/checks/hellocheck.py +++ b/unittests/resources/checks/hellocheck.py @@ -8,29 +8,33 @@ @rfm.simple_test -class HelloTest(rfm.RegressionTest): - def __init__(self): - self.descr = 'C Hello World test' +class HelloTest(rfm.RegressionTest, pin_prefix=True): + descr = 'C Hello World test' - # All available systems are supported - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sourcepath = 'hello.c' - self.tags = {'foo', 'bar'} - self.sanity_patterns = sn.assert_found(r'Hello, World\!', self.stdout) - self.maintainers = ['VK'] + # All 
available systems are supported + valid_systems = ['*'] + valid_prog_environs = ['*'] + sourcepath = 'hello.c' + tags = {'foo', 'bar'} + maintainers = ['VK'] + + @sanity_function + def validate(self): + return sn.assert_found(r'Hello, World\!', self.stdout) @rfm.simple_test class CompileOnlyHelloTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.descr = 'Compile-only C Hello World test' + descr = 'Compile-only C Hello World test' + + # All available systems are supported + valid_systems = ['*'] + valid_prog_environs = ['*'] + sourcepath = 'hello.c' - # All available systems are supported - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sourcepath = 'hello.c' - self.sanity_patterns = sn.assert_not_found(r'(?i)error', self.stdout) + @sanity_function + def validate(self): + return sn.assert_not_found(r'(?i)error', self.stdout) @rfm.simple_test @@ -40,5 +44,6 @@ class SkipTest(rfm.RunOnlyRegressionTest): valid_prog_environs = ['*'] sanity_patterns = sn.assert_true(1) - def __init__(self): + @run_after('init') + def foo(self): self.skip_if(True, 'unsupported') diff --git a/unittests/resources/checks/hellocheck_make.py b/unittests/resources/checks/hellocheck_make.py index 7dd8f96c15..a6ceab1254 100644 --- a/unittests/resources/checks/hellocheck_make.py +++ b/unittests/resources/checks/hellocheck_make.py @@ -9,17 +9,21 @@ @rfm.simple_test class HelloMakeTest(rfm.RegressionTest): - def __init__(self): - self.descr = 'C++ Hello World test' + descr = 'Makefile test' - # All available systems are supported - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.build_system = 'Make' + # All available systems are supported + valid_systems = ['*'] + valid_prog_environs = ['*'] + build_system = 'Make' + executable = './hello_c' + keep_files = ['hello_c'] + tags = {'foo', 'bar'} + maintainers = ['VK'] + + @run_before('compile') + def setflags(self): self.build_system.cflags = ['-O2'] - self.build_system.cxxflags = ['-O2'] - self.executable = './hello_c' - self.keep_files = ['hello_c'] - self.tags = {'foo', 'bar'} - self.sanity_patterns = sn.assert_found(r'Hello, World\!', self.stdout) - self.maintainers = ['VK'] + + @sanity_function + def validate(self): + return sn.assert_found(r'Hello, World\!', self.stdout) diff --git a/unittests/resources/checks_unlisted/deprecated_test.py b/unittests/resources/checks_unlisted/deprecated_test.py index 3297d30c16..9e9c6c3daa 100644 --- a/unittests/resources/checks_unlisted/deprecated_test.py +++ b/unittests/resources/checks_unlisted/deprecated_test.py @@ -6,12 +6,11 @@ @rfm.simple_test class DeprecatedTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.local = True - self.executable = 'echo hello' - self.sanity_patterns = sn.assert_found('hello', self.stdout) + valid_systems = ['*'] + valid_prog_environs = ['*'] + local = True + executable = 'echo' + sanity_patterns = sn.assert_true(1) @run_before('setup') def deprecation_warning(self): diff --git a/unittests/resources/checks_unlisted/deps_complex.py b/unittests/resources/checks_unlisted/deps_complex.py index b4c1617013..c5db09b40b 100644 --- a/unittests/resources/checks_unlisted/deps_complex.py +++ b/unittests/resources/checks_unlisted/deps_complex.py @@ -6,6 +6,7 @@ import os import reframe as rfm import reframe.utility.sanity as sn +import reframe.utility.typecheck as typ # # The following tests implement the dependency graph below: @@ -35,19 +36,19 @@ class 
BaseTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sourcesdir = None - self.executable = 'echo' - self._count = int(type(self).__name__[1:]) - self.sanity_patterns = sn.defer(True) - self.keep_files = ['out.txt'] - - @property - @deferrable - def count(self): - return self._count + valid_systems = ['*'] + valid_prog_environs = ['*'] + sourcesdir = None + executable = 'echo' + keep_files = ['out.txt'] + count = variable(int) + deps = variable(typ.List[str], value=[]) + + @run_after('init') + def init_deps(self): + self.count = int(self.unique_name[1:]) + for d in self.deps: + self.depends_on(d) @run_before('run') def write_count(self): @@ -59,115 +60,119 @@ def write_count(self): @rfm.simple_test class T0(BaseTest): - pass + sanity_patterns = sn.assert_true(1) @rfm.simple_test class T1(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T4') - self.depends_on('T5') - self.sanity_patterns = sn.assert_eq(self.count, 14) + deps = ['T4', 'T5'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 14) @require_deps def prepend_output(self, T4, T5): with open(os.path.join(T4().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) with open(os.path.join(T5().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T2(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T6') + deps = ['T6'] + @sanity_function + def validate(self): # Make this test fail on purpose: expected value is 31 normally - self.sanity_patterns = sn.assert_eq(self.count, 30) + return sn.assert_eq(self.count, 30) @require_deps def prepend_output(self, T6): with open(os.path.join(T6().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T3(T2): - def __init__(self): - super().__init__() - self.sanity_patterns = sn.assert_eq(self.count, 32) + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 32) @rfm.simple_test class T4(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T0') - self.sanity_patterns = sn.assert_eq(self.count, 4) + deps = ['T0'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 4) @require_deps def prepend_output(self, T0): with open(os.path.join(T0().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T5(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T4') - self.sanity_patterns = sn.assert_eq(self.count, 9) + deps = ['T4'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 9) @require_deps def prepend_output(self, T4): with open(os.path.join(T4().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T6(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T1') - self.depends_on('T5') - self.sanity_patterns = sn.assert_eq(self.count, 29) + deps = ['T1', 'T5'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 29) @require_deps def prepend_output(self, T1, T5): with open(os.path.join(T1().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) with open(os.path.join(T5().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T7(BaseTest): 
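+    # T7 adds to its own count the count written by T2 to its out.txt
+    # (cf. the dependency graph in this file's header)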
-    def __init__(self):
-        super().__init__()
-        self.depends_on('T2')
-        self.sanity_patterns = sn.assert_eq(self.count, 38)
+    deps = ['T2']
+
+    @sanity_function
+    def validate(self):
+        return sn.assert_eq(self.count, 38)

     @require_deps
     def prepend_output(self, T2):
         with open(os.path.join(T2().stagedir, 'out.txt')) as fp:
-            self._count += int(fp.read())
+            self.count += int(fp.read())


 @rfm.simple_test
 class T8(BaseTest):
-    def __init__(self):
-        super().__init__()
-        self.depends_on('T1')
-        self.sanity_patterns = sn.assert_eq(self.count, 22)
+    deps = ['T1']
+
+    @sanity_function
+    def validate(self):
+        return sn.assert_eq(self.count, 22)

     @require_deps
     def prepend_output(self, T1):
         with open(os.path.join(T1().stagedir, 'out.txt')) as fp:
-            self._count += int(fp.read())
+            self.count += int(fp.read())

     @run_after('setup')
     def fail(self):
@@ -180,12 +185,13 @@ class T9(BaseTest):

     # This test fails because of T8. It is added to make sure that
     # all tests are accounted for in the summary.
-    def __init__(self):
-        super().__init__()
-        self.depends_on('T8')
-        self.sanity_patterns = sn.assert_eq(self.count, 31)
+    deps = ['T8']
+
+    @sanity_function
+    def validate(self):
+        return sn.assert_eq(self.count, 31)

     @require_deps
     def prepend_output(self, T8):
         with open(os.path.join(T8().stagedir, 'out.txt')) as fp:
-            self._count += int(fp.read())
+            self.count += int(fp.read())
diff --git a/unittests/resources/checks_unlisted/deps_simple.py b/unittests/resources/checks_unlisted/deps_simple.py
index c5b6d3e1ec..afd0953a68 100644
--- a/unittests/resources/checks_unlisted/deps_simple.py
+++ b/unittests/resources/checks_unlisted/deps_simple.py
@@ -10,12 +10,10 @@

 @rfm.simple_test
 class Test0(rfm.RunOnlyRegressionTest):
-    def __init__(self):
-        self.valid_systems = ['sys0:p0', 'sys0:p1']
-        self.valid_prog_environs = ['e0', 'e1']
-        self.executable = 'echo'
-        self.executable_opts = [self.name]
-        self.sanity_patterns = sn.assert_found(self.name, self.stdout)
+    valid_systems = ['sys0:p0', 'sys0:p1']
+    valid_prog_environs = ['e0', 'e1']
+    executable = 'echo'
+    sanity_patterns = sn.assert_true(1)


 @rfm.simple_test
@@ -23,7 +21,8 @@ class Test1(rfm.RunOnlyRegressionTest):
     kind = parameter(['default', 'fully', 'by_part', 'by_case', 'custom',
                       'any', 'all', 'nodeps'])

-    def __init__(self):
+    @run_after('init')
+    def setup_deps(self):
         def custom_deps(src, dst):
             return (
                 src[0] == 'p0' and
diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py
index afe9c3a14d..5007676ed5 100644
--- a/unittests/test_pipeline.py
+++ b/unittests/test_pipeline.py
@@ -199,36 +199,22 @@ def test_hellocheck_build_remotely(hellotest, remote_exec_ctx):
     assert not hellotest.build_job.scheduler.is_local


-def test_hellocheck_local_prepost_run(hellotest, local_exec_ctx):
-    @sn.deferrable
-    def stagedir(test):
-        return test.stagedir
-
-    # Test also the prebuild/postbuild functionality
-    hellotest.prerun_cmds = ['echo prerun: `pwd`']
-    hellotest.postrun_cmds = ['echo postrun: `pwd`']
-    pre_run_path = sn.extractsingle(r'^prerun: (\S+)', hellotest.stdout, 1)
-    post_run_path = sn.extractsingle(r'^postrun: (\S+)', hellotest.stdout, 1)
-    hellotest.sanity_patterns = sn.all([
-        sn.assert_eq(stagedir(hellotest), pre_run_path),
-        sn.assert_eq(stagedir(hellotest), post_run_path),
-    ])
-    _run(hellotest, *local_exec_ctx)
+def test_hellocheck_local_prepost_run(HelloTest, local_exec_ctx):
+    class _X(HelloTest):
+        # Test also the prebuild/postbuild functionality
+        prerun_cmds = ['echo prerun: `pwd`']
+        postrun_cmds = ['echo postrun: `pwd`']

+        @sanity_function
+        def 
validate(self): + pre_path = sn.extractsingle(r'^prerun: (\S+)', self.stdout, 1) + post_path = sn.extractsingle(r'^postrun: (\S+)', self.stdout, 1) + return sn.all([ + sn.assert_eq(self.stagedir, pre_path), + sn.assert_eq(self.stagedir, post_path), + ]) -def test_run_only_sanity(local_exec_ctx): - @test_util.custom_prefix('unittests/resources/checks') - class MyTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.executable = './hello.sh' - self.executable_opts = ['Hello, World!'] - self.local = True - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.sanity_patterns = sn.assert_found( - r'Hello, World\!', self.stdout) - - _run(MyTest(), *local_exec_ctx) + _run(_X(), *local_exec_ctx) def test_run_only_set_sanity_in_a_hook(local_exec_ctx): @@ -243,7 +229,8 @@ class MyTest(rfm.RunOnlyRegressionTest): @run_after('run') def set_sanity(self): self.sanity_patterns = sn.assert_found( - r'Hello, World\!', self.stdout) + r'Hello, World\!', self.stdout + ) _run(MyTest(), *local_exec_ctx) @@ -274,12 +261,10 @@ class MyOtherTest(MyTest): def test_run_only_no_srcdir(local_exec_ctx): @test_util.custom_prefix('foo/bar/') class MyTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.executable = 'echo' - self.executable_opts = ['hello'] - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.sanity_patterns = sn.assert_found(r'hello', self.stdout) + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo' + sanity_patterns = sn.assert_true(1) test = MyTest() assert test.sourcesdir is None @@ -312,10 +297,9 @@ class MyTest(rfm.RunOnlyRegressionTest): def test_compile_only_failure(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.sourcepath = 'compiler_failure.c' - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + sourcepath = 'compiler_failure.c' + valid_prog_environs = ['*'] + valid_systems = ['*'] test = MyTest() test.setup(*local_exec_ctx) @@ -327,13 +311,18 @@ def __init__(self): def test_compile_only_warning(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.build_system = 'SingleSource' - self.build_system.srcfile = 'compiler_warning.c' + valid_prog_environs = ['*'] + valid_systems = ['*'] + build_system = 'SingleSource' + sourcepath = 'compiler_warning.c' + + @run_before('compile') + def setup_build(self): self.build_system.cflags = ['-Wall'] - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.sanity_patterns = sn.assert_found(r'warning', self.stderr) + + @sanity_function + def validate(self): + return sn.assert_found(r'warning', self.stderr) _run(MyTest(), *local_exec_ctx) @@ -406,10 +395,9 @@ def test_supports_environ(hellotest, generic_system): def test_sourcesdir_none(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RegressionTest): - def __init__(self): - self.sourcesdir = None - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + sourcesdir = None + valid_prog_environs = ['*'] + valid_systems = ['*'] with pytest.raises(ReframeError): _run(MyTest(), *local_exec_ctx) @@ -418,14 +406,15 @@ def __init__(self): def test_sourcesdir_build_system(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RegressionTest): - def __init__(self): - self.build_system = 'Make' - self.sourcepath = 'code' - self.executable = './code/hello' - 
self.valid_systems = ['*']
-            self.valid_prog_environs = ['*']
-            self.sanity_patterns = sn.assert_found(r'Hello, World\!',
-                                                   self.stdout)
+        build_system = 'Make'
+        sourcepath = 'code'
+        executable = './code/hello'
+        valid_systems = ['*']
+        valid_prog_environs = ['*']
+
+        @sanity_function
+        def validate(self):
+            return sn.assert_found(r'Hello, World\!', self.stdout)

     _run(MyTest(), *local_exec_ctx)


 def test_sourcesdir_none_generated_sources(local_exec_ctx):
     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.RegressionTest):
-        def __init__(self):
-            self.sourcesdir = None
-            self.prebuild_cmds = [
-                "printf '#include <stdio.h>\\n int main(){ "
-                "printf(\"Hello, World!\\\\n\"); return 0; }' > hello.c"
-            ]
-            self.executable = './hello'
-            self.sourcepath = 'hello.c'
-            self.valid_systems = ['*']
-            self.valid_prog_environs = ['*']
-            self.sanity_patterns = sn.assert_found(r'Hello, World\!',
-                                                   self.stdout)
+        sourcesdir = None
+        prebuild_cmds = [
+            "printf '#include <stdio.h>\\n int main(){ "
+            "printf(\"Hello, World!\\\\n\"); return 0; }' > hello.c"
+        ]
+        executable = './hello'
+        sourcepath = 'hello.c'
+        valid_systems = ['*']
+        valid_prog_environs = ['*']
+
+        @sanity_function
+        def validate(self):
+            return sn.assert_found(r'Hello, World\!', self.stdout)

     _run(MyTest(), *local_exec_ctx)


 def test_sourcesdir_none_compile_only(local_exec_ctx):
     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.CompileOnlyRegressionTest):
-        def __init__(self):
-            self.sourcesdir = None
-            self.valid_prog_environs = ['*']
-            self.valid_systems = ['*']
+        sourcesdir = None
+        valid_prog_environs = ['*']
+        valid_systems = ['*']

     with pytest.raises(BuildError):
         _run(MyTest(), *local_exec_ctx)


 def test_sourcesdir_none_run_only(local_exec_ctx):
     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.RunOnlyRegressionTest):
-        def __init__(self):
-            self.sourcesdir = None
-            self.executable = 'echo'
-            self.executable_opts = ["Hello, World!"]
-            self.valid_prog_environs = ['*']
-            self.valid_systems = ['*']
-            self.sanity_patterns = sn.assert_found(r'Hello, World\!',
-                                                   self.stdout)
+        sourcesdir = None
+        executable = 'echo'
+        executable_opts = ['Hello, World!']
+        valid_prog_environs = ['*']
+        valid_systems = ['*']
+
+        @sanity_function
+        def validate(self):
+            return sn.assert_found(r'Hello, World\!', self.stdout)

     _run(MyTest(), *local_exec_ctx)


 def test_sourcepath_abs(local_exec_ctx):
     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.CompileOnlyRegressionTest):
-        def __init__(self):
-            self.valid_prog_environs = ['*']
-            self.valid_systems = ['*']
+        valid_prog_environs = ['*']
+        valid_systems = ['*']

     test = MyTest()
     test.setup(*local_exec_ctx)
@@ -493,9 +482,8 @@ def test_sourcepath_upref(local_exec_ctx):
     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.CompileOnlyRegressionTest):
-        def __init__(self):
-            self.valid_prog_environs = ['*']
-            self.valid_systems = ['*']
+        valid_prog_environs = ['*']
+        valid_systems = ['*']

     test = MyTest()
     test.setup(*local_exec_ctx)
@@ -507,9 +495,8 @@ def test_sourcepath_non_existent(local_exec_ctx):
     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.CompileOnlyRegressionTest):
-        def __init__(self):
-            self.valid_prog_environs = ['*']
-            self.valid_systems = 
['*']
+        valid_prog_environs = ['*']
+        valid_systems = ['*']

     test = MyTest()
     test.setup(*local_exec_ctx)
@@ -604,10 +591,7 @@ def __init__(self):

 def test_setup_hooks(HelloTest, local_exec_ctx):
     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(HelloTest):
-        def __init__(self):
-            super().__init__()
-            self.executable = os.path.join('.', self.name)
-            self.count = 0
+        count = variable(int, value=0)

         @run_before('setup')
         def prefoo(self):
@@ -905,6 +889,9 @@ def setz(self, T0):
     assert t.z == 3


+# All the following tests about naming are for the deprecated
+# @parameterized_test decorator
+
 def test_regression_test_name():
     class MyTest(rfm.RegressionTest):
         def __init__(self, a, b):
@@ -930,8 +917,7 @@ def __init__(self, a, b):
         self.b = b

     test = MyTest('(a*b+c)/12', C(33))
-    assert ('MyTest__a_b_c__12_C_33_' ==
-            test.name)
+    assert 'MyTest__a_b_c__12_C_33_' == test.name


 def test_name_user_inheritance():
@@ -975,10 +961,9 @@ def test_trap_job_errors_without_sanity_patterns(local_exec_ctx):

     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.RunOnlyRegressionTest):
-        def __init__(self):
-            self.valid_prog_environs = ['*']
-            self.valid_systems = ['*']
-            self.executable = 'exit 10'
+        valid_prog_environs = ['*']
+        valid_systems = ['*']
+        executable = 'exit 10'

     with pytest.raises(SanityError, match='job exited with exit code 10'):
         _run(MyTest(), *local_exec_ctx)
@@ -989,12 +974,14 @@ def test_trap_job_errors_with_sanity_patterns(local_exec_ctx):

     @test_util.custom_prefix('unittests/resources/checks')
     class MyTest(rfm.RunOnlyRegressionTest):
-        def __init__(self):
-            self.valid_prog_environs = ['*']
-            self.valid_systems = ['*']
-            self.prerun_cmds = ['echo hello']
-            self.executable = 'true'
-            self.sanity_patterns = sn.assert_not_found(r'hello', self.stdout)
+        valid_prog_environs = ['*']
+        valid_systems = ['*']
+        prerun_cmds = ['echo hello']
+        executable = 'true'
+
+        @sanity_function
+        def validate(self):
+            return sn.assert_not_found(r'hello', self.stdout)

     with pytest.raises(SanityError):
         _run(MyTest(), *local_exec_ctx)
@@ -1024,6 +1011,9 @@ def sanity_file(tmp_path):
     yield tmp_path / 'sanity.out'


+# NOTE: The following series of tests test the `perf_patterns` syntax, so they
+# should not change to the `@performance_function` syntax
+
 @pytest.fixture
 def dummytest(testsys_system, perf_file, sanity_file):
     class MyTest(rfm.RunOnlyRegressionTest):

From 26106ab20d6de2d967479f6f9c3a10847dffabef Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sat, 11 Dec 2021 10:56:46 +0100
Subject: [PATCH 18/62] Use `RegressionTest.unique_name` instead of `name`

---
 reframe/core/fixtures.py               |  4 +--
 reframe/core/pipeline.py               | 36 +++++++++----------
 reframe/frontend/ci.py                 | 14 ++++----
 reframe/frontend/cli.py                |  8 ++---
 reframe/frontend/dependencies.py       | 14 ++++----
 reframe/frontend/executors/__init__.py | 14 ++++----
 reframe/frontend/executors/policies.py |  6 ++--
 reframe/frontend/filters.py            |  2 +-
 reframe/frontend/runreport.py          | 30 ++++++++--------
 reframe/frontend/statistics.py         | 22 ++++++------
 reframe/schemas/runreport.json         |  9 +++--
 unittests/test_dependencies.py         | 10 +++---
 unittests/test_filters.py              | 48 +++++++++++---------------
 unittests/test_loader.py               | 24 +------------
 unittests/test_policies.py             | 32 ++++++-----------
 unittests/test_variables.py            |  6 ++--
 unittests/utility.py                   | 18 ++++++++++
 17 files changed, 138 insertions(+), 159 deletions(-)

diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py
index 5e622e62d2..f92dfd8545 100644
--- a/reframe/core/fixtures.py
+++ b/reframe/core/fixtures.py
@@ -651,7 +651,7 @@ def 
_expand_partitions_envs(self, obj): part = tuple(obj.valid_systems) except AttributeError: raise ReframeSyntaxError( - f"'valid_systems' is undefined in test {obj.name}" + f"'valid_systems' is undefined in test {obj.unique_name!r}" ) else: rt = runtime.runtime() @@ -662,7 +662,7 @@ def _expand_partitions_envs(self, obj): prog_envs = tuple(obj.valid_prog_environs) except AttributeError: raise ReframeSyntaxError( - f"'valid_prog_environs' is undefined in test {obj.name}" + f"'valid_prog_environs' is undefined in test {obj.unique_name!r}" ) else: if '*' in prog_envs: diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 3162493454..7e2edf7aa9 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -270,7 +270,7 @@ def pipeline_hooks(cls): #: A detailed description of the test. #: #: :type: :class:`str` - #: :default: ``self.name`` + #: :default: ``self.display_name`` descr = variable(str) #: The path to the source file or source directory of the test. @@ -365,8 +365,9 @@ def pipeline_hooks(cls): #: The name of the executable to be launched during the run phase. #: #: If this variable is undefined when entering the compile pipeline - #: stage, it will be set to ``os.path.join('.', self.name)``. Classes - #: that override the compile stage may leave this variable undefined. + #: stage, it will be set to ``os.path.join('.', self.unique_name)``. + #: Classes that override the compile stage may leave this variable + #: undefined. #: #: :type: :class:`str` #: :default: :class:`required` @@ -1295,12 +1296,13 @@ def info(self): you use the :class:`RegressionTest`'s attributes, because this method may be called at any point of the test's lifetime. ''' - ret = self.name + + ret = self.display_name if self.current_partition: - ret += ' on %s' % self.current_partition.fullname + ret += f' @{self.current_partition.fullname}' if self.current_environ: - ret += ' using %s' % self.current_environ.name + ret += f'+{self.current_environ.name}' return ret @@ -1423,11 +1425,11 @@ def _setup_paths(self): runtime = rt.runtime() self._stagedir = runtime.make_stagedir( self.current_system.name, self._current_partition.name, - self._current_environ.name, self.name + self._current_environ.name, self.unique_name ) self._outputdir = runtime.make_outputdir( self.current_system.name, self._current_partition.name, - self._current_environ.name, self.name + self._current_environ.name, self.unique_name ) except OSError as e: raise PipelineError('failed to set up paths') from e @@ -1488,10 +1490,10 @@ def setup(self, partition, environ, **job_opts): self._current_environ = environ self._setup_paths() self._resolve_fixtures() - self._job = self._setup_job(f'rfm_{self.name}_job', + self._job = self._setup_job(f'rfm_{self.unique_name}_job', self.local, **job_opts) - self._build_job = self._setup_job(f'rfm_{self.name}_build', + self._build_job = self._setup_job(f'rfm_{self.unique_name}_build', self.local or self.build_locally, **job_opts) @@ -1559,7 +1561,7 @@ def compile(self): # Set executable (only if hasn't been provided) if not hasattr(self, 'executable'): - self.executable = os.path.join('.', self.name) + self.executable = os.path.join('.', self.unique_name) # Verify the sourcepath and determine the sourcepath in the stagedir if (os.path.isabs(self.sourcepath) or @@ -2263,7 +2265,7 @@ def getdep(self, target, environ=None, part=None): raise DependencyError('no test case is associated with this test') for d in self._case().deps: - mask = int(d.check.name == target) + mask = 
int(d.check.unique_name == target) mask |= (int(d.partition.name == part) | int(part == '*')) << 1 mask |= (int(d.environ.name == environ) | int(environ == '*')) << 2 if mask == 7: @@ -2313,8 +2315,7 @@ def skip_if_no_procinfo(self, msg=None): self.skip_if(not proc.info, msg) def __str__(self): - return "%s(name='%s', prefix='%s')" % (type(self).__name__, - self.name, self.prefix) + return f'{self.unique_name} [{self.display_name}]' def __eq__(self, other): if not isinstance(other, RegressionTest): @@ -2348,9 +2349,8 @@ def setup(self, partition, environ, **job_opts): self._current_partition = partition self._current_environ = environ self._setup_paths() - self._job = self._setup_job(f'rfm_{self.name}_job', - self.local, - **job_opts) + self._job = self._setup_job(f'rfm_{self.unique_name}_job', + self.local, **job_opts) self._resolve_fixtures() def compile(self): @@ -2406,7 +2406,7 @@ def setup(self, partition, environ, **job_opts): self._current_partition = partition self._current_environ = environ self._setup_paths() - self._build_job = self._setup_job(f'rfm_{self.name}_build', + self._build_job = self._setup_job(f'rfm_{self.unique_name}_build', self.local or self.build_locally, **job_opts) self._resolve_fixtures() diff --git a/reframe/frontend/ci.py b/reframe/frontend/ci.py index 1e01ec932c..49b3a4b3a2 100644 --- a/reframe/frontend/ci.py +++ b/reframe/frontend/ci.py @@ -27,10 +27,10 @@ def rfm_command(testcase): else: config_opt = '' - report_file = f'{testcase.check.name}-report.json' + report_file = f'{testcase.check.unique_name}-report.json' if testcase.level: restore_files = ','.join( - f'{t.check.name}-report.json' for t in tc.deps + f'{t.check.unique_name}-report.json' for t in tc.deps ) else: restore_files = None @@ -42,9 +42,9 @@ def rfm_command(testcase): f'-R' if recurse else '', f'--report-file={report_file}', f'--restore-session={restore_files}' if restore_files else '', - f'--report-junit={testcase.check.name}-report.xml', + f'--report-junit={testcase.check.unique_name}-report.xml', f'{"".join("-" + verbosity)}' if verbosity else '', - '-n', f"'^{testcase.check.name}$'", '-r' + '-n', f"'^{testcase.check.unique_name}$'", '-r' ]) max_level = 0 # We need the maximum level to generate the stages section @@ -63,13 +63,13 @@ def rfm_command(testcase): json['image'] = image_name for tc in testcases: - json[f'{tc.check.name}'] = { + json[f'{tc.check.unique_name}'] = { 'stage': f'rfm-stage-{tc.level}', 'script': [rfm_command(tc)], 'artifacts': { - 'paths': [f'{tc.check.name}-report.json'] + 'paths': [f'{tc.check.unique_name}-report.json'] }, - 'needs': [t.check.name for t in tc.deps] + 'needs': [t.check.unique_name for t in tc.deps] } max_level = max(max_level, tc.level) diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 3024c16676..cded9c0788 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -85,7 +85,7 @@ def fmt_deps(): ), 'Dependencies (actual)': fmt_deps() } - lines = [f'- {check.name}:'] + lines = [f'- {check.unique_name}:'] for prop, val in check_info.items(): lines.append(f' {prop}:') if isinstance(val, dict): @@ -113,8 +113,8 @@ def list_checks(testcases, printer, detailed=False): # Collect dependencies per test deps = {} for t in testcases: - deps.setdefault(t.check.name, []) - deps[t.check.name].append((t, t.deps)) + deps.setdefault(t.check.unique_name, []) + deps[t.check.unique_name].append((t, t.deps)) checks = set( t.check for t in testcases if not t.check.is_fixture() or detailed @@ -138,7 +138,7 @@ def dep_lines(u, *, prefix, 
depth=0, lines=None, printed=None): adj = u.deps for v in adj: - if v.check.name not in printed: + if v.check.unique_name not in printed: dep_lines(v, prefix=prefix + 2*' ', depth=depth+1, lines=lines, printed=printed) diff --git a/reframe/frontend/dependencies.py b/reframe/frontend/dependencies.py index b713f859c4..48f93c7098 100644 --- a/reframe/frontend/dependencies.py +++ b/reframe/frontend/dependencies.py @@ -31,7 +31,7 @@ def build_index(cases): ret = {} for c in cases: - cname = c.check.name + cname = c.check.unique_name ret.setdefault(cname, []) ret[cname].append(c) @@ -130,11 +130,11 @@ def _reduce_deps(graph): '''Reduce test case graph to a test-only graph.''' ret = {} for case, deps in graph.items(): - test_deps = util.OrderedSet(d.check.name for d in deps) + test_deps = util.OrderedSet(d.check.unique_name for d in deps) try: - ret[case.check.name] |= test_deps + ret[case.check.unique_name] |= test_deps except KeyError: - ret[case.check.name] = test_deps + ret[case.check.unique_name] = test_deps return ret @@ -251,11 +251,11 @@ def visit(node, path): # Index test cases by test name cases_by_name = {} for c in graph.keys(): - c.level = levels[c.check.name] + c.level = levels[c.check.unique_name] try: - cases_by_name[c.check.name].append(c) + cases_by_name[c.check.unique_name].append(c) except KeyError: - cases_by_name[c.check.name] = [c] + cases_by_name[c.check.unique_name] = [c] return list(itertools.chain(*(retrieve(cases_by_name, n, []) for n in visited))) diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index 76b4d4a168..838448b47d 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -54,7 +54,7 @@ def __iter__(self): return iter([self._check, self._partition, self._environ]) def __hash__(self): - return (hash(self.check.name) ^ + return (hash(self.check.unique_name) ^ hash(self.partition.fullname) ^ hash(self.environ.name)) @@ -62,12 +62,12 @@ def __eq__(self, other): if not isinstance(other, type(self)): return NotImplemented - return (self.check.name == other.check.name and + return (self.check.unique_name == other.check.unique_name and self.environ.name == other.environ.name and self.partition.fullname == other.partition.fullname) def __repr__(self): - c, p, e = self.check.name, self.partition.fullname, self.environ.name + c, p, e = self.check.unique_name, self.partition.fullname, self.environ.name return f'({c!r}, {p!r}, {e!r})' @property @@ -420,7 +420,7 @@ def stats(self): return self._stats def runall(self, testcases, restored_cases=None): - num_checks = len({tc.check.name for tc in testcases}) + num_checks = len({tc.check.unique_name for tc in testcases}) self._printer.separator('short double line', 'Running %d check(s)' % num_checks) self._printer.timestamp('Started on', 'short double line') @@ -457,7 +457,7 @@ def _retry_failed(self, cases): rt = runtime.runtime() failures = self._stats.failed() while (failures and rt.current_run < self._max_retries): - num_failed_checks = len({tc.check.name for tc in failures}) + num_failed_checks = len({tc.check.unique_name for tc in failures}) rt.next_run() self._printer.separator( @@ -477,14 +477,14 @@ def _runall(self, testcases): def print_separator(check, prefix): self._printer.separator( 'short single line', - '%s %s (%s)' % (prefix, check.name, check.descr) + '%s %s (%s)' % (prefix, check.unique_name, check.descr) ) self._policy.enter() self._printer.reset_progress(len(testcases)) last_check = None for t in testcases: - if last_check 
is None or last_check.name != t.check.name: + if last_check is None or last_check.unique_name != t.check.unique_name: if last_check is not None: print_separator(last_check, 'finished processing') self._printer.info('') diff --git a/reframe/frontend/executors/policies.py b/reframe/frontend/executors/policies.py index f2200ffdea..96f35320f1 100644 --- a/reframe/frontend/executors/policies.py +++ b/reframe/frontend/executors/policies.py @@ -91,7 +91,7 @@ def runcase(self, case): self.printer.status( 'RUN', '%s on %s using %s' % - (check.name, partition.fullname, environ.name) + (check.unique_name, partition.fullname, environ.name) ) task = RegressionTask(case, self.task_listeners) self._task_index[case] = task @@ -381,7 +381,7 @@ def runcase(self, case): self.stats.add_task(task) self.printer.status( 'RUN', '%s on %s using %s' % - (check.name, partition.fullname, environ.name) + (check.unique_name, partition.fullname, environ.name) ) try: partname = partition.fullname @@ -389,7 +389,7 @@ def runcase(self, case): if not task.skipped and not task.failed: self.printer.status( 'DEP', '%s on %s using %s' % - (check.name, partname, environ.name), + (check.unique_name, partname, environ.name), just='right' ) self._waiting_tasks.append(task) diff --git a/reframe/frontend/filters.py b/reframe/frontend/filters.py index 0319251f56..5783f8d94a 100644 --- a/reframe/frontend/filters.py +++ b/reframe/frontend/filters.py @@ -19,7 +19,7 @@ def have_name(patt): regex = re_compile(patt) def _fn(case): - return regex.match(case.check.name) + return regex.match(case.check.unique_name) return _fn diff --git a/reframe/frontend/runreport.py b/reframe/frontend/runreport.py index a5f0d54f64..034a559dfb 100644 --- a/reframe/frontend/runreport.py +++ b/reframe/frontend/runreport.py @@ -15,7 +15,10 @@ import reframe.utility.jsonext as jsonext import reframe.utility.versioning as versioning -DATA_VERSION = '1.3.0' +# The schema data version +# Major version bumps are expected to break the validation of previous schemas + +DATA_VERSION = '2.0' _SCHEMA = os.path.join(rfm.INSTALL_PREFIX, 'reframe/schemas/runreport.json') @@ -31,12 +34,12 @@ def __init__(self, report): self._cases_index = {} for run in self._report['runs']: for tc in run['testcases']: - c, p, e = tc['name'], tc['system'], tc['environment'] + c, p, e = tc['unique_name'], tc['system'], tc['environment'] self._cases_index[c, p, e] = tc # Index also the restored cases for tc in self._report['restored_cases']: - c, p, e = tc['name'], tc['system'], tc['environment'] + c, p, e = tc['unique_name'], tc['system'], tc['environment'] self._cases_index[c, p, e] = tc def __getitem__(self, key): @@ -71,7 +74,7 @@ def slice(self, prop, when=None, unique=False): yield val def case(self, check, part, env): - c, p, e = check.name, part.fullname, env.name + c, p, e = check.unique_name, part.fullname, env.name ret = self._cases_index.get((c, p, e)) if ret is None: # Look up the case in the fallback reports @@ -151,18 +154,15 @@ def _load_report(filename): try: jsonschema.validate(report, schema) except jsonschema.ValidationError as e: - raise errors.ReframeError(f'invalid report {filename!r}') from e + try: + found_ver = report['session_info']['data_version'] + except KeyError: + found_ver = 'n/a' - # Check if the report data is compatible - found_ver = versioning.parse( - report['session_info']['data_version'] - ) - required_ver = versioning.parse(DATA_VERSION) - if found_ver.major != required_ver.major or found_ver < required_ver: raise errors.ReframeError( - f'incompatible 
report data versions: '
-            f'found {found_ver}, required >= {required_ver}'
-        )
+            f'invalid report {filename!r} '
+            f'(required data version: {DATA_VERSION}, found: {found_ver})'
+        ) from e
 
     return _RunReport(report)
 
@@ -202,7 +202,7 @@ def junit_xml_report(json_report):
     testsuite_properties = etree.SubElement(xml_testsuite, 'properties')
     for tc in rfm_run['testcases']:
         casename = (
-            f"{tc['name']}[{tc['system']}, {tc['environment']}]"
+            f"{tc['unique_name']}[{tc['system']}, {tc['environment']}]"
         )
         testcase = etree.SubElement(
             xml_testsuite, 'testcase',
diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py
index 0349573b6d..746faf4310 100644
--- a/reframe/frontend/statistics.py
+++ b/reframe/frontend/statistics.py
@@ -70,7 +70,7 @@ def retry_report(self):
             environ_name = t.check.current_environ.name
 
             # Overwrite entry from previous run if available
-            messages[f"{t.check.name}:{partition_name}:{environ_name}"] = (
+            messages[f"{t.check.unique_name}:{partition_name}:{environ_name}"] = (
                 f"  * Test {t.check.info()} was retried {run} time(s) and "
                 f"{'failed' if t.failed else 'passed'}."
             )
@@ -96,14 +96,14 @@ def json(self, force=False):
                 'build_stderr': None,
                 'build_stdout': None,
                 'dependencies_actual': [
-                    (d.check.name, d.partition.fullname, d.environ.name)
+                    (d.check.unique_name, d.partition.fullname, d.environ.name)
                     for d in t.testcase.deps
                 ],
                 'dependencies_conceptual': [
                     d[0] for d in t.check.user_deps()
                 ],
                 'description': check.descr,
-                'prefix': check.prefix,
+                'display_name': check.display_name,
                 'filename': inspect.getfile(type(check)),
                 'environment': None,
                 'fail_phase': None,
@@ -116,6 +116,7 @@ def json(self, force=False):
                 'nodelist': [],
                 'outputdir': None,
                 'perfvars': None,
+                'prefix': check.prefix,
                 'result': None,
                 'stagedir': check.stagedir,
                 'scheduler': None,
@@ -126,7 +127,8 @@ def json(self, force=False):
                 'time_run': t.duration('run_complete'),
                 'time_sanity': t.duration('sanity'),
                 'time_setup': t.duration('setup'),
-                'time_total': t.duration('total')
+                'time_total': t.duration('total'),
+                'unique_name': check.unique_name
             }
 
             # We take partition and environment from the test case and not
@@ -213,7 +215,7 @@ def print_failure_report(self, printer):
                 f'(for the last of {last_run} retries)' if last_run > 0 else ''
             )
             printer.info(line_width * '-')
-            printer.info(f"FAILURE INFO for {r['name']} {retry_info}")
+            printer.info(f"FAILURE INFO for {r['unique_name']} {retry_info}")
             printer.info(f"  * Test Description: {r['description']}")
             printer.info(f"  * System partition: {r['system']}")
             printer.info(f"  * Environment: {r['environment']}")
@@ -230,7 +232,7 @@ def print_failure_report(self, printer):
                             f"{r['dependencies_actual']}")
             printer.info(f"  * Maintainers: {r['maintainers']}")
             printer.info(f"  * Failing phase: {r['fail_phase']}")
-            printer.info(f"  * Rerun with '-n {r['name']}"
+            printer.info(f"  * Rerun with '-n {r['unique_name']}"
                          f" -p {r['environment']} --system {r['system']} -r'")
             printer.info(f"  * Reason: {r['fail_reason']}")
@@ -251,7 +253,7 @@ def print_failure_stats(self, printer):
             partfullname = partition.fullname if partition else 'None'
             environ_name = (check.current_environ.name
                             if check.current_environ else 'None')
-            f = f'[{check.name}, {environ_name}, {partfullname}]'
+            f = f'[{check.unique_name}, {environ_name}, {partfullname}]'
             if tf.failed_stage not in failures:
                 failures[tf.failed_stage] = []
@@ -297,10 +299,10 @@ def performance_report(self):
         previous_part = ''
         for t in self.tasks():
             if t.check.perfvalues.keys():
-                if t.check.name != previous_name:
+                if 
t.check.unique_name != previous_name: report_body.append(line_width * '-') - report_body.append(t.check.name) - previous_name = t.check.name + report_body.append(t.check.unique_name) + previous_name = t.check.unique_name if t.check.current_partition.fullname != previous_part: report_body.append( diff --git a/reframe/schemas/runreport.json b/reframe/schemas/runreport.json index 6afe98223d..6575b65609 100644 --- a/reframe/schemas/runreport.json +++ b/reframe/schemas/runreport.json @@ -22,6 +22,7 @@ "items": {"type": "string"} }, "description": {"type": "string"}, + "display_name": {"type": "string"}, "environment": {"type": ["string", "null"]}, "fail_info": { "type": ["object", "null"], @@ -94,10 +95,12 @@ "time_run": {"type": ["number", "null"]}, "time_sanity": {"type": ["number", "null"]}, "time_setup": {"type": ["number", "null"]}, - "time_total": {"type": ["number", "null"]} + "time_total": {"type": ["number", "null"]}, + "unique_name": {"type": "string"} }, "required": [ - "environment", "name", "result", "system", "filename" + "environment", "fail_phase", "fail_reason", "filename", + "result", "stagedir", "system", "time_total", "unique_name" ] } }, @@ -122,7 +125,7 @@ "version": {"type": "string"}, "workdir": {"type": "string"} }, - "required": ["data_version"] + "required": ["data_version", "hostname", "time_elapsed", "time_start"] }, "restored_cases": { "type": "array", diff --git a/unittests/test_dependencies.py b/unittests/test_dependencies.py index 54512dc8ea..3f938a42e6 100644 --- a/unittests/test_dependencies.py +++ b/unittests/test_dependencies.py @@ -320,12 +320,10 @@ def test_dependecies_how_functions_undoc(): def test_build_deps_deprecated_syntax(loader, default_exec_ctx): class Test0(rfm.RegressionTest): - def __init__(self): - self.valid_systems = ['sys0:p0', 'sys0:p1'] - self.valid_prog_environs = ['e0', 'e1'] - self.executable = 'echo' - self.executable_opts = [self.name] - self.sanity_patterns = sn.assert_found(self.name, self.stdout) + valid_systems = ['sys0:p0', 'sys0:p1'] + valid_prog_environs = ['e0', 'e1'] + executable = 'echo' + sanity_patterns = sn.assert_true(1) class Test1_deprecated(rfm.RunOnlyRegressionTest): kind = parameter([rfm.DEPEND_FULLY, diff --git a/unittests/test_filters.py b/unittests/test_filters.py index 1036f5dc34..71701b6eb7 100644 --- a/unittests/test_filters.py +++ b/unittests/test_filters.py @@ -5,50 +5,42 @@ import pytest +import reframe as rfm import reframe.core.exceptions as errors import reframe.frontend.executors as executors import reframe.frontend.filters as filters import reframe.utility.sanity as sn +import unittests.utility as test_util def count_checks(filter_fn, checks): return sn.count(filter(filter_fn, checks)) -def make_case(attrs): - class _MyTest: - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - - test = _MyTest() - for k, v in attrs.items(): - setattr(test, k, v) - +def make_case(*args, **kwargs): + test = test_util.make_check(*args, **kwargs) return executors.TestCase(test, None, None) @pytest.fixture def sample_cases(): + class _X(rfm.RegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + return [ - make_case({ - 'name': 'check1', - 'tags': {'a', 'b', 'c', 'd'}, - 'num_gpus_per_node': 1, - 'maintainers': {'A', 'B', 'C', 'D'} - }), - make_case({ - 'name': 'check2', - 'tags': {'x', 'y', 'z'}, - 'num_gpus_per_node': 0, - 'maintainers': {'X', 'Y', 'Z'} - }), - make_case({ - 'name': 'check3', - 'tags': {'a', 'z'}, - 'num_gpus_per_node': 1, - 'maintainers': {'A', 'Z'} - 
}) + make_case(_X, alt_name='check1', + tags={'a', 'b', 'c', 'd'}, + num_gpus_per_node=1, + maintainers=['A', 'B', 'C', 'D']), + make_case(_X, alt_name='check2', + tags={'x', 'y', 'z'}, + num_gpus_per_node=0, + maintainers=['X', 'Y', 'Z']), + make_case(_X, alt_name='check3', + tags={'a', 'z'}, + num_gpus_per_node=1, + maintainers=['A', 'Z']) ] diff --git a/unittests/test_loader.py b/unittests/test_loader.py index 286548ed5f..ecb774e300 100644 --- a/unittests/test_loader.py +++ b/unittests/test_loader.py @@ -94,56 +94,34 @@ def setup(self, partition, environ, **job_opts): @rfm.simple_test class TestSimple(rfm.RegressionTest): - def __init__(self): - pass + pass @rfm.simple_test class TestSpecial(rfm.RegressionTest, special=True): - def __init__(self): - pass - def setup(self, partition, environ, **job_opts): super().setup(partition, environ, **job_opts) @rfm.simple_test class TestSpecialRunOnly(rfm.RunOnlyRegressionTest, special=True): - def __init__(self): - pass - def setup(self, partition, environ, **job_opts): super().setup(partition, environ, **job_opts) - def run(self): - super().run() - @rfm.simple_test class TestSpecialCompileOnly(rfm.CompileOnlyRegressionTest, special=True): - def __init__(self): - pass - def setup(self, partition, environ, **job_opts): super().setup(partition, environ, **job_opts) - def run(self): - super().run() - with pytest.raises(ReframeSyntaxError): @rfm.simple_test class TestSpecialDerived(TestSpecial): - def __init__(self): - pass - def setup(self, partition, environ, **job_opts): super().setup(partition, environ, **job_opts) with pytest.warns(ReframeDeprecationWarning): @rfm.simple_test class TestFinal(rfm.RegressionTest): - def __init__(self): - pass - @rfm.final def my_new_final(self): pass diff --git a/unittests/test_policies.py b/unittests/test_policies.py index 4ba5580107..9f0348af64 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -79,34 +79,24 @@ def testsys_exec_ctx(make_exec_ctx_g): yield from make_exec_ctx_g(system='testsys:gpu') -def make_check(cls, *, alt_name=None, **vars): - if alt_name: - cls = rfm.make_test(alt_name, (cls,), {}) - - for k, v in vars.items(): - cls.setvar(k, v) - - return cls() - - def make_kbd_check(phase='wait'): - return make_check(KeyboardInterruptCheck, phase=phase) + return test_util.make_check(KeyboardInterruptCheck, phase=phase) @pytest.fixture def make_sleep_check(): test_id = 0 - def _make_check(sleep_time, poll_fail=None): + def _do_make_check(sleep_time, poll_fail=None): nonlocal test_id - test = make_check(SleepCheck, - sleep_time=sleep_time, - poll_fail=poll_fail, - alt_name=f'SleepCheck_{test_id}') + test = test_util.make_check(SleepCheck, + sleep_time=sleep_time, + poll_fail=poll_fail, + alt_name=f'SleepCheck_{test_id}') test_id += 1 return test - return _make_check + return _do_make_check @pytest.fixture(params=[policies.SerialExecutionPolicy, @@ -289,13 +279,13 @@ def test_runall(make_runner, make_cases, common_exec_ctx, tmp_path): with pytest.raises(ReframeError, match=r'is not a valid JSON file'): runreport.load_report(tmp_path / 'invalid.json') - # Generate a report with an incorrect data version - report['session_info']['data_version'] = '10.0.0' + # Generate a report that does not comply to the schema + del report['session_info']['hostname'] with open(tmp_path / 'invalid-version.json', 'w') as fp: jsonext.dump(report, fp) with pytest.raises(ReframeError, - match=r'incompatible report data versions'): + match=r'invalid report'): runreport.load_report(tmp_path / 
'invalid-version.json')
 
 
@@ -477,7 +467,7 @@ def test_pass_in_retries(make_runner, make_cases, tmp_path, common_exec_ctx):
     tmpfile.write_text('0\n')
     runner = make_runner(max_retries=3)
     runner.runall(make_cases([
-        make_check(RetriesCheck, filename=str(tmpfile), num_runs=2)
+        test_util.make_check(RetriesCheck, filename=str(tmpfile), num_runs=2)
     ]))
 
     # Ensure that the test passed after retries in run `pass_run_no`
diff --git a/unittests/test_variables.py b/unittests/test_variables.py
index 6caf02a922..7efe1af9af 100644
--- a/unittests/test_variables.py
+++ b/unittests/test_variables.py
@@ -155,7 +155,8 @@ def test_require_var(OneVarTest):
     class MyTest(OneVarTest):
         foo = required
 
-        def __init__(self):
+        @run_after('init')
+        def print_foo(self):
             print(self.foo)
 
     with pytest.raises(AttributeError):
@@ -166,9 +167,6 @@ def test_required_var_not_present(OneVarTest):
     class MyTest(OneVarTest):
         foo = required
 
-        def __init__(self):
-            pass
-
     MyTest()
 
 
diff --git a/unittests/utility.py b/unittests/utility.py
index a8609dd0d3..823e0791f3 100644
--- a/unittests/utility.py
+++ b/unittests/utility.py
@@ -12,6 +12,7 @@
 import os
 import sys
 
+import reframe as rfm
 import reframe.core.config as config
 import reframe.core.modules as modules
 import reframe.core.runtime as rt
@@ -159,3 +160,20 @@ def _wrapped(*args, **kwargs):
             return _wrapped
 
     return _dispatch_deco
+
+
+def make_check(cls, *, alt_name=None, **vars):
+    '''Create a new test from class `cls`.
+
+    :arg cls: the class of the test.
+    :arg alt_name: an alternative name to be given to the test class.
+    :arg vars: variables to set in the test upon creation.
+    '''
+
+    if alt_name:
+        cls = rfm.make_test(alt_name, (cls,), {})
+
+    for k, v in vars.items():
+        cls.setvar(k, v)
+
+    return cls()

From bc65812a18741f7901dbca9426ec8daba5e73a26 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sat, 11 Dec 2021 23:40:41 +0100
Subject: [PATCH 19/62] New way for listing tests + support for listing concretizations

---
 reframe/frontend/cli.py | 64 ++++++++++++++++++++++++++++++++++-------
 unittests/test_cli.py   | 22 ++++++++++++++
 2 files changed, 76 insertions(+), 10 deletions(-)

diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py
index cded9c0788..b901b22ec7 100644
--- a/reframe/frontend/cli.py
+++ b/reframe/frontend/cli.py
@@ -125,7 +125,7 @@ def list_checks(testcases, printer, detailed=False):
     printer.info(f'Found {len(checks)} check(s)\n')
 
 
-def list_checks2(testcases, printer, detailed=False):
+def list_checks2(testcases, printer, detailed=False, concretized=False):
     printer.info('[List of matched checks]')
     unique_checks = set()
 
@@ -138,7 +138,8 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None):
 
         adj = u.deps
         for v in adj:
-            if v.check.unique_name not in printed:
+            if concretized or v.check.unique_name not in printed:
                 dep_lines(v, prefix=prefix + 2*' ', depth=depth+1,
                           lines=lines, printed=printed)
 
@@ -147,8 +148,17 @@
                 unique_checks.add(v.check.unique_name)
 
         if depth:
+            tc_info = ''
+            details = ''
+            if concretized:
+                tc_info = f' @{u.partition.fullname}+{u.environ.name}'
+
+            location = inspect.getfile(type(u.check))
+            if detailed:
+                details = f' [id: {u.check.unique_name}, file: {location!r}]'
+
             lines.append(
-                f'{prefix}^{u.check.display_name} [{u.check.unique_name}]'
+                f'{prefix}^{u.check.display_name}{tc_info}{details}'
             )
 
         return lines
@@ -156,14 +166,44 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None):
     # We need the leaf test cases to be 
printed at the leftmost
     testcases = list(t for t in testcases if t.in_degree == 0)
     for t in testcases:
-        printer.info(f'- {t.check.display_name} [{t.check.unique_name}]')
+        tc_info = ''
+        details = ''
+        if concretized:
+            tc_info = f' @{t.partition.fullname}+{t.environ.name}'
+
+        location = inspect.getfile(type(t.check))
+        if detailed:
+            details = f' [id: {t.check.unique_name}, file: {location!r}]'
+
+        if concretized or t.check.unique_name not in unique_checks:
+            printer.info(f'- {t.check.display_name}{tc_info}{details}')
+
         if not t.check.is_fixture():
             unique_checks.add(t.check.unique_name)
 
         for l in reversed(dep_lines(t, prefix='  ')):
             printer.info(l)
 
-    printer.info(f'Found {len(unique_checks)} check(s)\n')
+    if concretized:
+        printer.info(f'Concretized {len(testcases)} test case(s)\n')
+    else:
+        printer.info(f'Found {len(unique_checks)} check(s)\n')
+
+
+def describe_checks(testcases, printer):
+    checks = []
+    unique_names = set()
+    for tc in testcases:
+        if tc.check.is_fixture():
+            continue
+
+        if tc.check.name not in unique_names:
+            checks.append(tc.check)
+            unique_names.add(tc.check.name)
+
+    printer.info(jsonext.dumps(checks, indent=2))
 
 
 def list_tags(testcases, printer):
@@ -344,12 +384,13 @@ def main():
         help=('Generate into FILE a Gitlab CI pipeline '
               'for the selected tests and exit'),
     )
     action_options.add_argument(
-        '-L', '--list-detailed', action='store_true',
-        help='List the selected checks providing details for each test'
+        '-L', '--list-detailed', nargs='?', const='T', choices=['C', 'T'],
+        help=('List the selected tests (T) or the concretized test cases (C) '
+              'providing more details')
     )
     action_options.add_argument(
-        '-l', '--list', action='store_true',
-        help='List the selected checks'
+        '-l', '--list', nargs='?', const='T', choices=['C', 'T'],
+        help='List the selected tests (T) or the concretized test cases (C)'
     )
     action_options.add_argument(
         '--list-tags', action='store_true',
@@ -970,7 +1011,10 @@ def _case_failed(t):
     # Act on checks
     if options.list or options.list_detailed:
-        list_checks2(testcases, printer, options.list_detailed)
+        concretized = (options.list == 'C' or
+                       options.list_detailed == 'C')
+        detailed = options.list_detailed is not None
+        list_checks2(testcases, printer, detailed, concretized)
         sys.exit(0)
 
     if options.list_tags:
diff --git a/unittests/test_cli.py b/unittests/test_cli.py
index c7ee1039fd..2b293514dc 100644
--- a/unittests/test_cli.py
+++ b/unittests/test_cli.py
@@ -90,6 +90,10 @@ def _run_reframe(system='generic:default',
         argv += ['-l']
     elif action == 'list_detailed':
         argv += ['-L']
+    elif action == 'list_concretized':
+        argv += ['-lC']
+    elif action == 'list_detailed_concretized':
+        argv += ['-LC']
     elif action == 'list_tags':
        argv += ['--list-tags']
     elif action == 'help':
@@ -539,6 +543,24 @@ def test_list_with_details(run_reframe):
     assert returncode == 0
 
 
+def test_list_concretized(run_reframe):
+    returncode, stdout, stderr = run_reframe(
+        checkpath=['unittests/resources/checks/frontend_checks.py'],
+        action='list_concretized'
+    )
+    assert 'Traceback' not in stdout
+    assert 'Traceback' not in stderr
+    assert returncode == 0
+
+    returncode, stdout, stderr = run_reframe(
+        checkpath=['unittests/resources/checks/frontend_checks.py'],
+        action='list_detailed_concretized'
+    )
+    assert 'Traceback' not in stdout
+    assert 'Traceback' not in stderr
+    assert returncode == 0
+
+
 def test_list_tags(run_reframe):
     returncode, stdout, stderr = run_reframe(
         checkpath=['unittests/resources/checks/hellocheck.py',
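
A rough sketch of the difference between the two listing modes added in this
patch, assuming a test `Foo` concretized on a single partition `sys:p0` with
two programming environments `e0` and `e1` (all names are illustrative):

    -l   (tests):       - Foo               # each test listed once
    -lC  (test cases):  - Foo @sys:p0+e0    # one entry per concretization
                        - Foo @sys:p0+e1

The `@partition+environment` suffix comes from the `tc_info` string built
above; `-LC` additionally appends the `[id: ..., file: ...]` details.

From 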
7f6b30a89c3473b6dcdf974324cc175d1c259f42 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sun, 12 Dec 2021 22:54:56 +0100 Subject: [PATCH 20/62] Add a `--describe` option to get details of tests --- reframe/core/logging.py | 4 + reframe/frontend/cli.py | 174 +++++++++++++++++------------------- reframe/frontend/printer.py | 8 ++ 3 files changed, 92 insertions(+), 94 deletions(-) diff --git a/reframe/core/logging.py b/reframe/core/logging.py index 44187c8e4d..5b0357b9e9 100644 --- a/reframe/core/logging.py +++ b/reframe/core/logging.py @@ -470,6 +470,10 @@ def __init__(self, name, level=logging.NOTSET): def setLevel(self, level): self.level = _check_level(level) + # Clear the internal cache of the base logger, otherwise the logger + # will remain disabled if its level is raised and then lowered again + self._cache.clear() + def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): record = super().makeRecord(name, level, fn, lno, msg, args, exc_info, diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index b901b22ec7..d599d0c60a 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -36,69 +36,6 @@ from reframe.frontend.executors import Runner, generate_testcases -def format_check(check, check_deps, detailed=False): - def fmt_list(x): - if not x: - return '' - - return ', '.join(x) - - def fmt_deps(): - no_deps = True - lines = [] - for t, deps in check_deps: - for d in deps: - lines.append(f'- {t} -> {d}') - - if lines: - return '\n '.join(lines) - else: - return '' - - location = inspect.getfile(type(check)) - if not detailed: - return f'- {check.unique_name} [{check.display_name}]' - - if check.num_tasks > 0: - node_alloc_scheme = (f'standard ({check.num_tasks} task(s) -- ' - f'may be set differently in hooks)') - elif check.num_tasks == 0: - node_alloc_scheme = 'flexible' - else: - node_alloc_scheme = f'flexible (minimum {-check.num_tasks} task(s))' - - check_info = { - 'Description': check.descr, - 'Environment modules': fmt_list(check.modules), - 'Location': location, - 'Maintainers': fmt_list(check.maintainers), - 'Node allocation': node_alloc_scheme, - 'Pipeline hooks': { - k: fmt_list(fn.__name__ for fn in v) - for k, v in check.pipeline_hooks().items() - }, - 'Tags': fmt_list(check.tags), - 'Valid environments': fmt_list(check.valid_prog_environs), - 'Valid systems': fmt_list(check.valid_systems), - 'Dependencies (conceptual)': fmt_list( - [d[0] for d in check.user_deps()] - ), - 'Dependencies (actual)': fmt_deps() - } - lines = [f'- {check.unique_name}:'] - for prop, val in check_info.items(): - lines.append(f' {prop}:') - if isinstance(val, dict): - for k, v in val.items(): - lines.append(f' - {k}: {v}') - else: - lines.append(f' {val}') - - lines.append('') - - return '\n'.join(lines) - - def format_env(envvars): ret = '[ReFrame Environment]\n' notset = '' @@ -107,25 +44,7 @@ def format_env(envvars): return ret -def list_checks(testcases, printer, detailed=False): - printer.info('[List of matched checks]') - - # Collect dependencies per test - deps = {} - for t in testcases: - deps.setdefault(t.check.unique_name, []) - deps[t.check.unique_name].append((t, t.deps)) - - checks = set( - t.check for t in testcases if not t.check.is_fixture() or detailed - ) - printer.info( - '\n'.join(format_check(c, deps[c.name], detailed) for c in checks) - ) - printer.info(f'Found {len(checks)} check(s)\n') - - -def list_checks2(testcases, printer, detailed=False, concretized=False): +def list_checks(testcases, printer, 
detailed=False, concretized=False):
     printer.info('[List of matched checks]')
     unique_checks = set()
 
@@ -193,17 +112,46 @@
 
 def describe_checks(testcases, printer):
-    checks = []
+    records = []
     unique_names = set()
     for tc in testcases:
         if tc.check.is_fixture():
             continue
 
         if tc.check.name not in unique_names:
-            checks.append(tc.check)
             unique_names.add(tc.check.name)
+            rec = json.loads(jsonext.dumps(tc.check))
+
+            # Now manipulate the record to be more user-friendly
+            #
+            # 1. Add other fields that are relevant for users
+            # 2. Remove all private fields
+            rec['unique_name'] = tc.check.unique_name
+            rec['display_name'] = tc.check.display_name
+            rec['pipeline_hooks'] = {}
+            rec['perf_variables'] = list(rec['perf_variables'].keys())
+            for stage, hooks in tc.check.pipeline_hooks().items():
+                for hk in hooks:
+                    rec['pipeline_hooks'].setdefault(stage, [])
+                    rec['pipeline_hooks'][stage].append(hk.__name__)
 
-    printer.info(jsonext.dumps(checks, indent=2))
+            for attr in list(rec.keys()):
+                if attr == '__rfm_class__' or attr == '__rfm_file__':
+                    continue
+
+                if attr.startswith('_'):
+                    del rec[attr]
+
+            # List all required variables
+            required = []
+            for var in tc.check._rfm_var_space:
+                if not tc.check._rfm_var_space[var].is_defined():
+                    required.append(var)
+
+            rec['__rfm_required__'] = required
+            records.append(dict(sorted(rec.items())))
+
+    printer.info(jsonext.dumps(records, indent=2))
 
 
 def list_tags(testcases, printer):
@@ -383,6 +331,11 @@ def main():
         help=('Generate into FILE a Gitlab CI pipeline '
               'for the selected tests and exit'),
     )
+
+    action_options.add_argument(
+        '--describe', action='store_true',
+        help='Give full details on the selected tests'
+    )
     action_options.add_argument(
         '-L', '--list-detailed', nargs='?', const='T', choices=['C', 'T'],
         help=('List the selected tests (T) or the concretized test cases (C) '
@@ -631,6 +584,25 @@ def main():
         help='Use a login shell for job scripts'
     )
 
+    def restrict_logging():
+        '''Restrict logging to errors only.
+
+        This is done when options that generate JSON output are passed, so
+        that the JSON output is not polluted with other logging output.
+
+        :returns: :obj:`True` if the logging was restricted, :obj:`False`
+            otherwise.
+ + ''' + + if (options.show_config or + options.detect_host_topology or options.describe): + logging.getlogger().setLevel(logging.ERROR) + return True + else: + return False + # Parse command line options = argparser.parse_args() if len(sys.argv) == 1: @@ -646,10 +618,11 @@ def main(): site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) - logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') - printer.inc_verbosity(site_config.get('general/0/verbose')) + if not restrict_logging(): + printer.inc_verbosity(site_config.get('general/0/verbose')) + if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' @@ -719,9 +692,10 @@ def main(): printer.error(logfiles_message()) sys.exit(1) - logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') - printer.inc_verbosity(site_config.get('general/0/verbose')) + if not restrict_logging(): + printer.inc_verbosity(site_config.get('general/0/verbose')) + try: printer.debug('Initializing runtime') runtime.init_runtime(site_config) @@ -761,6 +735,8 @@ def main(): # Show configuration after everything is set up if options.show_config: + # Restore logging level + printer.setLevel(logging.INFO) config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) @@ -778,14 +754,17 @@ def main(): if options.detect_host_topology: from reframe.utility.cpuinfo import cpuinfo + s_cpuinfo = cpuinfo() + + # Restore logging level + printer.setLevel(logging.INFO) topofile = options.detect_host_topology if topofile == '-': - json.dump(cpuinfo(), sys.stdout, indent=2) - sys.stdout.write('\n') + printer.info(json.dumps(s_cpuinfo, indent=2)) else: try: with open(topofile, 'w') as fp: - json.dump(cpuinfo(), fp, indent=2) + json.dump(s_cpuinfo, fp, indent=2) fp.write('\n') except OSError as e: getlogger().error( @@ -1010,11 +989,17 @@ def _case_failed(t): tc.check.disable_hook(h) # Act on checks + if options.describe: + # Restore logging level + printer.setLevel(logging.INFO) + describe_checks(testcases, printer) + sys.exit(0) + if options.list or options.list_detailed: concretized = (options.list == 'C' or options.list_detailed == 'C') detailed = options.list_detailed is not None - list_checks2(testcases, printer, detailed, concretized) + list_checks(testcases, printer, detailed, concretized) sys.exit(0) if options.list_tags: @@ -1284,4 +1269,5 @@ def module_unuse(*paths): printer.error(f'could not save log file: {e}') sys.exit(1) finally: - printer.info(logfiles_message()) + if not restrict_logging(): + printer.info(logfiles_message()) diff --git a/reframe/frontend/printer.py b/reframe/frontend/printer.py index 986563c8da..264e355dbe 100644 --- a/reframe/frontend/printer.py +++ b/reframe/frontend/printer.py @@ -75,3 +75,11 @@ def timestamp(self, msg='', separator=None): def __getattr__(self, attr): # delegate all other attribute lookup to the underlying logger return getattr(logging.getlogger(), attr) + + def __setattr__(self, attr, value): + # Delegate colorize setting to the backend logger + if attr == 'colorize': + logging.getlogger().colorize = value + self.__dict__['colorize'] = value + else: + super().__setattr__(attr, value) From 8d06cf8688c5301db408751246bb6f2adc2e01a5 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Tue, 14 Dec 2021 00:35:43 +0100 
Subject: [PATCH 21/62] Fix unit tests for Python 3.6 --- reframe/core/logging.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/reframe/core/logging.py b/reframe/core/logging.py index 5b0357b9e9..42b9da5a75 100644 --- a/reframe/core/logging.py +++ b/reframe/core/logging.py @@ -470,9 +470,11 @@ def __init__(self, name, level=logging.NOTSET): def setLevel(self, level): self.level = _check_level(level) - # Clear the internal cache of the base logger, otherwise the logger - # will remain disabled if its level is raised and then lowered again - self._cache.clear() + if sys.version_info[:2] >= (3, 7): + # Clear the internal cache of the base logger, otherwise the + # logger will remain disabled if its level is raised and then + # lowered again + self._cache.clear() def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): From 609dbc16b2fd5656b7f209c98ad3303d49072d59 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Tue, 14 Dec 2021 22:43:40 +0100 Subject: [PATCH 22/62] Fine tune failure and performance report --- reframe/frontend/statistics.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py index 746faf4310..cb086e80b2 100644 --- a/reframe/frontend/statistics.py +++ b/reframe/frontend/statistics.py @@ -216,7 +216,8 @@ def print_failure_report(self, printer): ) printer.info(line_width * '-') printer.info(f"FAILURE INFO for {r['unique_name']} {retry_info}") - printer.info(f" * Test Description: {r['description']}") + printer.info(f" * Expanded name: {r['display_name']}") + printer.info(f" * Description: {r['description']}") printer.info(f" * System partition: {r['system']}") printer.info(f" * Environment: {r['environment']}") printer.info(f" * Stage directory: {r['stagedir']}") @@ -301,7 +302,7 @@ def performance_report(self): if t.check.perfvalues.keys(): if t.check.unique_name != previous_name: report_body.append(line_width * '-') - report_body.append(t.check.unique_name) + report_body.append(t.check.display_name) previous_name = t.check.unique_name if t.check.current_partition.fullname != previous_part: From 63e7dd5cd76a2e74692d9f987d90f9cde291817a Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Tue, 14 Dec 2021 23:27:36 +0100 Subject: [PATCH 23/62] Update `--describe` output --- reframe/frontend/cli.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index d599d0c60a..9f0d355b1f 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -136,10 +136,13 @@ def describe_checks(testcases, printer): rec['pipeline_hooks'][stage].append(hk.__name__) for attr in list(rec.keys()): - if attr == '__rfm_class__' or attr == '__rfm_file__': - continue - - if attr.startswith('_'): + if attr == '__rfm_class__': + rec['@class'] = rec[attr] + del rec[attr] + elif attr == '__rfm_file__': + rec['@file'] = rec[attr] + del rec[attr] + elif attr.startswith('_'): del rec[attr] # List all required variables @@ -148,7 +151,7 @@ def describe_checks(testcases, printer): if not tc.check._rfm_var_space[var].is_defined(): required.append(var) - rec['__rfm_required__'] = required + rec['@required'] = required records.append(dict(sorted(rec.items()))) printer.info(jsonext.dumps(records, indent=2)) From 7ad10e10f08f2fd8b538ea20b9cb53b889640f06 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 15 Dec 2021 00:13:47 +0100 Subject: [PATCH 24/62] Extend `-n` 
option to match both display and unique names --- reframe/frontend/filters.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/reframe/frontend/filters.py b/reframe/frontend/filters.py index 5783f8d94a..d3625f68c5 100644 --- a/reframe/frontend/filters.py +++ b/reframe/frontend/filters.py @@ -19,7 +19,13 @@ def have_name(patt): regex = re_compile(patt) def _fn(case): - return regex.match(case.check.unique_name) + # Match pattern, but remove spaces from the `display_name` + display_name = case.check.display_name.replace(' ', '') + if '@' in patt: + # Do an exact match on the unique name + return patt.replace('@', '_') == case.check.unique_name + else: + return regex.match(display_name) return _fn From 9db0bf6033c3f576af40e8fc313b9a10443fb274 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 16 Dec 2021 00:21:09 +0100 Subject: [PATCH 25/62] Add unit tests for updated name filtering --- reframe/core/meta.py | 13 ++----- reframe/frontend/executors/__init__.py | 4 ++- reframe/frontend/filters.py | 13 ++++--- unittests/conftest.py | 2 +- unittests/test_filters.py | 47 ++++++++++++++++++++++++++ 5 files changed, 62 insertions(+), 17 deletions(-) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index cf6fdea674..fb34b63d11 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -16,20 +16,11 @@ import reframe.core.variables as variables import reframe.core.fixtures as fixtures import reframe.core.hooks as hooks -import reframe.core.runtime as rt import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression - - -def _use_compact_names(): - try: - return getattr(_use_compact_names, '_cached') - except AttributeError: - ret = rt.runtime().get_option('general/0/compact_test_names') - _use_compact_names._cached = ret - return ret +from reframe.core.runtime import runtime class RegressionTestMeta(type): @@ -849,7 +840,7 @@ def variant_name(cls, variant_num=None): if variant_num is None: return name - if _use_compact_names(): + if runtime().get_option('general/0/compact_test_names'): if cls.num_variants > 1: width = utils.count_digits(cls.num_variants) name += f'_{variant_num:0{width}}' diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index 838448b47d..788438fbb9 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -67,7 +67,9 @@ def __eq__(self, other): self.partition.fullname == other.partition.fullname) def __repr__(self): - c, p, e = self.check.unique_name, self.partition.fullname, self.environ.name + c = self.check.unique_name if self.check else None + p = self.partition.fullname if self.partition else None + e = self.environ.name if self.environ else None return f'({c!r}, {p!r}, {e!r})' @property diff --git a/reframe/frontend/filters.py b/reframe/frontend/filters.py index d3625f68c5..8d0de40bc8 100644 --- a/reframe/frontend/filters.py +++ b/reframe/frontend/filters.py @@ -6,6 +6,7 @@ import re from reframe.core.exceptions import ReframeError +from reframe.core.runtime import runtime def re_compile(patt): @@ -21,11 +22,15 @@ def have_name(patt): def _fn(case): # Match pattern, but remove spaces from the `display_name` display_name = case.check.display_name.replace(' ', '') - if '@' in patt: - # Do an exact match on the unique name - return patt.replace('@', '_') == case.check.unique_name + rt = runtime() + if not 
rt.get_option('general/0/compact_test_names'): + return regex.match(case.check.unique_name) else: - return regex.match(display_name) + if '@' in patt: + # Do an exact match on the unique name + return patt.replace('@', '_') == case.check.unique_name + else: + return regex.match(display_name) return _fn diff --git a/unittests/conftest.py b/unittests/conftest.py index 766543d7b6..0f69acd778 100644 --- a/unittests/conftest.py +++ b/unittests/conftest.py @@ -64,7 +64,7 @@ def _make_exec_ctx(config_file=TEST_CONFIG_FILE, @pytest.fixture def make_exec_ctx_g(make_exec_ctx): - '''Same as ``make_exec_ctx_g`` except that it is a generator. + '''Same as ``make_exec_ctx`` except that it is a generator. You should use this fixture if you want to pass it to ``yield from`` expressions. diff --git a/unittests/test_filters.py b/unittests/test_filters.py index 71701b6eb7..139e21b843 100644 --- a/unittests/test_filters.py +++ b/unittests/test_filters.py @@ -44,6 +44,36 @@ class _X(rfm.RegressionTest): ] +@pytest.fixture +def use_compact_names(make_exec_ctx_g): + yield from make_exec_ctx_g(options={'general/compact_test_names': True}) + + +@pytest.fixture +def sample_param_cases(use_compact_names): + class _X(rfm.RegressionTest): + p = parameter([1, 1, 3]) + valid_systems = ['*'] + valid_prog_environs = ['*'] + + return [executors.TestCase(_X(variant_num=v), None, None) + for v in range(_X.num_variants)] + + +@pytest.fixture +def sample_param_cases_compat(): + # Param cases with the old naming scheme; i.e., with + # `general/compact_test_names=False` + + class _X(rfm.RegressionTest): + p = parameter([1, 1, 3]) + valid_systems = ['*'] + valid_prog_environs = ['*'] + + return [executors.TestCase(_X(variant_num=v), None, None) + for v in range(_X.num_variants)] + + def test_have_name(sample_cases): assert 1 == count_checks(filters.have_name('check1'), sample_cases) assert 3 == count_checks(filters.have_name('check'), sample_cases) @@ -54,6 +84,23 @@ def test_have_name(sample_cases): sample_cases) +def test_have_name_param_test(sample_param_cases): + assert 2 == count_checks(filters.have_name('.*%p=1'), sample_param_cases) + assert 1 == count_checks(filters.have_name('_X%p=3'), sample_param_cases) + assert 1 == count_checks(filters.have_name('_X@2'), sample_param_cases) + + +def test_have_name_param_test_compat(sample_param_cases_compat): + assert 0 == count_checks(filters.have_name('.*%p=1'), + sample_param_cases_compat) + assert 0 == count_checks(filters.have_name('_X%p=3'), + sample_param_cases_compat) + assert 0 == count_checks(filters.have_name('_X@2'), + sample_param_cases_compat) + assert 2 == count_checks(filters.have_name('_X_1'), + sample_param_cases_compat) + + def test_have_not_name(sample_cases): assert 2 == count_checks(filters.have_not_name('check1'), sample_cases) assert 1 == count_checks(filters.have_not_name('check1|check3'), From b04100b4c6339d261441efe35fa8150d5374e4de Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Fri, 17 Dec 2021 23:05:21 +0100 Subject: [PATCH 26/62] Remove unused imports --- reframe/frontend/runreport.py | 2 +- unittests/test_policies.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/reframe/frontend/runreport.py b/reframe/frontend/runreport.py index 034a559dfb..78274ee348 100644 --- a/reframe/frontend/runreport.py +++ b/reframe/frontend/runreport.py @@ -13,7 +13,7 @@ import reframe as rfm import reframe.core.exceptions as errors import reframe.utility.jsonext as jsonext -import reframe.utility.versioning as versioning + # The schema data 
version # Major version bumps are expected to break the validation of previous schemas diff --git a/unittests/test_policies.py b/unittests/test_policies.py index 9f0348af64..9b2fcc60ee 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -12,7 +12,6 @@ import sys import time -import reframe as rfm import reframe.core.runtime as rt import reframe.frontend.dependencies as dependencies import reframe.frontend.executors as executors From b820ae248d2773ed865901cfd08bb0c0609e24f5 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Fri, 17 Dec 2021 23:25:19 +0100 Subject: [PATCH 27/62] Fix PEP8 issues --- reframe/core/fixtures.py | 3 ++- reframe/core/pipeline.py | 7 ++++--- reframe/frontend/executors/__init__.py | 3 ++- reframe/frontend/statistics.py | 6 ++++-- unittests/test_policies.py | 5 +++-- 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py index f92dfd8545..99867e801a 100644 --- a/reframe/core/fixtures.py +++ b/reframe/core/fixtures.py @@ -662,7 +662,8 @@ def _expand_partitions_envs(self, obj): prog_envs = tuple(obj.valid_prog_environs) except AttributeError: raise ReframeSyntaxError( - f"'valid_prog_environs' is undefined in test {obj.unique_name!r}" + f"'valid_prog_environs' is undefined " + f"in test {obj.unique_name!r}" ) else: if '*' in prog_envs: diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index e42c4d2c41..8977a6e49d 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -9,8 +9,8 @@ __all__ = [ 'CompileOnlyRegressionTest', 'RegressionTest', 'RunOnlyRegressionTest', - 'DEPEND_BY_ENV', 'DEPEND_EXACT', 'DEPEND_FULLY', 'final', 'RegressionMixin', - 'make_test' + 'DEPEND_BY_ENV', 'DEPEND_EXACT', 'DEPEND_FULLY', 'final', + 'RegressionMixin', 'make_test' ] @@ -1034,7 +1034,8 @@ def __getattr__(self, name): def __setattr__(self, name, value): if name == 'name': user_deprecation_warning( - 'setting the name of the test is deprecated; see for XXX for details', + "setting the name of the test is deprecated; " + "see the documentation of the 'name' attribute for details", from_version='3.10.0' ) self._rfm_unique_name = value diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index 788438fbb9..3433368ef6 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -486,7 +486,8 @@ def print_separator(check, prefix): self._printer.reset_progress(len(testcases)) last_check = None for t in testcases: - if last_check is None or last_check.unique_name != t.check.unique_name: + if (last_check is None or + last_check.unique_name != t.check.unique_name): if last_check is not None: print_separator(last_check, 'finished processing') self._printer.info('') diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py index cb086e80b2..afa51131cf 100644 --- a/reframe/frontend/statistics.py +++ b/reframe/frontend/statistics.py @@ -70,7 +70,8 @@ def retry_report(self): environ_name = t.check.current_environ.name # Overwrite entry from previous run if available - messages[f"{t.check.unique_name}:{partition_name}:{environ_name}"] = ( + key = f"{t.check.unique_name}:{partition_name}:{environ_name}" + messages[key] = ( f" * Test {t.check.info()} was retried {run} time(s) and " f"{'failed' if t.failed else 'passed'}." 
)
@@ -96,7 +97,8 @@ def json(self, force=False):
                 'build_stderr': None,
                 'build_stdout': None,
                 'dependencies_actual': [
-                    (d.check.unique_name, d.partition.fullname, d.environ.name)
+                    (d.check.unique_name,
+                     d.partition.fullname, d.environ.name)
                     for d in t.testcase.deps
                 ],
                 'dependencies_conceptual': [
diff --git a/unittests/test_policies.py b/unittests/test_policies.py
index 9b2fcc60ee..97b5991048 100644
--- a/unittests/test_policies.py
+++ b/unittests/test_policies.py
@@ -722,8 +722,9 @@ def assert_interrupted_run(runner):
             assert t.exc_info[0] == AbortTaskError
 
 
-def test_kbd_interrupt_in_wait_with_concurrency(async_runner, make_cases,
-                                                make_sleep_check, make_exec_ctx):
+def test_kbd_interrupt_in_wait_with_concurrency(
+    async_runner, make_cases, make_sleep_check, make_exec_ctx
+):
     make_exec_ctx(options=max_jobs_opts(4))
     runner, _ = async_runner
     with pytest.raises(KeyboardInterrupt):

From b3f1511a8e4a97c1e858ecedf7c995b21f6fe045 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Wed, 12 Jan 2022 22:57:33 +0100
Subject: [PATCH 28/62] Support a more compact syntax for selecting simple variants

---
 reframe/core/parameters.py | 10 ++++++++++
 unittests/test_meta.py     | 12 +++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py
index 72522362e9..e3ad740b10 100644
--- a/reframe/core/parameters.py
+++ b/reframe/core/parameters.py
@@ -281,6 +281,10 @@ def get_variant_nums(self, **conditions):
         The conditions are passed as key-value pairs, where the keys are the
         parameter names to apply the filtering on and the values are
         functions that expect the parameter's value as the sole argument.
+
+        :returns: the indices of the matching parameters in the parameter
+            space.
+
         '''
         candidates = range(len(self))
         if not conditions:
@@ -292,6 +296,12 @@ def get_variant_nums(self, **conditions):
                 raise NameError(
                     f'no such parameter: {param!r}'
                 )
+            elif not callable(cond):
+                # Convert the plain value to an equality check
+                val = cond
+
+                def cond(x):
+                    return x == val
             elif not utils.is_trivially_callable(cond, non_def_args=1):
                 raise ValueError(
                     f'condition on {param!r} must be a callable accepting a '
diff --git a/unittests/test_meta.py b/unittests/test_meta.py
index 8f753958dd..bfbaedea7a 100644
--- a/unittests/test_meta.py
+++ b/unittests/test_meta.py
@@ -497,8 +497,14 @@ class Foo(MyMeta):
         q = parameter(range(10))
 
     variants = Foo.get_variant_nums(p=lambda x: x < 5, q=lambda x: x > 3)
-    for variant in variants:
-        assert Foo.get_variant_info(variant)['params']['p'] < 5
-        assert Foo.get_variant_info(variant)['params']['q'] > 3
+    for v in variants:
+        assert Foo.get_variant_info(v)['params']['p'] < 5
+        assert Foo.get_variant_info(v)['params']['q'] > 3
 
     assert Foo.get_variant_nums() == list(range(Foo.num_variants))
+
+    # Check condensed syntax
+    variants = Foo.get_variant_nums(p=5, q=4)
+    for v in variants:
+        assert Foo.get_variant_info(v)['params']['p'] == 5
+        assert Foo.get_variant_info(v)['params']['q'] == 4
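
The condensed syntax added in this patch turns a plain, non-callable value
into an implicit equality condition, so the two calls below select the same
variants (a sketch only; `Foo` is illustrative):

    class Foo(rfm.RegressionTest):
        p = parameter(range(10))

    Foo.get_variant_nums(p=lambda x: x == 5)   # explicit callable condition
    Foo.get_variant_nums(p=5)                  # condensed equivalent

From 3008c2d3c64db6e02b149e1ec305ab5e01a4ff8f Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Wed, 12 Jan 2022 23:07:03 +0100
Subject: [PATCH 29/62] Dump more test properties with the `--describe` option

---
 reframe/frontend/cli.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py
index 7a1adfc87c..3e6337903d 100644
--- a/reframe/frontend/cli.py
+++ b/reframe/frontend/cli.py
@@ -130,6 +130,8 @@ def describe_checks(testcases, printer):
         rec['display_name'] = tc.check.display_name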
rec['pipeline_hooks'] = {} rec['perf_variables'] = list(rec['perf_variables'].keys()) + rec['prefix'] = tc.check.prefix + rec['variant_num'] = tc.check.variant_num for stage, hooks in tc.check.pipeline_hooks().items(): for hk in hooks: rec['pipeline_hooks'].setdefault(stage, []) From 68cbd644405ed0ede1e7bbf5ef9bf7c8895f9eef Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 12 Jan 2022 23:29:12 +0100 Subject: [PATCH 30/62] Adapt CSCS tests --- cscs-checks/system/io/ior_check.py | 5 +++-- cscs-checks/tools/profiling_and_debugging/notool.py | 1 - 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cscs-checks/system/io/ior_check.py b/cscs-checks/system/io/ior_check.py index e1f401fa5f..45f2518fd4 100644 --- a/cscs-checks/system/io/ior_check.py +++ b/cscs-checks/system/io/ior_check.py @@ -173,5 +173,6 @@ def set_perf_patterns(self): } @run_after('init') - def set_dependency(self): - self.depends_on(re.sub(r'IorReadCheck', 'IorWriteCheck', self.name)) + def set_deps(self): + variant = IorWriteCheck.get_variant_nums(base_dir=self.base_dir)[0] + self.depends_on(IorWriteCheck.variant_name(variant)) diff --git a/cscs-checks/tools/profiling_and_debugging/notool.py b/cscs-checks/tools/profiling_and_debugging/notool.py index adb47a6076..f0a4260d91 100644 --- a/cscs-checks/tools/profiling_and_debugging/notool.py +++ b/cscs-checks/tools/profiling_and_debugging/notool.py @@ -32,7 +32,6 @@ class JacobiNoToolHybrid(rfm.RegressionTest): @run_after('init') def set_descr_name(self): self.descr = f'Jacobi (without tool) {self.lang} check' - self.name = f'{type(self).__name__}_{self.lang.replace("+", "p")}' @run_after('init') def remove_buggy_prgenv(self): From 75adf268f3a326a3383519c7de92119041b00fa3 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 12 Jan 2022 23:34:28 +0100 Subject: [PATCH 31/62] Remove unused imports --- cscs-checks/system/io/ior_check.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cscs-checks/system/io/ior_check.py b/cscs-checks/system/io/ior_check.py index 45f2518fd4..af46e382c5 100644 --- a/cscs-checks/system/io/ior_check.py +++ b/cscs-checks/system/io/ior_check.py @@ -5,7 +5,6 @@ import getpass import os -import re import reframe as rfm import reframe.utility.sanity as sn From 8cdcd57f18762f8a695a1ecb19832f6d0dccb3e0 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sun, 16 Jan 2022 12:19:51 +0100 Subject: [PATCH 32/62] WIP: Update documentation --- reframe/core/meta.py | 3 --- reframe/core/pipeline.py | 35 +++++++++++++++++++++-------------- unittests/test_pipeline.py | 18 ++++++++++++++++++ 3 files changed, 39 insertions(+), 17 deletions(-) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index b546c65cb8..8c7e7c56a0 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -51,7 +51,6 @@ def __setitem__(self, key, value): # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return - elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: @@ -64,7 +63,6 @@ def __setitem__(self, key, value): # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return - elif isinstance(value, fixtures.TestFixture): # Insert the attribute in the fixture namespace self['_rfm_local_fixture_space'][key] = value @@ -72,7 +70,6 @@ def __setitem__(self, key, value): # Override the regular class attribute (if present) self._namespace.pop(key, None) return - elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( 
f'cannot redefine parameter {key!r}' diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 8977a6e49d..26bde574e6 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -215,17 +215,6 @@ def pipeline_hooks(cls): return ret - #: The name of the test. - #: - #: :type: string that can contain any character except ``/`` - #: :default: For non-parameterised tests, the default name is the test - #: class name. For parameterised tests, the default name is constructed - #: by concatenating the test class name and the string representations - #: of every test parameter: ``TestClassName__``. - #: Any non-alphanumeric value in a parameter's representation is - #: converted to ``_``. - # name = variable(typ.Str[r'[^\/]+']) - #: List of programming environments supported by this test. #: #: If ``*`` is in the list then all programming environments are supported @@ -369,7 +358,7 @@ def pipeline_hooks(cls): #: :default: :class:`required` #: #: .. versionchanged:: 3.7.3 - #: Default value changed from ``os.path.join('.', self.name)`` to + #: Default value changed from ``os.path.join('.', self.unique_name)`` to #: :class:`required`. executable = variable(str) @@ -1038,6 +1027,12 @@ def __setattr__(self, name, value): "see the documentation of the 'name' attribute for details", from_version='3.10.0' ) + name_type = typ.Str[r'[^\/]+'] + if not isinstance(value, name_type): + raise TypeError( + f'attribute {name!r} must be of type {name_type.__name__}' + ) + self._rfm_unique_name = value else: super().__setattr__(name, value) @@ -1048,6 +1043,8 @@ def __setattr__(self, name, value): def unique_name(self): '''The unique name of this test. + :type: :class:`str` + .. versionadded:: 3.10.0 ''' return self._rfm_unique_name @@ -1059,11 +1056,13 @@ def display_name(self): This name contains a string representation of the various parameters of this specific test variant. - .. versionadded;: 3.10.0 + :type: :class:`str` .. note:: The display name may not be unique. + .. versionadded:: 3.10.0 + ''' def _format_params(cls, info, prefix=' %'): name = '' @@ -1107,7 +1106,15 @@ def _format_params(cls, info, prefix=' %'): @property def name(self): - # For backward compatibility + '''The name of the test. + + This is an alias of :attr:`unique_name`. + + .. versionchanged:: 3.10.0 + Setting this field is deprecated. 
+ + ''' + return self.unique_name @property diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 1bff88b938..c2b95b5e2a 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -1524,3 +1524,21 @@ def validate(self): hello_cls.setvar('message', 'hello') assert hello_cls.__name__ == 'HelloTest' _run(hello_cls(), *local_exec_ctx) + + +def test_set_name_deprecation(): + from reframe.core.warnings import ReframeDeprecationWarning + + with pytest.warns(ReframeDeprecationWarning): + class _X(rfm.RegressionTest): + @run_after('init') + def set_name(self): + self.name = 'foo' + + _X() + + with pytest.warns(ReframeDeprecationWarning): + class _X(rfm.RegressionTest): + name = 'foo' + + _X() From 4f5ebd607b3ba342afc1b16c9ca3e4601196f58c Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Mon, 17 Jan 2022 01:26:02 +0100 Subject: [PATCH 33/62] Introduce variable deprecation mechanism --- reframe/core/fields.py | 34 +++++++++++++++++++------- reframe/core/meta.py | 2 +- reframe/core/pipeline.py | 11 ++++++--- reframe/core/variables.py | 44 +++++++++++++++++++++++++++++++++- reframe/core/warnings.py | 15 ++++++++++++ unittests/test_dependencies.py | 6 ++--- unittests/test_fields.py | 18 ++++++++++++++ unittests/test_pipeline.py | 10 ++++++-- unittests/test_variables.py | 32 +++++++++++++++++++++++++ unittests/test_warnings.py | 8 +++++++ 10 files changed, 162 insertions(+), 18 deletions(-) diff --git a/reframe/core/fields.py b/reframe/core/fields.py index 35790b91a7..a5d7c1eca4 100644 --- a/reframe/core/fields.py +++ b/reframe/core/fields.py @@ -38,8 +38,13 @@ def remove_convertible(value): class Field: '''Base class for attribute validators.''' + def __init__(self, attr_name=None): + if attr_name is not None: + self._name = attr_name + def __set_name__(self, owner, name): - self._name = name + if not hasattr(self, '_name'): + self._name = name def __get__(self, obj, objtype): if obj is None: @@ -60,7 +65,8 @@ def __set__(self, obj, value): class TypedField(Field): '''Stores a field of predefined type''' - def __init__(self, main_type, *other_types): + def __init__(self, main_type, *other_types, attr_name=None): + super().__init__(attr_name) self._types = (main_type,) + other_types if not all(isinstance(t, type) for t in self._types): raise TypeError('{0} is not a sequence of types'. 
@@ -133,8 +139,8 @@ def __set__(self, obj, value): class TimerField(TypedField): '''Stores a timer in the form of a :class:`datetime.timedelta` object''' - def __init__(self, *other_types): - super().__init__(str, int, float, *other_types) + def __init__(self, *other_types, attr_name=None): + super().__init__(str, int, float, *other_types, attr_name=attr_name) def __set__(self, obj, value): value = remove_convertible(value) @@ -164,9 +170,9 @@ class ScopedDictField(TypedField): It also handles implicit conversions from ordinary dicts.''' - def __init__(self, valuetype, *other_types): + def __init__(self, valuetype, *other_types, attr_name=None): super().__init__(types.Dict[str, types.Dict[str, valuetype]], - ScopedDict, *other_types) + ScopedDict, *other_types, attr_name=attr_name) def __set__(self, obj, value): value = remove_convertible(value) @@ -180,10 +186,22 @@ def __set__(self, obj, value): class DeprecatedField(Field): '''Field wrapper for deprecating fields.''' - OP_SET = 1 - OP_GET = 2 + OP_GET = 1 + OP_SET = 2 OP_ALL = OP_SET | OP_GET + @property + def message(self): + return self._message + + @property + def op(self): + return self._op + + @property + def from_version(self): + return self._from_version + def __set_name__(self, owner, name): self._target_field.__set_name__(owner, name) diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 8c7e7c56a0..2b9b639b9d 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -124,7 +124,6 @@ def __getitem__(self, key): try: # Handle variable access return self['_rfm_local_var_space'][key] - except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: @@ -229,6 +228,7 @@ def __prepare__(metacls, name, bases, **kwargs): # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined + namespace['deprecate'] = variables.TestVar.create_deprecated # Regression test fixture space namespace['_rfm_local_fixture_space'] = namespaces.LocalNamespace() diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 26bde574e6..0791884c5e 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -44,6 +44,7 @@ ReframeSyntaxError) from reframe.core.meta import RegressionTestMeta from reframe.core.schedulers import Job +from reframe.core.variables import DEPRECATE_WR from reframe.core.warnings import user_deprecation_warning @@ -215,6 +216,10 @@ def pipeline_hooks(cls): return ret + name = deprecate(variable(typ.Str[r'[^\/]+'], attr_name='_rfm_unique_name'), + "setting the 'name' attribute is deprecated and " + "will be disabled in the future", DEPRECATE_WR) + #: List of programming environments supported by this test. #: #: If ``*`` is in the list then all programming environments are supported @@ -896,7 +901,7 @@ def __init_subclass__(cls, *, special=False, pin_prefix=False, @deferrable def __rfm_init__(self, *args, prefix=None, **kwargs): - if not self.is_fixture(): + if not self.is_fixture() and not hasattr(self, '_rfm_unique_name'): self._rfm_unique_name = type(self).variant_name(self.variant_num) # Add the parameters from the parameterized_test decorator. 
@@ -1020,7 +1025,7 @@ def __getattr__(self, name): f'{type(self).__qualname__!r} object has no attribute {name!r}' ) - def __setattr__(self, name, value): + def r__setattr__(self, name, value): if name == 'name': user_deprecation_warning( "setting the name of the test is deprecated; " @@ -1105,7 +1110,7 @@ def _format_params(cls, info, prefix=' %'): return self._rfm_display_name @property - def name(self): + def r_name(self): '''The name of the test. This is an alias of :attr:`unique_name`. diff --git a/reframe/core/variables.py b/reframe/core/variables.py index e87dd42d22..7e3af617c9 100644 --- a/reframe/core/variables.py +++ b/reframe/core/variables.py @@ -13,6 +13,8 @@ import reframe.core.fields as fields import reframe.core.namespaces as namespaces from reframe.core.exceptions import ReframeSyntaxError +from reframe.core.warnings import (user_deprecation_warning, + suppress_deprecations) class _UndefinedType: @@ -25,6 +27,10 @@ def __deepcopy__(self, memo): Undefined = _UndefinedType() +DEPRECATE_RD = 1 +DEPRECATE_WR = 2 +DEPRECATE_RDWR = DEPRECATE_RD | DEPRECATE_WR + class TestVar: '''Regression test variable class. @@ -53,6 +59,20 @@ def __init__(self, *args, **kwargs): self._field = field_type(*args, **kwargs) + @classmethod + def create_deprecated(cls, var, message, + kind=DEPRECATE_RDWR, from_version='0.0.0'): + ret = TestVar.__new__(TestVar) + ret._field = fields.DeprecatedField(var.field, message, + kind, from_version) + ret._default_value = var._default_value + return ret + + def _check_deprecation(self, kind): + if isinstance(self.field, fields.DeprecatedField): + if self.field.op & kind: + user_deprecation_warning(self.field.message) + def is_defined(self): return self._default_value is not Undefined @@ -60,6 +80,19 @@ def undefine(self): self._default_value = Undefined def define(self, value): + if value != self._default_value: + # We only issue a deprecation warning if the write attempt changes + # the value. This is a workaround to the fact that if a variable + # defined in parent classes is accessed by the current class, then + # the definition of the variable is "copied" in the class body as + # an assignment (see `MetaNamespace.__getitem__()`). The + # `VarSpace.extend()` method then checks all local class body + # assignments and if they refer to a variable (inherited or not), + # they call `define()` on it. So, practically, in this case, the + # `_default_value` is set redundantly once per class in the + # hierarchy. + self._check_deprecation(DEPRECATE_WR) + self._default_value = value @property @@ -67,6 +100,7 @@ def default_value(self): # Variables must be returned by-value to prevent an instance from # modifying the class variable space. self._check_is_defined() + self._check_deprecation(DEPRECATE_RD) return copy.deepcopy(self._default_value) @property @@ -81,7 +115,7 @@ def __set_name__(self, owner, name): self._name = name def __setattr__(self, name, value): - '''Set any additional variable attribute into __attrs__.''' + '''Set any additional variable attribute into the default value.''' if name in self.__slots__: super().__setattr__(name, value) else: @@ -528,6 +562,14 @@ def inject(self, obj, cls): :param cls: The test class. ''' + # Attribute injection is a special operation; the actual attribute + # descriptor fields will be created and they will be assigned their + # value; deprecations have been checked already during the class + # construction, so we don't want to trigger them also here. 
+        with suppress_deprecations():
+            self._inject(obj, cls)
+
+    def _inject(self, obj, cls):
         for name, var in self.items():
             setattr(cls, name, var.field)
             getattr(cls, name).__set_name__(obj, name)
diff --git a/reframe/core/warnings.py b/reframe/core/warnings.py
index 5e13aa16ed..6fd537d237 100644
--- a/reframe/core/warnings.py
+++ b/reframe/core/warnings.py
@@ -84,3 +84,18 @@ def user_deprecation_warning(message, from_version='0.0.0'):
     if _RAISE_DEPRECATION_ALWAYS or version >= min_version:
         warnings.warn(message, ReframeDeprecationWarning,
                       stacklevel=stack_level)
+
+
+class suppress_deprecations:
+    '''Temporarily suppress ReFrame deprecation warnings.'''
+
+    def __init__(self, *args, **kwargs):
+        self._ctxmgr = warnings.catch_warnings(*args, **kwargs)
+
+    def __enter__(self):
+        ret = self._ctxmgr.__enter__()
+        warnings.simplefilter('ignore', ReframeDeprecationWarning)
+        return ret
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return self._ctxmgr.__exit__(exc_type, exc_val, exc_tb)
diff --git a/unittests/test_dependencies.py b/unittests/test_dependencies.py
index 3f938a42e6..1e14c75a75 100644
--- a/unittests/test_dependencies.py
+++ b/unittests/test_dependencies.py
@@ -517,14 +517,14 @@ def test_build_deps_empty(default_exec_ctx):
 
 @pytest.fixture
 def make_test():
-    def _make_test(name):
+    def _make_test(test_name):
         class _Test(rfm.RegressionTest):
             valid_systems = ['*']
             valid_prog_environs = ['*']
             executable = 'echo'
-            executable_opts = [name]
+            executable_opts = [test_name]
 
-        return rfm.make_test(name, (_Test,), {})()
+        return rfm.make_test(test_name, (_Test,), {})()
 
     return _make_test
 
diff --git a/unittests/test_fields.py b/unittests/test_fields.py
index 9e6843b8ad..d9509a6a25 100644
--- a/unittests/test_fields.py
+++ b/unittests/test_fields.py
@@ -26,6 +26,24 @@ class FieldTester:
         getattr(c, 'var')
 
 
+def test_alt_attr_name():
+    class FieldTester:
+        var = fields.Field(attr_name='foo')
+
+    c = FieldTester()
+    c.var = 5
+
+    assert c.var == 5
+    assert c.foo == 5
+
+    c.foo = 6
+    assert c.var == 6
+    assert c.foo == 6
+
+    assert 'var' not in c.__dict__
+    assert 'foo' in c.__dict__
+
+
 def test_constant_field():
     class FieldTester:
         ro = fields.ConstantField('foo')
diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py
index c2b95b5e2a..53b18b1138 100644
--- a/unittests/test_pipeline.py
+++ b/unittests/test_pipeline.py
@@ -1535,10 +1535,16 @@ class _X(rfm.RegressionTest):
             def set_name(self):
                 self.name = 'foo'
 
-    _X()
+    x = _X()
+
+    assert x.name == 'foo'
+    assert x.unique_name == 'foo'
 
     with pytest.warns(ReframeDeprecationWarning):
         class _X(rfm.RegressionTest):
             name = 'foo'
 
-    _X()
+    x = _X()
+
+    assert x.name == 'foo'
+    assert x.unique_name == 'foo'
diff --git a/unittests/test_variables.py b/unittests/test_variables.py
index 7efe1af9af..c7409b78e9 100644
--- a/unittests/test_variables.py
+++ b/unittests/test_variables.py
@@ -454,3 +454,35 @@ class A(rfm.RegressionTest):
     assert math.trunc(npi) == -3
     assert math.floor(npi) == -4
     assert math.ceil(npi) == -3
+
+
+def test_var_deprecation():
+    from reframe.core.variables import DEPRECATE_RD, DEPRECATE_WR
+    from reframe.core.warnings import ReframeDeprecationWarning
+
+    # Check read deprecation
+    class A(rfm.RegressionMixin):
+        x = deprecate(variable(int, value=3),
+                      'accessing x is deprecated', DEPRECATE_RD)
+        y = deprecate(variable(int, value=5),
+                      'setting y is deprecated', DEPRECATE_WR)
+
+    class B(A):
+        z = variable(int, value=y)
+
+    with pytest.warns(ReframeDeprecationWarning):
+        class C(A):
+            w = variable(int, value=x)
+
+    with 
pytest.warns(ReframeDeprecationWarning):
+        class D(A):
+            y = 3
+
+    # Check that deprecation warnings are raised properly after instantiation
+    a = A()
+    with pytest.warns(ReframeDeprecationWarning):
+        c = a.x
+
+    c = a.y
+    with pytest.warns(ReframeDeprecationWarning):
+        a.y = 10
diff --git a/unittests/test_warnings.py b/unittests/test_warnings.py
index f571345a98..25a2412d15 100644
--- a/unittests/test_warnings.py
+++ b/unittests/test_warnings.py
@@ -63,3 +63,11 @@ def test_random_warning_formatting():
         'deprecated', UserWarning, 'file', 10, 'a = 1'
     )
     assert message == 'file:10: UserWarning: deprecated\n  a = 1\n'
+
+
+def test_suppress_deprecations():
+    with warn.suppress_deprecations():
+        warn.user_deprecation_warning('warning 1')
+
+    with pytest.warns(warn.ReframeDeprecationWarning):
+        warn.user_deprecation_warning('warning 2')

From 58c14da34a42ed0b1390ab02348e0ce35202b7db Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Mon, 17 Jan 2022 22:48:20 +0100
Subject: [PATCH 34/62] WIP: Update documentation

---
 docs/tutorial_deps.rst          | 11 ++++++++
 reframe/core/pipeline.py        | 45 +++++++++++----------------------
 tutorials/deps/parameterized.py | 39 ++++++++++++++++++++++++++++
 3 files changed, 65 insertions(+), 30 deletions(-)
 create mode 100644 tutorials/deps/parameterized.py

diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst
index fea0450671..fe0024b00a 100644
--- a/docs/tutorial_deps.rst
+++ b/docs/tutorial_deps.rst
@@ -426,4 +426,15 @@ The following listing shows how the actual test cases dependencies are formed wh
    < ... omitted ... >
 
+
 For more information on test dependencies, you can have a look at :doc:`dependencies`.
+
+
+Depending on Parameterized Tests
+--------------------------------
+
+As shown earlier in this section, tests define their dependencies by referencing the target tests by their unique name.
+This is straightforward when referring to regular tests, whose name matches the class name, but it becomes cumbersome when trying to refer to a parameterized test, since no safe assumptions can be made about its variant numbers.
+In order to safely and reliably refer to a parameterized test, you should use the :func:`get_variant_nums` and :func:`variant_name` class methods, as shown in the following example:
+
+.. literalinclude:: ../tutorials/deps/parameterized.py
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 0791884c5e..948d03928b 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -216,6 +216,21 @@ def pipeline_hooks(cls):
 
         return ret
 
+    #: The name of the test.
+    #:
+    #: This is an alias of :attr:`unique_name`.
+    #:
+    #: .. warning::
+    #:
+    #:     Setting the name of a test is deprecated and will be disabled in the
+    #:     future. If you were setting the name of a test to circumvent the old
+    #:     long parameterized test names in order to reference them in
+    #:     dependency chains, please refer to :doc:`xxx` for more details on how
+    #:     to achieve this.
+    #:
+    #: .. versionchanged:: 3.10.0
+    #:    Setting the :attr:`name` attribute is deprecated.
+ #: name = deprecate(variable(typ.Str[r'[^\/]+'], attr_name='_rfm_unique_name'), "setting the 'name' attribute is deprecated and " "will be disabled in the future", DEPRECATE_WR) @@ -1025,23 +1040,6 @@ def __getattr__(self, name): f'{type(self).__qualname__!r} object has no attribute {name!r}' ) - def r__setattr__(self, name, value): - if name == 'name': - user_deprecation_warning( - "setting the name of the test is deprecated; " - "see the documentation of the 'name' attribute for details", - from_version='3.10.0' - ) - name_type = typ.Str[r'[^\/]+'] - if not isinstance(value, name_type): - raise TypeError( - f'attribute {name!r} must be of type {name_type.__name__}' - ) - - self._rfm_unique_name = value - else: - super().__setattr__(name, value) - # Export read-only views to interesting fields @property @@ -1109,19 +1107,6 @@ def _format_params(cls, info, prefix=' %'): return self._rfm_display_name - @property - def r_name(self): - '''The name of the test. - - This is an alias of :attr:`unique_name`. - - .. versionchanged:: 3.10.0 - Setting this field is deprecated. - - ''' - - return self.unique_name - @property def current_environ(self): '''The programming environment that the regression test is currently diff --git a/tutorials/deps/parameterized.py b/tutorials/deps/parameterized.py new file mode 100644 index 0000000000..2632d15139 --- /dev/null +++ b/tutorials/deps/parameterized.py @@ -0,0 +1,39 @@ +# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. +# +# SPDX-License-Identifier: BSD-3-Clause + +import reframe as rfm +import reframe.utility.sanity as sn + + +@rfm.simple_test +class TestA(rfm.RunOnlyRegressionTest): + z = parameter(range(10)) + executable = 'echo' + valid_systems = ['*'] + valid_prog_environs = ['*'] + + @run_after('init') + def set_exec_opts(self): + self.executable_opts = [str(self.z)] + + @sanity_function + def validate(self): + return sn.assert_eq( + sn.extractsingle(r'\d+', self.stdout, 0, int), self.z + ) + + +@rfm.simple_test +class TestB(rfm.RunOnlyRegressionTest): + executable = 'echo' + valid_systems = ['*'] + valid_prog_environs = ['*'] + sanity_patterns = sn.assert_true(1) + + @run_after('init') + def setdeps(self): + variants = TestA.get_variant_nums(z=lambda x: x > 5) + for v in variants: + self.depends_on(TestA.variant_name(v)) From 072543f3e3381ff4358b1c4bf334a202c51450f7 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Tue, 18 Jan 2022 16:58:28 +0100 Subject: [PATCH 35/62] Fix code formatting --- reframe/core/pipeline.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 948d03928b..d4e534f88c 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -222,11 +222,11 @@ def pipeline_hooks(cls): #: #: .. warning:: #: - #: Setting the name of a test is deprecated and will be disabled in the - #: future. If you were setting the name of a test to circumvent the old - #: long parameterized test names in order to reference them in - #: dependency chains, please refer to :doc:`xxx` for more details on how - #: to achieve this. + #: Setting the name of a test is deprecated and will be disabled in the + #: future. If you were setting the name of a test to circumvent the old + #: long parameterized test names in order to reference them in + #: dependency chains, please refer to :doc:`xxx` for more details on how + #: to achieve this. #: #: .. 
versionchanged:: 3.10.0 #: Setting the :attr:`name` attribute is deprecated. From bf404ae14e5310504a06b15043b61576d0ba895f Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 19 Jan 2022 00:00:39 +0100 Subject: [PATCH 36/62] WIP: Adapt tutorial --- docs/listings/hello1.txt | 25 ++++++++++ docs/listings/run-report.json | 65 ++++++++++++++++++++++++++ docs/tutorial_basics.rst | 87 ++--------------------------------- docs/tutorial_deps.rst | 31 ++++++++++++- 4 files changed, 123 insertions(+), 85 deletions(-) create mode 100644 docs/listings/hello1.txt create mode 100644 docs/listings/run-report.json diff --git a/docs/listings/hello1.txt b/docs/listings/hello1.txt new file mode 100644 index 0000000000..d4bee37063 --- /dev/null +++ b/docs/listings/hello1.txt @@ -0,0 +1,25 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+072543f3 + command: './bin/reframe -c tutorials/basics/hello/hello1.py -r' + launched by: user@tresa.local + working directory: '/Users/user/Repositories/reframe' + settings file: '' + check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Tue Jan 18 23:54:45 2022 + +[----------] started processing HelloTest (HelloTest) +[ RUN ] HelloTest on generic:default using builtin +[----------] finished processing HelloTest (HelloTest) + +[----------] waiting for spawned checks to finish +[ OK ] (1/1) HelloTest @generic:default+builtin [compile: 0.149s run: 0.163s total: 0.338s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 1/1 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Tue Jan 18 23:54:45 2022 +Run report saved in '/Users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-nv8jqh00.log' diff --git a/docs/listings/run-report.json b/docs/listings/run-report.json new file mode 100644 index 0000000000..1c5c720aed --- /dev/null +++ b/docs/listings/run-report.json @@ -0,0 +1,65 @@ +{ + "session_info": { + "cmdline": "./bin/reframe -c tutorials/basics/hello/hello1.py -r", + "config_file": "", + "data_version": "2.0", + "hostname": "tresa.local", + "prefix_output": "/Users/user/Repositories/reframe/output", + "prefix_stage": "/Users/user/Repositories/reframe/stage", + "user": "user", + "version": "3.10.0-dev.2+072543f3", + "workdir": "/Users/user/Repositories/reframe", + "time_start": "2022-01-18T23:57:17+0100", + "time_end": "2022-01-18T23:57:17+0100", + "time_elapsed": 0.5832219123840332, + "num_cases": 1, + "num_failures": 0 + }, + "runs": [ + { + "num_cases": 1, + "num_failures": 0, + "num_aborted": 0, + "num_skipped": 0, + "runid": 0, + "testcases": [ + { + "build_stderr": "rfm_HelloTest_build.err", + "build_stdout": "rfm_HelloTest_build.out", + "dependencies_actual": [], + "dependencies_conceptual": [], + "description": "HelloTest", + "display_name": "HelloTest", + "filename": "/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py", + "environment": "builtin", + "fail_phase": null, + "fail_reason": null, + "jobid": "27101", + "job_stderr": "rfm_HelloTest_job.err", + "job_stdout": "rfm_HelloTest_job.out", + "maintainers": [], + "name": "HelloTest", + "nodelist": [ + "tresa.local" + ], + "outputdir": "/Users/user/Repositories/reframe/output/generic/default/builtin/HelloTest", + "perfvars": null, + "prefix": 
"/Users/user/Repositories/reframe/tutorials/basics/hello", + "result": "success", + "stagedir": "/Users/user/Repositories/reframe/stage/generic/default/builtin/HelloTest", + "scheduler": "local", + "system": "generic:default", + "tags": [], + "time_compile": 0.367156982421875, + "time_performance": 8.7738037109375e-05, + "time_run": 0.1748819351196289, + "time_sanity": 0.0006799697875976562, + "time_setup": 0.009120941162109375, + "time_total": 0.5680220127105713, + "unique_name": "HelloTest" + } + ] + } + ], + "restored_cases": [] +} diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst index b77b5e4200..d6e9e731d0 100644 --- a/docs/tutorial_basics.rst +++ b/docs/tutorial_basics.rst @@ -98,32 +98,8 @@ Now it's time to run our first test: ./bin/reframe -c tutorials/basics/hello/hello1.py -r -.. code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: 5d246bff) - command: './bin/reframe -c tutorials/basics/hello/hello1.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 18:23:30 2020 - - [----------] started processing HelloTest (HelloTest) - [ RUN ] HelloTest on generic:default using builtin - [----------] finished processing HelloTest (HelloTest) - - [----------] waiting for spawned checks to finish - [ OK ] (1/1) HelloTest on generic:default using builtin [compile: 0.389s run: 0.406s total: 0.811s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 1 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 18:23:31 2020 - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-00lf_tbi.log' +.. literalinclude:: listings/hello1.txt + :language: console Perfect! We have verified that we have a functioning C compiler in our system. @@ -155,64 +131,7 @@ Here are the contents of the report file for our first ReFrame run: cat ~/.reframe/reports/run-report.json -.. 
code-block:: javascript
-
-   {
-     "session_info": {
-       "cmdline": "./bin/reframe -c tutorials/basics/hello/hello1.py -r",
-       "config_file": "",
-       "data_version": "1.0",
-       "hostname": "dhcp-133-44.cscs.ch",
-       "prefix_output": "/Users/user/Repositories/reframe/output",
-       "prefix_stage": "/Users/user/Repositories/reframe/stage",
-       "user": "user",
-       "version": "3.1-dev2 (rev: 272e1aae)",
-       "workdir": "/Users/user/Repositories/reframe",
-       "time_start": "2020-07-24T11:05:46+0200",
-       "time_end": "2020-07-24T11:05:47+0200",
-       "time_elapsed": 0.7293069362640381,
-       "num_cases": 1,
-       "num_failures": 0
-     },
-     "runs": [
-       {
-         "num_cases": 1,
-         "num_failures": 0,
-         "runid": 0,
-         "testcases": [
-           {
-             "build_stderr": "rfm_HelloTest_build.err",
-             "build_stdout": "rfm_HelloTest_build.out",
-             "description": "HelloTest",
-             "environment": "builtin",
-             "fail_reason": null,
-             "fail_phase": null,
-             "jobid": 85063,
-             "job_stderr": "rfm_HelloTest_job.err",
-             "job_stdout": "rfm_HelloTest_job.out",
-             "name": "HelloTest",
-             "maintainers": [],
-             "nodelist": [
-               "dhcp-133-44.cscs.ch"
-             ],
-             "outputdir": "/Users/user/Repositories/reframe/output/generic/default/builtin/HelloTest",
-             "perfvars": null,
-             "result": "success",
-             "stagedir": null,
-             "scheduler": "local",
-             "system": "generic:default",
-             "tags": [],
-             "time_compile": 0.3776402473449707,
-             "time_performance": 4.506111145019531e-05,
-             "time_run": 0.2992382049560547,
-             "time_sanity": 0.0005609989166259766,
-             "time_setup": 0.0031709671020507812,
-             "time_total": 0.7213571071624756
-           }
-         ]
-       }
-     ]
-   }
+.. literalinclude:: listings/run-report.json
 
 
 More of "Hello, World!"
diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst
index fe0024b00a..8d1e527f03 100644
--- a/docs/tutorial_deps.rst
+++ b/docs/tutorial_deps.rst
@@ -434,7 +434,36 @@ Depending on Parameterized Tests
 --------------------------------
 
 As shown earlier in this section, tests define their dependencies by referencing the target tests by their unique name.
-This is straightforward when referring to regular tests, whose name matches the class name, but it becomes cumbersome when trying to refer to a parameterized test, since no safe assumptions can be made about its variant numbers.
+This is straightforward when referring to regular tests, whose name matches the class name, but it becomes cumbersome when trying to refer to a parameterized test, since no safe assumptions can be made about its variant numbers or about how the parameters are encoded in the name.
 In order to safely and reliably refer to a parameterized test, you should use the :func:`get_variant_nums` and :func:`variant_name` class methods, as shown in the following example:
 
 .. literalinclude:: ../tutorials/deps/parameterized.py
+   :emphasize-lines: 37-
+
+In this example, :class:`TestB` depends only on selected variants of :class:`TestA`.
+The :func:`get_variant_nums` method accepts a set of key-value pairs, where the keys are the names of the target test's parameters and the values are selector functions, and it returns the variant numbers that match these conditions.
+We can then use the :func:`variant_name` class method to retrieve the actual name of each selected variant.
+Listing the tests using the `new naming scheme `__, we can easily see the dependency graph:
+
+
+.. code-block:: console
+
+    export RFM_COMPACT_TEST_NAMES=y
+    ./bin/reframe -c tutorials/deps/parameterized.py -l
+
+
+.. 
code-block:: console

+   [List of matched checks]
+   - TestB
+       ^TestA %z=9
+       ^TestA %z=8
+       ^TestA %z=7
+       ^TestA %z=6
+   - TestA %z=5
+   - TestA %z=4
+   - TestA %z=3
+   - TestA %z=2
+   - TestA %z=1
+   - TestA %z=0
+   Found 11 check(s)

From b7a6e14a162873c3f071e2dc0fab7ac9bb6fe0cd Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Wed, 19 Jan 2022 18:02:26 +0100
Subject: [PATCH 37/62] WIP: Adapt tutorial

---
 docs/listings/hello2.txt               | 50 +++++
 docs/listings/hello2_catalina.txt      | 34 ++++
 docs/listings/hellomp1.txt             | 27 +++
 docs/listings/hellomp2.txt             | 63 +++++++
 docs/listings/perflogs.txt             |  8 +
 docs/listings/run-report.json          |  2 +-
 docs/listings/stream1.txt              | 37 ++++
 docs/listings/stream3_failure_only.txt | 14 ++
 docs/listings/hello1.txt               |  2 +-
 docs/tutorial_basics.rst               | 248 ++-----------------------
 tutorials/basics/stream/stream3.py     |  2 +-
 11 files changed, 250 insertions(+), 237 deletions(-)
 create mode 100644 docs/listings/hello2.txt
 create mode 100644 docs/listings/hello2_catalina.txt
 create mode 100644 docs/listings/hellomp1.txt
 create mode 100644 docs/listings/hellomp2.txt
 create mode 100644 docs/listings/perflogs.txt
 create mode 100644 docs/listings/stream1.txt
 create mode 100644 docs/listings/stream3_failure_only.txt

diff --git a/docs/listings/hello1.txt b/docs/listings/hello1.txt
index d4bee37063..e302ed7ee3 100644
--- a/docs/listings/hello1.txt
+++ b/docs/listings/hello1.txt
@@ -1,7 +1,7 @@
 [ReFrame Setup]
   version: 3.10.0-dev.2+072543f3
   command: './bin/reframe -c tutorials/basics/hello/hello1.py -r'
-  launched by: user@tresa.local
+  launched by: user@host
   working directory: '/Users/user/Repositories/reframe'
   settings file: ''
   check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py'
diff --git a/docs/listings/hello2.txt b/docs/listings/hello2.txt
new file mode 100644
index 0000000000..beba15a8e9
--- /dev/null
+++ b/docs/listings/hello2.txt
@@ -0,0 +1,50 @@
+[ReFrame Setup]
+  version: 3.10.0-dev.2+bf404ae1
+  command: './bin/reframe -c tutorials/basics/hello/hello2.py -r'
+  launched by: user@host
+  working directory: '/Users/user/Repositories/reframe'
+  settings file: ''
+  check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py'
+  stage directory: '/Users/user/Repositories/reframe/stage'
+  output directory: '/Users/user/Repositories/reframe/output'
+
+[==========] Running 2 check(s)
+[==========] Started on Wed Jan 19 14:50:11 2022
+
+[----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp)
+[ RUN ] HelloMultiLangTest_cpp on generic:default using builtin
+[ FAIL ] (1/2) HelloMultiLangTest %lang=cpp @generic:default+builtin [compile: 0.011s run: n/a total: 0.028s]
+==> test failed during 'compile': test staged in '/Users/user/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp'
+[----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp)
+
+[----------] started processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c)
+[ RUN ] HelloMultiLangTest_c on generic:default using builtin
+[----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c)
+
+[----------] waiting for spawned checks to finish
+[ OK ] (2/2) HelloMultiLangTest %lang=c @generic:default+builtin [compile: 0.437s run: 0.434s total: 0.896s]
+[----------] all spawned checks have finished
+
+[ FAILED ] Ran 2/2 test case(s) from 2 check(s) (1 failure(s), 0 skipped)
+[==========] Finished on Wed Jan 19 14:50:12 2022
+ 
+
+==============================================================================
+SUMMARY OF FAILURES
+------------------------------------------------------------------------------
+FAILURE INFO for HelloMultiLangTest_cpp
+  * Expanded name: HelloMultiLangTest %lang=cpp
+  * Description: HelloMultiLangTest %lang=cpp
+  * System partition: generic:default
+  * Environment: builtin
+  * Stage directory: /Users/user/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp
+  * Node list:
+  * Job type: local (id=None)
+  * Dependencies (conceptual): []
+  * Dependencies (actual): []
+  * Maintainers: []
+  * Failing phase: compile
+  * Rerun with '-n HelloMultiLangTest_cpp -p builtin --system generic:default -r'
+  * Reason: build system error: I do not know how to compile a C++ program
+------------------------------------------------------------------------------
+Run report saved in '/Users/user/.reframe/reports/run-report.json'
+Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-82sxn1an.log'
diff --git a/docs/listings/hello2_catalina.txt b/docs/listings/hello2_catalina.txt
new file mode 100644
index 0000000000..3fee5c7766
--- /dev/null
+++ b/docs/listings/hello2_catalina.txt
@@ -0,0 +1,34 @@
+[ReFrame Setup]
+  version: 3.10.0-dev.2+bf404ae1
+  command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r'
+  launched by: user@host
+  working directory: '/Users/user/Repositories/reframe'
+  settings file: 'tutorials/config/settings.py'
+  check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py'
+  stage directory: '/Users/user/Repositories/reframe/stage'
+  output directory: '/Users/user/Repositories/reframe/output'
+
+[==========] Running 2 check(s)
+[==========] Started on Wed Jan 19 14:56:30 2022
+
+[----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp)
+[ RUN ] HelloMultiLangTest_cpp on catalina:default using gnu
+[ RUN ] HelloMultiLangTest_cpp on catalina:default using clang
+[----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp)
+
+[----------] started processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c)
+[ RUN ] HelloMultiLangTest_c on catalina:default using gnu
+[ RUN ] HelloMultiLangTest_c on catalina:default using clang
+[----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c)
+
+[----------] waiting for spawned checks to finish
+[ OK ] (1/4) HelloMultiLangTest %lang=c @catalina:default+gnu [compile: 0.249s run: 0.348s total: 0.623s]
+[ OK ] (2/4) HelloMultiLangTest %lang=cpp @catalina:default+gnu [compile: 0.777s run: 1.270s total: 2.076s]
+[ OK ] (3/4) HelloMultiLangTest %lang=cpp @catalina:default+clang [compile: 0.563s run: 0.841s total: 1.431s]
+[ OK ] (4/4) HelloMultiLangTest %lang=c @catalina:default+clang [compile: 0.236s run: 0.430s total: 0.692s]
+[----------] all spawned checks have finished
+
+[ PASSED ] Ran 4/4 test case(s) from 2 check(s) (0 failure(s), 0 skipped)
+[==========] Finished on Wed Jan 19 14:56:33 2022
+Run report saved in '/Users/user/.reframe/reports/run-report.json'
+Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-ia9qxjzo.log'
diff --git a/docs/listings/hellomp1.txt b/docs/listings/hellomp1.txt
new file mode 100644
index 0000000000..4b3388eee7
--- /dev/null
+++ b/docs/listings/hellomp1.txt
@@ -0,0 +1,27 @@
+[ReFrame Setup]
+  version: 3.10.0-dev.2+bf404ae1
+  command: './bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r'
+  launched 
by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp1.py' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Wed Jan 19 17:04:06 2022 + +[----------] started processing HelloThreadedTest (HelloThreadedTest) +[ RUN ] HelloThreadedTest on catalina:default using gnu +[ RUN ] HelloThreadedTest on catalina:default using clang +[----------] finished processing HelloThreadedTest (HelloThreadedTest) + +[----------] waiting for spawned checks to finish +[ OK ] (1/2) HelloThreadedTest @catalina:default+gnu [compile: 1.764s run: 1.566s total: 3.355s] +[ OK ] (2/2) HelloThreadedTest @catalina:default+clang [compile: 1.481s run: 0.469s total: 1.975s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 2/2 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Wed Jan 19 17:04:09 2022 +Run report saved in '/Users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-r26o0go9.log' diff --git a/docs/listings/hellomp2.txt b/docs/listings/hellomp2.txt new file mode 100644 index 0000000000..613a753e7e --- /dev/null +++ b/docs/listings/hellomp2.txt @@ -0,0 +1,63 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+bf404ae1 + command: './bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp2.py' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Wed Jan 19 17:06:19 2022 + +[----------] started processing HelloThreadedExtendedTest (HelloThreadedExtendedTest) +[ RUN ] HelloThreadedExtendedTest on catalina:default using gnu +[ RUN ] HelloThreadedExtendedTest on catalina:default using clang +[----------] finished processing HelloThreadedExtendedTest (HelloThreadedExtendedTest) + +[----------] waiting for spawned checks to finish +[ FAIL ] (1/2) HelloThreadedExtendedTest @catalina:default+gnu [compile: 1.259s run: 0.904s total: 2.190s] +==> test failed during 'sanity': test staged in '/Users/user/Repositories/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest' +[ FAIL ] (2/2) HelloThreadedExtendedTest @catalina:default+clang [compile: 0.827s run: 0.296s total: 1.148s] +==> test failed during 'sanity': test staged in '/Users/user/Repositories/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest' +[----------] all spawned checks have finished + +[ FAILED ] Ran 2/2 test case(s) from 1 check(s) (2 failure(s), 0 skipped) +[==========] Finished on Wed Jan 19 17:06:21 2022 + +============================================================================== +SUMMARY OF FAILURES +------------------------------------------------------------------------------ +FAILURE INFO for HelloThreadedExtendedTest + * Expanded name: HelloThreadedExtendedTest + * Description: HelloThreadedExtendedTest + * System partition: catalina:default + * Environment: gnu + * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest + * Node list: vpn-39 + * Job type: local 
(id=34268) + * Dependencies (conceptual): [] + * Dependencies (actual): [] + * Maintainers: [] + * Failing phase: sanity + * Rerun with '-n HelloThreadedExtendedTest -p gnu --system catalina:default -r' + * Reason: sanity error: 10 != 16 +------------------------------------------------------------------------------ +FAILURE INFO for HelloThreadedExtendedTest + * Expanded name: HelloThreadedExtendedTest + * Description: HelloThreadedExtendedTest + * System partition: catalina:default + * Environment: clang + * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest + * Node list: vpn-39 + * Job type: local (id=34279) + * Dependencies (conceptual): [] + * Dependencies (actual): [] + * Maintainers: [] + * Failing phase: sanity + * Rerun with '-n HelloThreadedExtendedTest -p clang --system catalina:default -r' + * Reason: sanity error: 13 != 16 +------------------------------------------------------------------------------ +Run report saved in '/Users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-uoew5foo.log' diff --git a/docs/listings/perflogs.txt b/docs/listings/perflogs.txt new file mode 100644 index 0000000000..6906425949 --- /dev/null +++ b/docs/listings/perflogs.txt @@ -0,0 +1,8 @@ +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Copy=24672.4|ref=25200 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Scale=16834.0|ref=16800 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Add=18376.3|ref=18500 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Triad=19071.7|ref=18800 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Copy=24584.3|ref=55200 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Scale=16767.3|ref=16800 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Add=18409.5|ref=18500 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Triad=18959.5|ref=18800 (l=-0.05, u=0.05)|MB/s diff --git a/docs/listings/run-report.json b/docs/listings/run-report.json index 1c5c720aed..7d2f8be19b 100644 --- a/docs/listings/run-report.json +++ b/docs/listings/run-report.json @@ -3,7 +3,7 @@ "cmdline": "./bin/reframe -c tutorials/basics/hello/hello1.py -r", "config_file": "", "data_version": "2.0", - "hostname": "tresa.local", + "hostname": "host", "prefix_output": "/Users/user/Repositories/reframe/output", "prefix_stage": "/Users/user/Repositories/reframe/stage", "user": "user", diff --git a/docs/listings/stream1.txt b/docs/listings/stream1.txt new file mode 100644 index 0000000000..140395d155 --- /dev/null +++ b/docs/listings/stream1.txt @@ -0,0 +1,37 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+bf404ae1 + command: './bin/reframe -c tutorials/basics/stream/stream1.py -r --performance-report' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/Users/user/Repositories/reframe/tutorials/basics/stream/stream1.py' + 
stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Wed Jan 19 17:13:35 2022 + +[----------] started processing StreamTest (StreamTest) +[ RUN ] StreamTest on catalina:default using gnu +[----------] finished processing StreamTest (StreamTest) + +[----------] waiting for spawned checks to finish +[ OK ] (1/1) StreamTest @catalina:default+gnu [compile: 1.260s run: 2.844s total: 4.136s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 1/1 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Wed Jan 19 17:13:39 2022 +============================================================================== +PERFORMANCE REPORT +------------------------------------------------------------------------------ +StreamTest +- catalina:default + - gnu + * num_tasks: 1 + * Copy: 23864.2 MB/s + * Scale: 16472.6 MB/s + * Add: 18265.5 MB/s + * Triad: 18632.3 MB/s +------------------------------------------------------------------------------ +Run report saved in '/Users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-o1wls55_.log' diff --git a/docs/listings/stream3_failure_only.txt b/docs/listings/stream3_failure_only.txt new file mode 100644 index 0000000000..2611791793 --- /dev/null +++ b/docs/listings/stream3_failure_only.txt @@ -0,0 +1,14 @@ +FAILURE INFO for StreamWithRefTest + * Expanded name: StreamWithRefTest + * Description: StreamWithRefTest + * System partition: catalina:default + * Environment: gnu + * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/gnu/StreamWithRefTest + * Node list: vpn-39 + * Job type: local (id=34622) + * Dependencies (conceptual): [] + * Dependencies (actual): [] + * Maintainers: [] + * Failing phase: performance + * Rerun with '-n StreamWithRefTest -p gnu --system catalina:default -r' + * Reason: performance error: failed to meet reference: Copy=24584.3, expected 55200 (l=52440.0, u=57960.0) diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst index d6e9e731d0..acc9c06fd7 100644 --- a/docs/tutorial_basics.rst +++ b/docs/tutorial_basics.rst @@ -187,56 +187,8 @@ Let's run the test now: ./bin/reframe -c tutorials/basics/hello/hello2.py -r -.. 
code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+a3d0b0cd - command: './bin/reframe -c tutorials/basics/hello/hello2.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 2 check(s) - [==========] Started on Tue Mar 9 23:25:22 2021 - - [----------] started processing HelloMultiLangTest_c (HelloMultiLangTest_c) - [ RUN ] HelloMultiLangTest_c on generic:default using builtin - [----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest_c) - - [----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest_cpp) - [ RUN ] HelloMultiLangTest_cpp on generic:default using builtin - [ FAIL ] (1/2) HelloMultiLangTest_cpp on generic:default using builtin [compile: 0.006s run: n/a total: 0.023s] - ==> test failed during 'compile': test staged in '/Users/user/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp' - [----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest_cpp) - - [----------] waiting for spawned checks to finish - [ OK ] (2/2) HelloMultiLangTest_c on generic:default using builtin [compile: 0.981s run: 0.468s total: 1.475s] - [----------] all spawned checks have finished - - [ FAILED ] Ran 2/2 test case(s) from 2 check(s) (1 failure(s)) - [==========] Finished on Tue Mar 9 23:25:23 2021 - - ============================================================================== - SUMMARY OF FAILURES - ------------------------------------------------------------------------------ - FAILURE INFO for HelloMultiLangTest_cpp - * Test Description: HelloMultiLangTest_cpp - * System partition: generic:default - * Environment: builtin - * Stage directory: /Users/user/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp - * Node list: None - * Job type: local (id=None) - * Dependencies (conceptual): [] - * Dependencies (actual): [] - * Maintainers: [] - * Failing phase: compile - * Rerun with '-n HelloMultiLangTest_cpp -p builtin --system generic:default -r' - * Reason: build system error: I do not know how to compile a C++ program - ------------------------------------------------------------------------------ - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-wemvsvs2.log' +.. literalinclude:: listings/hello2.txt + :language: console Oops! The C++ test has failed. @@ -301,42 +253,8 @@ Let's now rerun our "Hello, World!" tests: ./bin/reframe -C tutorials/config/mysettings.py -c tutorials/basics/hello/hello2.py -r -.. 
code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+a3d0b0cd - command: './bin/reframe -C tutorials/config/mysettings.py -c tutorials/basics/hello/hello2.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 2 check(s) - [==========] Started on Tue Mar 9 23:28:00 2021 - - [----------] started processing HelloMultiLangTest_c (HelloMultiLangTest_c) - [ RUN ] HelloMultiLangTest_c on catalina:default using gnu - [ RUN ] HelloMultiLangTest_c on catalina:default using clang - [----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest_c) - - [----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest_cpp) - [ RUN ] HelloMultiLangTest_cpp on catalina:default using gnu - [ RUN ] HelloMultiLangTest_cpp on catalina:default using clang - [----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest_cpp) - - [----------] waiting for spawned checks to finish - [ OK ] (1/4) HelloMultiLangTest_cpp on catalina:default using gnu [compile: 0.768s run: 1.115s total: 1.909s] - [ OK ] (2/4) HelloMultiLangTest_c on catalina:default using gnu [compile: 0.600s run: 2.230s total: 2.857s] - [ OK ] (3/4) HelloMultiLangTest_c on catalina:default using clang [compile: 0.238s run: 2.129s total: 2.393s] - [ OK ] (4/4) HelloMultiLangTest_cpp on catalina:default using clang [compile: 1.006s run: 0.427s total: 1.456s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 4/4 test case(s) from 2 check(s) (0 failure(s)) - [==========] Finished on Tue Mar 9 23:28:03 2021 - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-dnubkvfi.log' - +.. literalinclude:: listings/hello2_catalina.txt + :language: console Notice how the same tests are now tried with both the ``gnu`` and ``clang`` programming environments, without having to touch them at all! That's one of the powerful features of ReFrame and we shall see later on, how easily we can port our tests to an HPC cluster with minimal changes. @@ -403,34 +321,8 @@ Let's run the test now: ./bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r -.. 
code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: 5d246bff) - command: './bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '/Users/user/Repositories/reframe/tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp1.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 20:02:37 2020 - - [----------] started processing HelloThreadedTest (HelloThreadedTest) - [ RUN ] HelloThreadedTest on catalina:default using gnu - [ RUN ] HelloThreadedTest on catalina:default using clang - [----------] finished processing HelloThreadedTest (HelloThreadedTest) - - [----------] waiting for spawned checks to finish - [ OK ] (1/2) HelloThreadedTest on catalina:default using gnu [compile: 1.591s run: 1.205s total: 2.816s] - [ OK ] (2/2) HelloThreadedTest on catalina:default using clang [compile: 1.141s run: 0.309s total: 1.465s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 2 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 20:02:40 2020 - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-h_itoc1k.log' +.. literalinclude:: listings/hellomp1.txt + :language: console Everything looks fine, but let's inspect the actual output of one of the tests: @@ -496,63 +388,8 @@ Let's run this version of the test now and see if it fails: ./bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r -.. code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: 5d246bff) - command: './bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '/Users/user/Repositories/reframe/tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 20:04:59 2020 - - [----------] started processing HelloThreadedExtendedTest (HelloThreadedExtendedTest) - [ RUN ] HelloThreadedExtendedTest on catalina:default using gnu - [ RUN ] HelloThreadedExtendedTest on catalina:default using clang - [----------] finished processing HelloThreadedExtendedTest (HelloThreadedExtendedTest) - - [----------] waiting for spawned checks to finish - [ FAIL ] (1/2) HelloThreadedExtendedTest on catalina:default using gnu [compile: 1.222s run: 0.891s total: 2.130s] - [ FAIL ] (2/2) HelloThreadedExtendedTest on catalina:default using clang [compile: 0.835s run: 0.167s total: 1.018s] - [----------] all spawned checks have finished - - [ FAILED ] Ran 2 test case(s) from 1 check(s) (2 failure(s)) - [==========] Finished on Mon Oct 12 20:05:02 2020 - - ============================================================================== - SUMMARY OF FAILURES - ------------------------------------------------------------------------------ - FAILURE INFO for HelloThreadedExtendedTest - * Test Description: HelloThreadedExtendedTest - * System partition: catalina:default - * Environment: gnu - * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest - * Node list: tresa.local - * 
Job type: local (id=60355)
- * Maintainers: []
- * Failing phase: sanity
- * Rerun with '-n HelloThreadedExtendedTest -p gnu --system catalina:default'
- * Reason: sanity error: 12 != 16
- ------------------------------------------------------------------------------
- FAILURE INFO for HelloThreadedExtendedTest
- * Test Description: HelloThreadedExtendedTest
- * System partition: catalina:default
- * Environment: clang
- * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest
- * Node list: tresa.local
- * Job type: local (id=60366)
- * Maintainers: []
- * Failing phase: sanity
- * Rerun with '-n HelloThreadedExtendedTest -p clang --system catalina:default'
- * Reason: sanity error: 6 != 16
- ------------------------------------------------------------------------------
- Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-zz7x_5c8.log'
-
+.. literalinclude:: listings/hellomp2.txt
+   :language: console

As expected, only some of the lines are printed correctly, which makes the test fail.
To fix this test, we need to compile with ``-DSYNC_MESSAGES``, which will synchronize the printing of messages.
@@ -622,44 +459,8 @@ Let's run the test now:

The :option:`--performance-report` option will generate a short report at the end for each performance test that has run.

-.. code-block:: none
-
- [ReFrame Setup]
- version: 3.3-dev0 (rev: 5d246bff)
- command: './bin/reframe -c tutorials/basics/stream/stream1.py -r --performance-report'
- launched by: user@tresa.local
- working directory: '/Users/user/Repositories/reframe'
- settings file: '/Users/user/Repositories/reframe/tutorials/config/settings.py'
- check search path: '/Users/user/Repositories/reframe/tutorials/basics/stream/stream1.py'
- stage directory: '/Users/user/Repositories/reframe/stage'
- output directory: '/Users/user/Repositories/reframe/output'
-
- [==========] Running 1 check(s)
- [==========] Started on Mon Oct 12 20:06:09 2020
-
- [----------] started processing StreamTest (StreamTest)
- [ RUN ] StreamTest on catalina:default using gnu
- [----------] finished processing StreamTest (StreamTest)
-
- [----------] waiting for spawned checks to finish
- [ OK ] (1/1) StreamTest on catalina:default using gnu [compile: 1.386s run: 2.377s total: 3.780s]
- [----------] all spawned checks have finished
-
- [ PASSED ] Ran 1 test case(s) from 1 check(s) (0 failure(s))
- [==========] Finished on Mon Oct 12 20:06:13 2020
- ==============================================================================
- PERFORMANCE REPORT
- ------------------------------------------------------------------------------
- StreamTest
- - catalina:default
- - gnu
- * num_tasks: 1
- * Copy: 24326.7 MB/s
- * Scale: 16664.2 MB/s
- * Add: 18398.7 MB/s
- * Triad: 18930.6 MB/s
- ------------------------------------------------------------------------------
- Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-gczplnic.log'
+.. literalinclude:: listings/stream1.txt
+   :language: console


---------------------------------------------------
@@ -737,20 +538,8 @@ If any obtained performance value is beyond its respective thresholds, the test

   ./bin/reframe -c tutorials/basics/stream/stream3.py -r --performance-report

-
-.. code-block:: none
-
- FAILURE INFO for StreamWithRefTest
- * Test Description: StreamWithRefTest
- * System partition: catalina:default
- * Environment: gnu
- * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/gnu/StreamWithRefTest
- * Node list: tresa.local
- * Job type: local (id=62114)
- * Maintainers: []
- * Failing phase: performance
- * Rerun with '-n StreamWithRefTest -p gnu --system catalina:default'
- * Reason: performance error: failed to meet reference: Copy=24586.5, expected 55200 (l=52440.0, u=57960.0)
+.. literalinclude:: listings/stream3_failure_only.txt
+   :language: console

------------------------------
Examining the performance logs
------------------------------

Let's inspect the log file from our last test:

   tail perflogs/catalina/default/StreamWithRefTest.log

-
-.. code-block:: none
-
- 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Copy=24762.2|ref=25200 (l=-0.05, u=0.05)|MB/s
- 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Scale=16784.6|ref=16800 (l=-0.05, u=0.05)|MB/s
- 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Add=18553.8|ref=18500 (l=-0.05, u=0.05)|MB/s
- 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Triad=18679.0|ref=18800 (l=-0.05, u=0.05)|MB/s
- 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Copy=24586.5|ref=55200 (l=-0.05, u=0.05)|MB/s
- 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Scale=16880.6|ref=16800 (l=-0.05, u=0.05)|MB/s
- 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Add=18570.4|ref=18500 (l=-0.05, u=0.05)|MB/s
- 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Triad=19048.3|ref=18800 (l=-0.05, u=0.05)|MB/s
+.. literalinclude:: listings/perflogs.txt
+   :language: console

Several pieces of information are printed for each run, such as the performance variables, their values, their references and thresholds.
The default format is suitable for easy parsing, but from the configuration file you may fully control not only the format but also what is being logged.
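Since each perflog entry is a single ``|``-separated record, post-processing these logs programmatically is straightforward.
The following is a minimal sketch of such post-processing; it is not part of the tutorial code and the ``parse_perflog`` helper is hypothetical, assuming only the default field layout shown in the listing above:

.. code-block:: python

   def parse_perflog(path):
       '''Parse perflog records of the form
       timestamp|version|testcase|jobid=...|var=value|ref=...|unit.'''
       records = []
       with open(path) as fp:
           for line in fp:
               fields = line.strip().split('|')
               if len(fields) != 7:
                   continue   # skip lines that do not match the layout

               timestamp, version, testcase, jobid, perf, ref, unit = fields
               varname, value = perf.split('=', 1)
               records.append({
                   'timestamp': timestamp,
                   'testcase': testcase,
                   'jobid': jobid.split('=', 1)[1],
                   'variable': varname,
                   'value': float(value),
                   'reference': ref,
                   'unit': unit
               })

       return records


   # E.g., print the most recent record of the log inspected above
   print(parse_perflog('perflogs/catalina/default/StreamWithRefTest.log')[-1])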
diff --git a/tutorials/basics/stream/stream3.py b/tutorials/basics/stream/stream3.py index de37d07485..3a0b2d4a25 100644 --- a/tutorials/basics/stream/stream3.py +++ b/tutorials/basics/stream/stream3.py @@ -23,7 +23,7 @@ class StreamWithRefTest(rfm.RegressionTest): } reference = { 'catalina': { - 'Copy': (25200, -0.05, 0.05, 'MB/s'), + 'Copy': (55200, -0.05, 0.05, 'MB/s'), 'Scale': (16800, -0.05, 0.05, 'MB/s'), 'Add': (18500, -0.05, 0.05, 'MB/s'), 'Triad': (18800, -0.05, 0.05, 'MB/s') From 27d56c73697b8cf37fb103a5c8d583393a0156bc Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 19 Jan 2022 18:37:37 +0100 Subject: [PATCH 38/62] WIP: Adapt tutorial --- docs/listings/alltests_daint.txt | 142 ++++++++++++++++++++++++++++++ docs/listings/hello2_catalina.txt | 4 +- docs/listings/hellomp1.txt | 2 +- docs/listings/hellomp2.txt | 2 +- docs/listings/stream1.txt | 2 +- docs/listings/stream4_daint.txt | 127 ++++++++++++++++++++++++++ 6 files changed, 274 insertions(+), 5 deletions(-) create mode 100644 docs/listings/alltests_daint.txt create mode 100644 docs/listings/stream4_daint.txt diff --git a/docs/listings/alltests_daint.txt b/docs/listings/alltests_daint.txt new file mode 100644 index 0000000000..f38cc18bcf --- /dev/null +++ b/docs/listings/alltests_daint.txt @@ -0,0 +1,142 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+b7a6e14a + command: './bin/reframe -c tutorials/basics/ -R -n HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest --performance-report -r' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: (R) '/users/user/Devel/reframe/tutorials/basics' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[==========] Running 4 check(s) +[==========] Started on Wed Jan 19 18:20:56 2022 + +[----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp) +[ RUN ] HelloMultiLangTest_cpp on daint:login using builtin +[ RUN ] HelloMultiLangTest_cpp on daint:login using gnu +[ RUN ] HelloMultiLangTest_cpp on daint:login using intel +[ RUN ] HelloMultiLangTest_cpp on daint:login using pgi +[ RUN ] HelloMultiLangTest_cpp on daint:login using cray +[ RUN ] HelloMultiLangTest_cpp on daint:gpu using gnu +[ RUN ] HelloMultiLangTest_cpp on daint:gpu using intel +[ RUN ] HelloMultiLangTest_cpp on daint:gpu using pgi +[ RUN ] HelloMultiLangTest_cpp on daint:gpu using cray +[ RUN ] HelloMultiLangTest_cpp on daint:mc using gnu +[ RUN ] HelloMultiLangTest_cpp on daint:mc using intel +[ RUN ] HelloMultiLangTest_cpp on daint:mc using pgi +[ RUN ] HelloMultiLangTest_cpp on daint:mc using cray +[----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp) + +[----------] started processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c) +[ RUN ] HelloMultiLangTest_c on daint:login using builtin +[ RUN ] HelloMultiLangTest_c on daint:login using gnu +[ RUN ] HelloMultiLangTest_c on daint:login using intel +[ RUN ] HelloMultiLangTest_c on daint:login using pgi +[ RUN ] HelloMultiLangTest_c on daint:login using cray +[ RUN ] HelloMultiLangTest_c on daint:gpu using gnu +[ RUN ] HelloMultiLangTest_c on daint:gpu using intel +[ RUN ] HelloMultiLangTest_c on daint:gpu using pgi +[ RUN ] HelloMultiLangTest_c on daint:gpu using cray +[ RUN ] HelloMultiLangTest_c on daint:mc using gnu +[ RUN ] HelloMultiLangTest_c on daint:mc using intel +[ RUN ] HelloMultiLangTest_c on daint:mc using 
pgi +[ RUN ] HelloMultiLangTest_c on daint:mc using cray +[----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c) + +[----------] started processing HelloThreadedExtended2Test (HelloThreadedExtended2Test) +[ RUN ] HelloThreadedExtended2Test on daint:login using builtin +[ RUN ] HelloThreadedExtended2Test on daint:login using gnu +[ RUN ] HelloThreadedExtended2Test on daint:login using intel +[ RUN ] HelloThreadedExtended2Test on daint:login using pgi +[ RUN ] HelloThreadedExtended2Test on daint:login using cray +[ RUN ] HelloThreadedExtended2Test on daint:gpu using gnu +[ RUN ] HelloThreadedExtended2Test on daint:gpu using intel +[ RUN ] HelloThreadedExtended2Test on daint:gpu using pgi +[ RUN ] HelloThreadedExtended2Test on daint:gpu using cray +[ RUN ] HelloThreadedExtended2Test on daint:mc using gnu +[ RUN ] HelloThreadedExtended2Test on daint:mc using intel +[ RUN ] HelloThreadedExtended2Test on daint:mc using pgi +[ RUN ] HelloThreadedExtended2Test on daint:mc using cray +[----------] finished processing HelloThreadedExtended2Test (HelloThreadedExtended2Test) + +[----------] started processing StreamWithRefTest (StreamWithRefTest) +[ RUN ] StreamWithRefTest on daint:login using gnu +[ RUN ] StreamWithRefTest on daint:gpu using gnu +[ RUN ] StreamWithRefTest on daint:mc using gnu +[----------] finished processing StreamWithRefTest (StreamWithRefTest) + +[----------] waiting for spawned checks to finish +[ OK ] ( 1/42) HelloThreadedExtended2Test @daint:login+cray [compile: 0.869s run: 105.884s total: 106.789s] +[ OK ] ( 2/42) HelloThreadedExtended2Test @daint:login+intel [compile: 2.418s run: 111.171s total: 121.396s] +[ OK ] ( 3/42) HelloMultiLangTest %lang=c @daint:login+cray [compile: 0.194s run: 176.455s total: 176.687s] +[ OK ] ( 4/42) HelloMultiLangTest %lang=cpp @daint:login+pgi [compile: 2.373s run: 238.350s total: 240.758s] +[ OK ] ( 5/42) HelloThreadedExtended2Test @daint:gpu+pgi [compile: 3.084s run: 90.799s total: 93.923s] +[ OK ] ( 6/42) HelloThreadedExtended2Test @daint:gpu+gnu [compile: 2.380s run: 103.562s total: 105.977s] +[ OK ] ( 7/42) HelloMultiLangTest %lang=c @daint:gpu+pgi [compile: 1.614s run: 163.384s total: 165.042s] +[ OK ] ( 8/42) HelloMultiLangTest %lang=c @daint:gpu+gnu [compile: 1.571s run: 174.946s total: 176.552s] +[ OK ] ( 9/42) HelloMultiLangTest %lang=cpp @daint:gpu+intel [compile: 2.124s run: 229.839s total: 231.999s] +[ OK ] (10/42) HelloMultiLangTest %lang=c @daint:mc+pgi [compile: 1.626s run: 136.895s total: 138.567s] +[ OK ] (11/42) HelloMultiLangTest %lang=c @daint:mc+gnu [compile: 1.540s run: 147.386s total: 148.961s] +[ OK ] (12/42) HelloMultiLangTest %lang=cpp @daint:mc+intel [compile: 2.158s run: 209.736s total: 211.936s] +[ OK ] (13/42) HelloThreadedExtended2Test @daint:login+builtin [compile: 0.825s run: 124.587s total: 125.455s] +[ OK ] (14/42) HelloMultiLangTest %lang=c @daint:login+pgi [compile: 1.599s run: 177.615s total: 179.539s] +[ OK ] (15/42) HelloMultiLangTest %lang=c @daint:login+builtin [compile: 0.163s run: 184.760s total: 184.963s] +[ OK ] (16/42) HelloMultiLangTest %lang=cpp @daint:login+gnu [compile: 1.874s run: 244.668s total: 246.575s] +[ OK ] (17/42) HelloMultiLangTest %lang=cpp @daint:gpu+pgi [compile: 2.239s run: 224.340s total: 226.619s] +[ OK ] (18/42) HelloMultiLangTest %lang=cpp @daint:gpu+gnu [compile: 1.884s run: 235.554s total: 237.470s] +[ OK ] (19/42) HelloMultiLangTest %lang=cpp @daint:mc+pgi [compile: 2.121s run: 191.871s total: 194.032s] +[ OK ] (20/42) HelloMultiLangTest 
%lang=cpp @daint:mc+gnu [compile: 1.788s run: 215.189s total: 217.017s] +[ OK ] (21/42) HelloMultiLangTest %lang=c @daint:login+gnu [compile: 1.571s run: 183.268s total: 184.878s] +[ OK ] (22/42) HelloMultiLangTest %lang=cpp @daint:login+cray [compile: 0.516s run: 237.593s total: 238.140s] +[ OK ] (23/42) HelloMultiLangTest %lang=cpp @daint:login+intel [compile: 2.124s run: 242.000s total: 244.157s] +[ OK ] (24/42) HelloMultiLangTest %lang=cpp @daint:login+builtin [compile: 0.517s run: 246.791s total: 247.343s] +[ OK ] (25/42) HelloThreadedExtended2Test @daint:login+pgi [compile: 3.005s run: 108.904s total: 111.944s] +[ OK ] (26/42) HelloMultiLangTest %lang=c @daint:login+intel [compile: 2.135s run: 181.436s total: 183.606s] +[ OK ] (27/42) HelloThreadedExtended2Test @daint:gpu+cray [compile: 0.886s run: 87.642s total: 88.565s] +[ OK ] (28/42) HelloMultiLangTest %lang=c @daint:gpu+cray [compile: 0.170s run: 153.574s total: 153.780s] +[ OK ] (29/42) HelloMultiLangTest %lang=cpp @daint:gpu+cray [compile: 0.519s run: 220.703s total: 221.262s] +[ OK ] (30/42) HelloMultiLangTest %lang=c @daint:mc+cray [compile: 0.271s run: 133.432s total: 133.746s] +[ OK ] (31/42) HelloMultiLangTest %lang=cpp @daint:mc+cray [compile: 0.504s run: 188.941s total: 189.483s] +[ OK ] (32/42) HelloThreadedExtended2Test @daint:login+gnu [compile: 2.138s run: 124.264s total: 126.440s] +[ OK ] (33/42) HelloMultiLangTest %lang=c @daint:gpu+intel [compile: 2.186s run: 170.732s total: 172.956s] +[ OK ] (34/42) HelloMultiLangTest %lang=c @daint:mc+intel [compile: 2.097s run: 143.858s total: 145.996s] +[ OK ] (35/42) StreamWithRefTest @daint:login+gnu [compile: 1.923s run: 16.452s total: 18.410s] +[ OK ] (36/42) HelloThreadedExtended2Test @daint:gpu+intel [compile: 2.447s run: 99.826s total: 102.307s] +[ OK ] (37/42) HelloThreadedExtended2Test @daint:mc+pgi [compile: 2.723s run: 172.072s total: 174.835s] +[ OK ] (38/42) HelloThreadedExtended2Test @daint:mc+gnu [compile: 2.187s run: 216.338s total: 218.569s] +[ OK ] (39/42) HelloThreadedExtended2Test @daint:mc+intel [compile: 2.351s run: 210.326s total: 212.711s] +[ OK ] (40/42) HelloThreadedExtended2Test @daint:mc+cray [compile: 0.827s run: 243.227s total: 244.100s] +[ OK ] (41/42) StreamWithRefTest @daint:mc+gnu [compile: 1.797s run: 228.275s total: 230.113s] +[ OK ] (42/42) StreamWithRefTest @daint:gpu+gnu [compile: 1.887s run: 400.080s total: 402.012s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 42/42 test case(s) from 4 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Wed Jan 19 18:31:31 2022 +============================================================================== +PERFORMANCE REPORT +------------------------------------------------------------------------------ +StreamWithRefTest +- daint:login + - gnu + * num_tasks: 1 + * Copy: 65583.2 MB/s + * Scale: 36908.7 MB/s + * Add: 38514.7 MB/s + * Triad: 38648.2 MB/s +- daint:gpu + - gnu + * num_tasks: 1 + * Copy: 50946.8 MB/s + * Scale: 35096.1 MB/s + * Add: 38841.5 MB/s + * Triad: 38729.8 MB/s +- daint:mc + - gnu + * num_tasks: 1 + * Copy: 48686.5 MB/s + * Scale: 31394.5 MB/s + * Add: 33423.7 MB/s + * Triad: 33520.9 MB/s +------------------------------------------------------------------------------ +Run report saved in '/users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-63nptdxz.log' diff --git a/docs/listings/hello2_catalina.txt b/docs/listings/hello2_catalina.txt index 3fee5c7766..079ed51d61 100644 --- a/docs/listings/hello2_catalina.txt +++ 
b/docs/listings/hello2_catalina.txt @@ -1,9 +1,9 @@ [ReFrame Setup] version: 3.10.0-dev.2+bf404ae1 - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r' + command: './bin/reframe -C tutorials/config/mysettings.py -c tutorials/basics/hello/hello2.py -r' launched by: user@host working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' + settings file: 'tutorials/config/mysettings.py' check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' stage directory: '/Users/user/Repositories/reframe/stage' output directory: '/Users/user/Repositories/reframe/output' diff --git a/docs/listings/hellomp1.txt b/docs/listings/hellomp1.txt index 4b3388eee7..71e238a538 100644 --- a/docs/listings/hellomp1.txt +++ b/docs/listings/hellomp1.txt @@ -3,7 +3,7 @@ command: './bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r' launched by: user@host working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' + settings file: 'tutorials/config/mysettings.py' check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp1.py' stage directory: '/Users/user/Repositories/reframe/stage' output directory: '/Users/user/Repositories/reframe/output' diff --git a/docs/listings/hellomp2.txt b/docs/listings/hellomp2.txt index 613a753e7e..303f52e331 100644 --- a/docs/listings/hellomp2.txt +++ b/docs/listings/hellomp2.txt @@ -3,7 +3,7 @@ command: './bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r' launched by: user@host working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' + settings file: 'tutorials/config/mysettings.py' check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp2.py' stage directory: '/Users/user/Repositories/reframe/stage' output directory: '/Users/user/Repositories/reframe/output' diff --git a/docs/listings/stream1.txt b/docs/listings/stream1.txt index 140395d155..85b7c47808 100644 --- a/docs/listings/stream1.txt +++ b/docs/listings/stream1.txt @@ -3,7 +3,7 @@ command: './bin/reframe -c tutorials/basics/stream/stream1.py -r --performance-report' launched by: user@host working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' + settings file: 'tutorials/config/mysettings.py' check search path: '/Users/user/Repositories/reframe/tutorials/basics/stream/stream1.py' stage directory: '/Users/user/Repositories/reframe/stage' output directory: '/Users/user/Repositories/reframe/output' diff --git a/docs/listings/stream4_daint.txt b/docs/listings/stream4_daint.txt new file mode 100644 index 0000000000..fa5517d485 --- /dev/null +++ b/docs/listings/stream4_daint.txt @@ -0,0 +1,127 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+2a281443 + command: './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/basics/stream/stream4.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Wed Jan 19 18:40:59 2022 + +[----------] started processing StreamMultiSysTest (StreamMultiSysTest) +[ RUN ] StreamMultiSysTest on daint:login using gnu +[ RUN ] StreamMultiSysTest on daint:login using intel 
+[ RUN ] StreamMultiSysTest on daint:login using pgi +[ RUN ] StreamMultiSysTest on daint:login using cray +[ RUN ] StreamMultiSysTest on daint:gpu using gnu +[ RUN ] StreamMultiSysTest on daint:gpu using intel +[ RUN ] StreamMultiSysTest on daint:gpu using pgi +[ RUN ] StreamMultiSysTest on daint:gpu using cray +[ RUN ] StreamMultiSysTest on daint:mc using gnu +[ RUN ] StreamMultiSysTest on daint:mc using intel +[ RUN ] StreamMultiSysTest on daint:mc using pgi +[ RUN ] StreamMultiSysTest on daint:mc using cray +[----------] finished processing StreamMultiSysTest (StreamMultiSysTest) + +[----------] waiting for spawned checks to finish +[ OK ] ( 1/12) StreamMultiSysTest @daint:login+pgi [compile: 10.793s run: 49.861s total: 60.881s] +[ OK ] ( 2/12) StreamMultiSysTest @daint:login+gnu [compile: 1.928s run: 72.907s total: 74.867s] +[ OK ] ( 3/12) StreamMultiSysTest @daint:login+intel [compile: 7.659s run: 63.146s total: 71.139s] +[ OK ] ( 4/12) StreamMultiSysTest @daint:login+cray [compile: 0.582s run: 48.996s total: 49.614s] +[ OK ] ( 5/12) StreamMultiSysTest @daint:mc+pgi [compile: 1.969s run: 71.561s total: 73.560s] +[ OK ] ( 6/12) StreamMultiSysTest @daint:mc+gnu [compile: 1.962s run: 87.247s total: 89.240s] +[ OK ] ( 7/12) StreamMultiSysTest @daint:mc+intel [compile: 2.176s run: 78.063s total: 80.268s] +[ OK ] ( 8/12) StreamMultiSysTest @daint:mc+cray [compile: 0.564s run: 68.645s total: 69.238s] +[ OK ] ( 9/12) StreamMultiSysTest @daint:gpu+pgi [compile: 2.529s run: 244.848s total: 247.413s] +[ OK ] (10/12) StreamMultiSysTest @daint:gpu+gnu [compile: 3.108s run: 257.580s total: 260.723s] +[ OK ] (11/12) StreamMultiSysTest @daint:gpu+intel [compile: 2.348s run: 251.428s total: 253.808s] +[ OK ] (12/12) StreamMultiSysTest @daint:gpu+cray [compile: 0.802s run: 241.325s total: 242.159s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 12/12 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Wed Jan 19 18:45:47 2022 +============================================================================== +PERFORMANCE REPORT +------------------------------------------------------------------------------ +StreamMultiSysTest +- daint:login + - gnu + * num_tasks: 1 + * Copy: 101679.8 MB/s + * Scale: 45191.5 MB/s + * Add: 54368.5 MB/s + * Triad: 60150.7 MB/s + - intel + * num_tasks: 1 + * Copy: 91599.9 MB/s + * Scale: 84242.4 MB/s + * Add: 106380.9 MB/s + * Triad: 108751.1 MB/s + - pgi + * num_tasks: 1 + * Copy: 34313.7 MB/s + * Scale: 27147.5 MB/s + * Add: 40151.0 MB/s + * Triad: 40265.4 MB/s + - cray + * num_tasks: 1 + * Copy: 44660.8 MB/s + * Scale: 24224.9 MB/s + * Add: 32372.7 MB/s + * Triad: 42503.4 MB/s +- daint:gpu + - gnu + * num_tasks: 1 + * Copy: 42720.5 MB/s + * Scale: 38430.6 MB/s + * Add: 43645.7 MB/s + * Triad: 43969.6 MB/s + - intel + * num_tasks: 1 + * Copy: 52676.1 MB/s + * Scale: 54405.8 MB/s + * Add: 59010.5 MB/s + * Triad: 59135.5 MB/s + - pgi + * num_tasks: 1 + * Copy: 50671.7 MB/s + * Scale: 39562.9 MB/s + * Add: 43926.7 MB/s + * Triad: 44044.7 MB/s + - cray + * num_tasks: 1 + * Copy: 50864.0 MB/s + * Scale: 39099.2 MB/s + * Add: 43314.9 MB/s + * Triad: 43936.4 MB/s +- daint:mc + - gnu + * num_tasks: 1 + * Copy: 48660.2 MB/s + * Scale: 38660.2 MB/s + * Add: 43688.1 MB/s + * Triad: 44030.3 MB/s + - intel + * num_tasks: 1 + * Copy: 52582.7 MB/s + * Scale: 48775.1 MB/s + * Add: 57207.5 MB/s + * Triad: 57349.3 MB/s + - pgi + * num_tasks: 1 + * Copy: 46134.0 MB/s + * Scale: 40549.6 MB/s + * Add: 44189.3 MB/s + * Triad: 44531.3 MB/s + - cray + 
* num_tasks: 1 + * Copy: 46567.1 MB/s + * Scale: 39779.5 MB/s + * Add: 43429.1 MB/s + * Triad: 43814.4 MB/s +------------------------------------------------------------------------------ +Run report saved in '/users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-fuzfkpeh.log' From 4cd9d2e924fea06c9f55b0aecf79ccfd0ba6acc4 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 19 Jan 2022 18:54:55 +0100 Subject: [PATCH 39/62] WIP: Adapt tutorial --- docs/tutorial_basics.rst | 278 +-------------------------------------- 1 file changed, 4 insertions(+), 274 deletions(-) diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst index acc9c06fd7..4f6aa563b0 100644 --- a/docs/tutorial_basics.rst +++ b/docs/tutorial_basics.rst @@ -650,151 +650,8 @@ We will only do so with the final versions of the tests from the previous sectio export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py ./bin/reframe -c tutorials/basics/ -R -n 'HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest' --performance-report -r - -.. code-block:: none - - [ReFrame Setup] - version: 3.4-dev2 (rev: f102d4bb) - command: './bin/reframe -c tutorials/basics/ -R -n HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest --performance-report -r' - launched by: user@dom101 - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: (R) '/users/user/Devel/reframe/tutorials/basics' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [==========] Running 4 check(s) - [==========] Started on Mon Jan 25 00:34:32 2021 - - [----------] started processing HelloMultiLangTest_c (HelloMultiLangTest_c) - [ RUN ] HelloMultiLangTest_c on daint:login using builtin - [ RUN ] HelloMultiLangTest_c on daint:login using gnu - [ RUN ] HelloMultiLangTest_c on daint:login using intel - [ RUN ] HelloMultiLangTest_c on daint:login using pgi - [ RUN ] HelloMultiLangTest_c on daint:login using cray - [ RUN ] HelloMultiLangTest_c on daint:gpu using gnu - [ RUN ] HelloMultiLangTest_c on daint:gpu using intel - [ RUN ] HelloMultiLangTest_c on daint:gpu using pgi - [ RUN ] HelloMultiLangTest_c on daint:gpu using cray - [ RUN ] HelloMultiLangTest_c on daint:mc using gnu - [ RUN ] HelloMultiLangTest_c on daint:mc using intel - [ RUN ] HelloMultiLangTest_c on daint:mc using pgi - [ RUN ] HelloMultiLangTest_c on daint:mc using cray - [----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest_c) - - [----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest_cpp) - [ RUN ] HelloMultiLangTest_cpp on daint:login using builtin - [ RUN ] HelloMultiLangTest_cpp on daint:login using gnu - [ RUN ] HelloMultiLangTest_cpp on daint:login using intel - [ RUN ] HelloMultiLangTest_cpp on daint:login using pgi - [ RUN ] HelloMultiLangTest_cpp on daint:login using cray - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using gnu - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using intel - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using pgi - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using cray - [ RUN ] HelloMultiLangTest_cpp on daint:mc using gnu - [ RUN ] HelloMultiLangTest_cpp on daint:mc using intel - [ RUN ] HelloMultiLangTest_cpp on daint:mc using pgi - [ RUN ] HelloMultiLangTest_cpp on daint:mc using cray - [----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest_cpp) - - [----------] started processing 
HelloThreadedExtended2Test (HelloThreadedExtended2Test) - [ RUN ] HelloThreadedExtended2Test on daint:login using builtin - [ RUN ] HelloThreadedExtended2Test on daint:login using gnu - [ RUN ] HelloThreadedExtended2Test on daint:login using intel - [ RUN ] HelloThreadedExtended2Test on daint:login using pgi - [ RUN ] HelloThreadedExtended2Test on daint:login using cray - [ RUN ] HelloThreadedExtended2Test on daint:gpu using gnu - [ RUN ] HelloThreadedExtended2Test on daint:gpu using intel - [ RUN ] HelloThreadedExtended2Test on daint:gpu using pgi - [ RUN ] HelloThreadedExtended2Test on daint:gpu using cray - [ RUN ] HelloThreadedExtended2Test on daint:mc using gnu - [ RUN ] HelloThreadedExtended2Test on daint:mc using intel - [ RUN ] HelloThreadedExtended2Test on daint:mc using pgi - [ RUN ] HelloThreadedExtended2Test on daint:mc using cray - [----------] finished processing HelloThreadedExtended2Test (HelloThreadedExtended2Test) - - [----------] started processing StreamWithRefTest (StreamWithRefTest) - [ RUN ] StreamWithRefTest on daint:login using gnu - [ RUN ] StreamWithRefTest on daint:gpu using gnu - [ RUN ] StreamWithRefTest on daint:mc using gnu - [----------] finished processing StreamWithRefTest (StreamWithRefTest) - - [----------] waiting for spawned checks to finish - [ OK ] ( 1/42) HelloThreadedExtended2Test on daint:login using cray [compile: 0.959s run: 56.203s total: 57.189s] - [ OK ] ( 2/42) HelloThreadedExtended2Test on daint:login using intel [compile: 2.096s run: 61.438s total: 64.062s] - [ OK ] ( 3/42) HelloMultiLangTest_cpp on daint:login using cray [compile: 0.479s run: 98.909s total: 99.406s] - [ OK ] ( 4/42) HelloMultiLangTest_c on daint:login using pgi [compile: 1.342s run: 137.250s total: 138.609s] - [ OK ] ( 5/42) HelloThreadedExtended2Test on daint:gpu using cray [compile: 0.792s run: 33.748s total: 34.558s] - [ OK ] ( 6/42) HelloThreadedExtended2Test on daint:gpu using intel [compile: 2.257s run: 48.545s total: 50.825s] - [ OK ] ( 7/42) HelloMultiLangTest_cpp on daint:gpu using cray [compile: 0.469s run: 85.383s total: 85.873s] - [ OK ] ( 8/42) HelloMultiLangTest_c on daint:gpu using cray [compile: 0.132s run: 124.678s total: 124.827s] - [ OK ] ( 9/42) HelloThreadedExtended2Test on daint:mc using cray [compile: 0.775s run: 15.569s total: 16.362s] - [ OK ] (10/42) HelloThreadedExtended2Test on daint:mc using intel [compile: 2.814s run: 24.600s total: 27.438s] - [ OK ] (11/42) HelloMultiLangTest_cpp on daint:mc using cray [compile: 0.474s run: 70.035s total: 70.528s] - [ OK ] (12/42) HelloMultiLangTest_c on daint:mc using cray [compile: 0.138s run: 110.807s total: 110.963s] - [ OK ] (13/42) HelloThreadedExtended2Test on daint:login using builtin [compile: 0.790s run: 67.313s total: 68.124s] - [ OK ] (14/42) HelloMultiLangTest_cpp on daint:login using pgi [compile: 1.799s run: 100.490s total: 102.683s] - [ OK ] (15/42) HelloMultiLangTest_cpp on daint:login using builtin [compile: 0.497s run: 108.380s total: 108.895s] - [ OK ] (16/42) HelloMultiLangTest_c on daint:login using gnu [compile: 1.337s run: 142.017s total: 143.373s] - [ OK ] (17/42) HelloMultiLangTest_cpp on daint:gpu using pgi [compile: 1.851s run: 88.935s total: 90.805s] - [ OK ] (18/42) HelloMultiLangTest_cpp on daint:gpu using gnu [compile: 1.640s run: 97.855s total: 99.513s] - [ OK ] (19/42) HelloMultiLangTest_c on daint:gpu using intel [compile: 1.578s run: 131.689s total: 133.287s] - [ OK ] (20/42) HelloMultiLangTest_cpp on daint:mc using pgi [compile: 1.917s run: 73.276s total: 75.213s] - [ OK 
] (21/42) HelloMultiLangTest_cpp on daint:mc using gnu [compile: 1.727s run: 82.213s total: 83.960s]
- [ OK ] (22/42) HelloMultiLangTest_c on daint:mc using intel [compile: 1.573s run: 117.806s total: 119.402s]
- [ OK ] (23/42) HelloMultiLangTest_cpp on daint:login using gnu [compile: 1.644s run: 106.956s total: 108.618s]
- [ OK ] (24/42) HelloMultiLangTest_c on daint:login using cray [compile: 0.146s run: 137.301s total: 137.466s]
- [ OK ] (25/42) HelloMultiLangTest_c on daint:login using intel [compile: 1.613s run: 140.058s total: 141.689s]
- [ OK ] (26/42) HelloMultiLangTest_c on daint:login using builtin [compile: 0.122s run: 143.692s total: 143.833s]
- [ OK ] (27/42) HelloMultiLangTest_c on daint:gpu using pgi [compile: 1.361s run: 127.958s total: 129.341s]
- [ OK ] (28/42) HelloMultiLangTest_c on daint:gpu using gnu [compile: 1.337s run: 136.031s total: 137.386s]
- [ OK ] (29/42) HelloMultiLangTest_c on daint:mc using pgi [compile: 1.410s run: 113.998s total: 115.428s]
- [ OK ] (30/42) HelloMultiLangTest_c on daint:mc using gnu [compile: 1.344s run: 122.086s total: 123.453s]
- [ OK ] (31/42) HelloThreadedExtended2Test on daint:login using pgi [compile: 2.733s run: 60.105s total: 62.951s]
- [ OK ] (32/42) HelloMultiLangTest_cpp on daint:login using intel [compile: 2.780s run: 104.916s total: 107.716s]
- [ OK ] (33/42) HelloThreadedExtended2Test on daint:gpu using pgi [compile: 2.373s run: 39.144s total: 41.545s]
- [ OK ] (34/42) HelloMultiLangTest_cpp on daint:gpu using intel [compile: 1.835s run: 95.042s total: 96.896s]
- [ OK ] (35/42) HelloThreadedExtended2Test on daint:mc using pgi [compile: 2.686s run: 20.751s total: 23.457s]
- [ OK ] (36/42) HelloMultiLangTest_cpp on daint:mc using intel [compile: 1.862s run: 79.275s total: 81.170s]
- [ OK ] (37/42) HelloThreadedExtended2Test on daint:login using gnu [compile: 2.106s run: 67.284s total: 69.409s]
- [ OK ] (38/42) HelloThreadedExtended2Test on daint:gpu using gnu [compile: 2.471s run: 56.360s total: 58.871s]
- [ OK ] (39/42) HelloThreadedExtended2Test on daint:mc using gnu [compile: 2.007s run: 32.300s total: 34.330s]
- [ OK ] (40/42) StreamWithRefTest on daint:login using gnu [compile: 1.941s run: 14.373s total: 16.337s]
- [ OK ] (41/42) StreamWithRefTest on daint:gpu using gnu [compile: 1.954s run: 11.815s total: 13.791s]
- [ OK ] (42/42) StreamWithRefTest on daint:mc using gnu [compile: 2.513s run: 10.672s total: 13.213s]
- [----------] all spawned checks have finished
-
- [ PASSED ] Ran 42 test case(s) from 4 check(s) (0 failure(s))
- [==========] Finished on Mon Jan 25 00:37:02 2021
- ==============================================================================
- PERFORMANCE REPORT
- ------------------------------------------------------------------------------
- StreamWithRefTest
- - daint:login
- - gnu
- * num_tasks: 1
- * Copy: 72923.3 MB/s
- * Scale: 45663.4 MB/s
- * Add: 49417.7 MB/s
- * Triad: 49426.4 MB/s
- - daint:gpu
- - gnu
- * num_tasks: 1
- * Copy: 50638.7 MB/s
- * Scale: 35186.0 MB/s
- * Add: 38564.4 MB/s
- * Triad: 38771.1 MB/s
- - daint:mc
- - gnu
- * num_tasks: 1
- * Copy: 19072.5 MB/s
- * Scale: 10395.6 MB/s
- * Add: 11041.0 MB/s
- * Triad: 11079.2 MB/s
- ------------------------------------------------------------------------------
- Log file(s) saved in: '/tmp/rfm-r4yjva71.log'
-
+.. literalinclude:: listings/alltests_daint.txt
+   :language: console

There it is! Without any change in our tests, we could simply run them on an HPC cluster with all of its intricacies.
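The key to this portability is that the tutorial tests do not hard-code any system- or environment-specific details: they constrain their ``valid_systems`` and ``valid_prog_environs`` loosely, often with the ``'*'`` wildcard, so the concrete test cases are derived entirely from the partitions and environments defined in the active configuration file.
As a reminder, a fully portable test needs little more than the following sketch (modeled on the hello test used earlier in this tutorial, not a verbatim copy of it):

.. code-block:: python

   import reframe as rfm
   import reframe.utility.sanity as sn


   @rfm.simple_test
   class HelloTest(rfm.RegressionTest):
       # Wildcards defer the choice of partitions and environments to the
       # active configuration file, so no test code changes when moving
       # from a laptop to an HPC cluster.
       valid_systems = ['*']
       valid_prog_environs = ['*']
       sourcepath = 'hello.c'

       @sanity_function
       def assert_hello(self):
           return sn.assert_found(r'Hello, World\!', self.stdout)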
@@ -889,135 +746,8 @@ Let's run our adapted test now: ./bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report -.. code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: cb974c13) - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/stream/stream4.py -r --performance-report' - launched by: user@dom101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/basics/stream/stream4.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 20:16:03 2020 - - [----------] started processing StreamMultiSysTest (StreamMultiSysTest) - [ RUN ] StreamMultiSysTest on daint:login using gnu - [ RUN ] StreamMultiSysTest on daint:login using intel - [ RUN ] StreamMultiSysTest on daint:login using pgi - [ RUN ] StreamMultiSysTest on daint:login using cray - [ RUN ] StreamMultiSysTest on daint:gpu using gnu - [ RUN ] StreamMultiSysTest on daint:gpu using intel - [ RUN ] StreamMultiSysTest on daint:gpu using pgi - [ RUN ] StreamMultiSysTest on daint:gpu using cray - [ RUN ] StreamMultiSysTest on daint:mc using gnu - [ RUN ] StreamMultiSysTest on daint:mc using intel - [ RUN ] StreamMultiSysTest on daint:mc using pgi - [ RUN ] StreamMultiSysTest on daint:mc using cray - [----------] finished processing StreamMultiSysTest (StreamMultiSysTest) - - [----------] waiting for spawned checks to finish - [ OK ] ( 1/12) StreamMultiSysTest on daint:gpu using pgi [compile: 2.092s run: 11.201s total: 13.307s] - [ OK ] ( 2/12) StreamMultiSysTest on daint:gpu using gnu [compile: 2.349s run: 17.140s total: 19.509s] - [ OK ] ( 3/12) StreamMultiSysTest on daint:login using pgi [compile: 2.230s run: 20.946s total: 23.189s] - [ OK ] ( 4/12) StreamMultiSysTest on daint:login using gnu [compile: 2.161s run: 27.093s total: 29.266s] - [ OK ] ( 5/12) StreamMultiSysTest on daint:mc using gnu [compile: 1.954s run: 7.904s total: 9.870s] - [ OK ] ( 6/12) StreamMultiSysTest on daint:gpu using intel [compile: 2.286s run: 14.686s total: 16.984s] - [ OK ] ( 7/12) StreamMultiSysTest on daint:login using intel [compile: 2.520s run: 24.427s total: 26.960s] - [ OK ] ( 8/12) StreamMultiSysTest on daint:mc using intel [compile: 2.312s run: 5.350s total: 7.678s] - [ OK ] ( 9/12) StreamMultiSysTest on daint:gpu using cray [compile: 0.672s run: 10.791s total: 11.476s] - [ OK ] (10/12) StreamMultiSysTest on daint:login using cray [compile: 0.706s run: 20.505s total: 21.229s] - [ OK ] (11/12) StreamMultiSysTest on daint:mc using cray [compile: 0.674s run: 2.763s total: 3.453s] - [ OK ] (12/12) StreamMultiSysTest on daint:mc using pgi [compile: 2.088s run: 5.124s total: 7.224s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 12 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 20:16:36 2020 - ============================================================================== - PERFORMANCE REPORT - ------------------------------------------------------------------------------ - StreamMultiSysTest - - daint:login - - gnu - * num_tasks: 1 - * Copy: 95784.6 MB/s - * Scale: 73747.3 MB/s - * Add: 79138.3 MB/s - * Triad: 81253.3 MB/s - - intel - * num_tasks: 1 - * Copy: 103540.5 MB/s - * Scale: 109257.6 MB/s - * Add: 112189.8 MB/s - * Triad: 113440.8 MB/s - - pgi - * num_tasks: 1 - * Copy: 99071.7 MB/s - * Scale: 74721.3 MB/s - * Add: 
81206.4 MB/s - * Triad: 78328.9 MB/s - - cray - * num_tasks: 1 - * Copy: 96664.5 MB/s - * Scale: 75637.4 MB/s - * Add: 74759.3 MB/s - * Triad: 73450.6 MB/s - - daint:gpu - - gnu - * num_tasks: 1 - * Copy: 42293.7 MB/s - * Scale: 38095.1 MB/s - * Add: 43080.7 MB/s - * Triad: 43719.2 MB/s - - intel - * num_tasks: 1 - * Copy: 52563.0 MB/s - * Scale: 54316.5 MB/s - * Add: 59044.5 MB/s - * Triad: 59165.5 MB/s - - pgi - * num_tasks: 1 - * Copy: 50710.5 MB/s - * Scale: 39639.5 MB/s - * Add: 44104.5 MB/s - * Triad: 44143.7 MB/s - - cray - * num_tasks: 1 - * Copy: 51159.8 MB/s - * Scale: 39176.0 MB/s - * Add: 43588.8 MB/s - * Triad: 43866.8 MB/s - - daint:mc - - gnu - * num_tasks: 1 - * Copy: 48744.5 MB/s - * Scale: 38774.7 MB/s - * Add: 43760.0 MB/s - * Triad: 44143.1 MB/s - - intel - * num_tasks: 1 - * Copy: 52707.0 MB/s - * Scale: 49011.8 MB/s - * Add: 57513.3 MB/s - * Triad: 57678.3 MB/s - - pgi - * num_tasks: 1 - * Copy: 46274.3 MB/s - * Scale: 40628.6 MB/s - * Add: 44352.4 MB/s - * Triad: 44630.2 MB/s - - cray - * num_tasks: 1 - * Copy: 46912.5 MB/s - * Scale: 40076.9 MB/s - * Add: 43639.0 MB/s - * Triad: 44068.3 MB/s - ------------------------------------------------------------------------------ - Log file(s) saved in: '/tmp/rfm-odx7qewe.log' - +.. literalinclude:: listings/stream4_daint.txt + :language: console Notice the improved performance of the benchmark in all partitions and the differences in performance between the different compilers. From e8bdbaaabc8ec34d45d7012636e53ea5353bb3c8 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 19 Jan 2022 21:56:34 +0100 Subject: [PATCH 40/62] WIP: Adapt tutorial --- docs/listings/maketest_mixin.txt | 18 ++++++++++ docs/listings/stream_params.txt | 25 ++++++++++++++ docs/tutorial_advanced.rst | 58 +++++--------------------------- 3 files changed, 51 insertions(+), 50 deletions(-) create mode 100644 docs/listings/maketest_mixin.txt create mode 100644 docs/listings/stream_params.txt diff --git a/docs/listings/maketest_mixin.txt b/docs/listings/maketest_mixin.txt new file mode 100644 index 0000000000..588400368c --- /dev/null +++ b/docs/listings/maketest_mixin.txt @@ -0,0 +1,18 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+4cd9d2e9 + command: './bin/reframe -c tutorials/advanced/makefiles/maketest_mixin.py -l' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +[List of matched checks] +- MakeOnlyTestAlt %elem_type=double +- MakeOnlyTestAlt %elem_type=float +- MakefileTestAlt %elem_type=double +- MakefileTestAlt %elem_type=float +Found 4 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-xv130jbu.log' diff --git a/docs/listings/stream_params.txt b/docs/listings/stream_params.txt new file mode 100644 index 0000000000..b31ffd3899 --- /dev/null +++ b/docs/listings/stream_params.txt @@ -0,0 +1,25 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+4cd9d2e9 + command: './bin/reframe -c tutorials/advanced/parameterized/stream.py -l' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py' + stage directory: '/Users/user/Repositories/reframe/stage' + 
output directory: '/Users/user/Repositories/reframe/output'
+
+[List of matched checks]
+- StreamMultiSysTest %num_bytes=536870912
+- StreamMultiSysTest %num_bytes=268435456
+- StreamMultiSysTest %num_bytes=134217728
+- StreamMultiSysTest %num_bytes=67108864
+- StreamMultiSysTest %num_bytes=33554432
+- StreamMultiSysTest %num_bytes=16777216
+- StreamMultiSysTest %num_bytes=8388608
+- StreamMultiSysTest %num_bytes=4194304
+- StreamMultiSysTest %num_bytes=2097152
+- StreamMultiSysTest %num_bytes=1048576
+- StreamMultiSysTest %num_bytes=524288
+Found 11 check(s)
+
+Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-93hsoek9.log'
diff --git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst
index ef6b9f7108..8057e60ca7 100644
--- a/docs/tutorial_advanced.rst
+++ b/docs/tutorial_advanced.rst
@@ -44,36 +44,13 @@ Let's try listing the generated tests:

   ./bin/reframe -c tutorials/advanced/parameterized/stream.py -l

-.. code-block:: none
+.. literalinclude:: listings/stream_params.txt
+   :language: console

- [ReFrame Setup]
- version: 3.6.0-dev.0+2f8e5b3b
- command: './bin/reframe -c tutorials/advanced/parameterized/stream.py -l'
- launched by: user@tresa.local
- working directory: '/Users/user/Repositories/reframe'
- settings file: 'tutorials/config/settings.py'
- check search path: '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py'
- stage directory: '/Users/user/Repositories/reframe/stage'
- output directory: '/Users/user/Repositories/reframe/output'
-
- [List of matched checks]
- - StreamMultiSysTest_2097152 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_67108864 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_1048576 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_536870912 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_4194304 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_33554432 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_8388608 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_268435456 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_16777216 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_524288 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- - StreamMultiSysTest_134217728 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py')
- Found 11 check(s)
-
- Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-s_ty1l50.log'
-
-
-ReFrame generates 11 tests from the single parameterized test that we have written and names them by appending a string representation of the parameter value.
+ReFrame generates 11 tests from the single parameterized test.
+When listing parameterized tests, ReFrame adds the list of parameters after the base test name using the notation ``%<param_name>=<param_value>``.
+Each generated test also gets a unique name.
+For more details on how the test names are generated for various types of tests, please refer to :doc:`test-naming-scheme`.
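To make the naming scheme concrete, here is a small hypothetical parameterized test (it is not part of the tutorial code; the test and its parameter are made up for illustration).
The ``parameter`` builtin generates one variant per value, so ``./bin/reframe -l`` would list it as ``EchoSizeTest %num_bytes=524288``, ``EchoSizeTest %num_bytes=1048576`` and so on:

.. code-block:: python

   import reframe as rfm
   import reframe.utility.sanity as sn


   @rfm.simple_test
   class EchoSizeTest(rfm.RunOnlyRegressionTest):
       # Eleven sizes, 2**19 ... 2**29 bytes; each value produces a distinct
       # test variant whose parameterization is appended to the base name.
       num_bytes = parameter(1 << p for p in range(19, 30))
       valid_systems = ['*']
       valid_prog_environs = ['*']
       executable = 'echo'

       @run_before('run')
       def set_executable_opts(self):
           # Parameter values are resolved per variant at instantiation
           # time, so they are accessed through `self` inside hooks.
           self.executable_opts = [str(self.num_bytes)]

       @sanity_function
       def validate(self):
           return sn.assert_found(str(self.num_bytes), self.stdout)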
Test parameterization in ReFrame is very powerful, since you can parameterize your tests on anything and create complex parameterization spaces.
A common pattern is to parameterize a test on the environment module that loads a software package, in order to test different versions of it.
@@ -316,27 +293,8 @@ Notice how the parameters are expanded in each of the individual tests:

   ./bin/reframe -c tutorials/advanced/makefiles/maketest_mixin.py -l

-
-.. code-block:: none
-
- [ReFrame Setup]
- version: 3.6.0-dev.0+2f8e5b3b
- command: './bin/reframe -c tutorials/advanced/makefiles/maketest_mixin.py -l'
- launched by: user@tresa.local
- working directory: '/Users/user/Repositories/reframe'
- settings file: 'tutorials/config/settings.py'
- check search path: '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py'
- stage directory: '/Users/user/Repositories/reframe/stage'
- output directory: '/Users/user/Repositories/reframe/output'
-
- [List of matched checks]
- - MakeOnlyTestAlt_double (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py')
- - MakeOnlyTestAlt_float (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py')
- - MakefileTestAlt_double (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py')
- - MakefileTestAlt_float (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py')
- Found 4 check(s)
-
- Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-e384bvkd.log'
+.. literalinclude:: listings/maketest_mixin.txt
+   :language: console


From 75605eab468e4d041dbc43761f28ca6fa0deb7bf Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Wed, 19 Jan 2022 22:45:35 +0100
Subject: [PATCH 41/62] WIP: Adapt tutorial

---
 docs/listings/osu_bench_deps.txt              | 109 ++++++++++++++++++
 docs/listings/osu_latency_list.txt            |  21 ++++
 .../listings/osu_latency_list_concretized.txt |  23 ++++
 docs/listings/osu_latency_res_error.txt       |  40 +++++++
 4 files changed, 193 insertions(+)
 create mode 100644 docs/listings/osu_bench_deps.txt
 create mode 100644 docs/listings/osu_latency_list.txt
 create mode 100644 docs/listings/osu_latency_list_concretized.txt
 create mode 100644 docs/listings/osu_latency_res_error.txt

diff --git a/docs/listings/osu_bench_deps.txt b/docs/listings/osu_bench_deps.txt
new file mode 100644
index 0000000000..1c4ea1be94
--- /dev/null
+++ b/docs/listings/osu_bench_deps.txt
@@ -0,0 +1,109 @@
+[ReFrame Setup]
+ version: 3.10.0-dev.2+e8bdbaaa
+ command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -r'
+ launched by: user@host
+ working directory: '/users/user/Devel/reframe'
+ settings file: '/users/user/Devel/reframe/tutorials/config/settings.py'
+ check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py'
+ stage directory: '/users/user/Devel/reframe/stage'
+ output directory: '/users/user/Devel/reframe/output'
+
+[==========] Running 8 check(s)
+[==========] Started on Wed Jan 19 22:01:19 2022
+
+[----------] started processing OSUDownloadTest (OSU benchmarks download sources)
+[ RUN ] OSUDownloadTest on daint:login using builtin
+[----------] finished processing OSUDownloadTest (OSU benchmarks download sources)
+
+[----------] started processing OSUBuildTest (OSU benchmarks build test)
+[ RUN ] OSUBuildTest on daint:gpu using gnu
+[ DEP ] OSUBuildTest on daint:gpu using gnu
+[ RUN ] OSUBuildTest on daint:gpu using intel
+[ DEP ] OSUBuildTest on daint:gpu using intel
+[ RUN ] 
OSUBuildTest on daint:gpu using pgi +[ DEP ] OSUBuildTest on daint:gpu using pgi +[----------] finished processing OSUBuildTest (OSU benchmarks build test) + +[----------] started processing OSUAllreduceTest_16 (OSU Allreduce test) +[ RUN ] OSUAllreduceTest_16 on daint:gpu using gnu +[ DEP ] OSUAllreduceTest_16 on daint:gpu using gnu +[ RUN ] OSUAllreduceTest_16 on daint:gpu using intel +[ DEP ] OSUAllreduceTest_16 on daint:gpu using intel +[ RUN ] OSUAllreduceTest_16 on daint:gpu using pgi +[ DEP ] OSUAllreduceTest_16 on daint:gpu using pgi +[----------] finished processing OSUAllreduceTest_16 (OSU Allreduce test) + +[----------] started processing OSUAllreduceTest_8 (OSU Allreduce test) +[ RUN ] OSUAllreduceTest_8 on daint:gpu using gnu +[ DEP ] OSUAllreduceTest_8 on daint:gpu using gnu +[ RUN ] OSUAllreduceTest_8 on daint:gpu using intel +[ DEP ] OSUAllreduceTest_8 on daint:gpu using intel +[ RUN ] OSUAllreduceTest_8 on daint:gpu using pgi +[ DEP ] OSUAllreduceTest_8 on daint:gpu using pgi +[----------] finished processing OSUAllreduceTest_8 (OSU Allreduce test) + +[----------] started processing OSUAllreduceTest_4 (OSU Allreduce test) +[ RUN ] OSUAllreduceTest_4 on daint:gpu using gnu +[ DEP ] OSUAllreduceTest_4 on daint:gpu using gnu +[ RUN ] OSUAllreduceTest_4 on daint:gpu using intel +[ DEP ] OSUAllreduceTest_4 on daint:gpu using intel +[ RUN ] OSUAllreduceTest_4 on daint:gpu using pgi +[ DEP ] OSUAllreduceTest_4 on daint:gpu using pgi +[----------] finished processing OSUAllreduceTest_4 (OSU Allreduce test) + +[----------] started processing OSUAllreduceTest_2 (OSU Allreduce test) +[ RUN ] OSUAllreduceTest_2 on daint:gpu using gnu +[ DEP ] OSUAllreduceTest_2 on daint:gpu using gnu +[ RUN ] OSUAllreduceTest_2 on daint:gpu using intel +[ DEP ] OSUAllreduceTest_2 on daint:gpu using intel +[ RUN ] OSUAllreduceTest_2 on daint:gpu using pgi +[ DEP ] OSUAllreduceTest_2 on daint:gpu using pgi +[----------] finished processing OSUAllreduceTest_2 (OSU Allreduce test) + +[----------] started processing OSUBandwidthTest (OSU bandwidth test) +[ RUN ] OSUBandwidthTest on daint:gpu using gnu +[ DEP ] OSUBandwidthTest on daint:gpu using gnu +[ RUN ] OSUBandwidthTest on daint:gpu using intel +[ DEP ] OSUBandwidthTest on daint:gpu using intel +[ RUN ] OSUBandwidthTest on daint:gpu using pgi +[ DEP ] OSUBandwidthTest on daint:gpu using pgi +[----------] finished processing OSUBandwidthTest (OSU bandwidth test) + +[----------] started processing OSULatencyTest (OSU latency test) +[ RUN ] OSULatencyTest on daint:gpu using gnu +[ DEP ] OSULatencyTest on daint:gpu using gnu +[ RUN ] OSULatencyTest on daint:gpu using intel +[ DEP ] OSULatencyTest on daint:gpu using intel +[ RUN ] OSULatencyTest on daint:gpu using pgi +[ DEP ] OSULatencyTest on daint:gpu using pgi +[----------] finished processing OSULatencyTest (OSU latency test) + +[----------] waiting for spawned checks to finish +[ OK ] ( 1/22) OSUDownloadTest @daint:login+builtin [compile: 0.008s run: 1.125s total: 1.159s] +[ OK ] ( 2/22) OSUBuildTest @daint:gpu+gnu [compile: 25.387s run: 0.052s total: 99.859s] +[ OK ] ( 3/22) OSUBuildTest @daint:gpu+pgi [compile: 31.630s run: 67.980s total: 99.737s] +[ OK ] ( 4/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi [compile: 0.009s run: 34.229s total: 48.473s] +[ OK ] ( 5/22) OSULatencyTest @daint:gpu+gnu [compile: 0.009s run: 44.246s total: 48.462s] +[ OK ] ( 6/22) OSUBuildTest @daint:gpu+intel [compile: 42.458s run: 74.164s total: 148.541s] +[ OK ] ( 7/22) OSULatencyTest @daint:gpu+intel [compile: 0.009s 
run: 26.194s total: 26.229s] +[ OK ] ( 8/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu [compile: 0.009s run: 40.997s total: 75.008s] +[ OK ] ( 9/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu [compile: 0.009s run: 48.053s total: 75.012s] +[ OK ] (10/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu [compile: 0.009s run: 57.616s total: 75.014s] +[ OK ] (11/22) OSULatencyTest @daint:gpu+pgi [compile: 0.009s run: 74.928s total: 74.963s] +[ OK ] (12/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel [compile: 0.012s run: 20.491s total: 26.871s] +[ OK ] (13/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi [compile: 0.009s run: 38.207s total: 75.629s] +[ OK ] (14/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi [compile: 0.009s run: 52.350s total: 75.599s] +[ OK ] (15/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi [compile: 0.011s run: 45.749s total: 75.990s] +[ OK ] (16/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu [compile: 0.010s run: 34.136s total: 76.337s] +[ OK ] (17/22) OSUBandwidthTest @daint:gpu+pgi [compile: 0.009s run: 84.226s total: 91.675s] +[ OK ] (18/22) OSUBandwidthTest @daint:gpu+gnu [compile: 0.009s run: 81.048s total: 92.037s] +[ OK ] (19/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel [compile: 0.009s run: 33.734s total: 50.373s] +[ OK ] (20/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel [compile: 0.010s run: 39.285s total: 50.622s] +[ OK ] (21/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel [compile: 0.009s run: 30.307s total: 50.925s] +[ OK ] (22/22) OSUBandwidthTest @daint:gpu+intel [compile: 0.009s run: 82.258s total: 85.500s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 22/22 test case(s) from 8 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Wed Jan 19 22:05:15 2022 +Run report saved in '/users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-8xfehbvy.log' diff --git a/docs/listings/osu_latency_list.txt b/docs/listings/osu_latency_list.txt new file mode 100644 index 0000000000..7aeb18a96d --- /dev/null +++ b/docs/listings/osu_latency_list.txt @@ -0,0 +1,21 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+e8bdbaaa + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- OSULatencyTest + ^OSUBuildTest + ^OSUDownloadTest + ^OSUBuildTest + ^OSUDownloadTest + ^OSUBuildTest + ^OSUDownloadTest +Found 3 check(s) + +Log file(s) saved in '/tmp/rfm-qrs60mvh.log' diff --git a/docs/listings/osu_latency_list_concretized.txt b/docs/listings/osu_latency_list_concretized.txt new file mode 100644 index 0000000000..349515ca9d --- /dev/null +++ b/docs/listings/osu_latency_list_concretized.txt @@ -0,0 +1,23 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+e8bdbaaa + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -lC' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- OSULatencyTest 
@daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +Concretized 3 test case(s) + +Log file(s) saved in '/tmp/rfm-7jrbzu9s.log' diff --git a/docs/listings/osu_latency_res_error.txt b/docs/listings/osu_latency_res_error.txt new file mode 100644 index 0000000000..e3aa0c95a1 --- /dev/null +++ b/docs/listings/osu_latency_res_error.txt @@ -0,0 +1,40 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+e8bdbaaa + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'gnu') -> 'OSUDownloadTest' +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'intel') -> 'OSUDownloadTest' +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'pgi') -> 'OSUDownloadTest' +./bin/reframe: skipping all dependent test cases + - ('OSUBuildTest', 'daint:gpu', 'intel') + - ('OSUBandwidthTest', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') + - ('OSUBuildTest', 'daint:gpu', 'gnu') + - ('OSUBandwidthTest', 'daint:gpu', 'gnu') + - ('OSULatencyTest', 'daint:gpu', 'intel') + - ('OSUBuildTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_4', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') + - ('OSULatencyTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_8', 'daint:gpu', 'gnu') + - ('OSULatencyTest', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_8', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_4', 'daint:gpu', 'pgi') + - ('OSUBandwidthTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_2', 'daint:gpu', 'pgi') + +[List of matched checks] +Found 0 check(s) + +Log file(s) saved in '/tmp/rfm-o8qctq3o.log' From 1ef8d4bc8893dc4e86b9286c6edc2d6fec201745 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 19 Jan 2022 22:49:48 +0100 Subject: [PATCH 42/62] Fix count of concretized test cases --- reframe/frontend/cli.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 3e6337903d..eb4b98c661 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -83,8 +83,8 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): return lines # We need the leaf test cases to be printed at the leftmost - testcases = list(t for t in testcases if t.in_degree == 0) - for t in testcases: + leaf_testcases = list(t for t in testcases if t.in_degree == 0) + for t in leaf_testcases: tc_info = '' details = '' if concretized: From ee3e96196f402aa52c5d79c83440c4fae4f2d450 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Wed, 19 Jan 2022 23:31:24 +0100 Subject: [PATCH 43/62] WIP: Adapt tutorial --- docs/listings/osu_bench_list_concretized.txt | 
68 ++++ .../osu_bench_list_concretized_gnu.txt | 32 ++ .../listings/osu_latency_list_concretized.txt | 23 -- docs/tutorial_deps.rst | 296 ++---------------- 4 files changed, 127 insertions(+), 292 deletions(-) create mode 100644 docs/listings/osu_bench_list_concretized.txt create mode 100644 docs/listings/osu_bench_list_concretized_gnu.txt delete mode 100644 docs/listings/osu_latency_list_concretized.txt diff --git a/docs/listings/osu_bench_list_concretized.txt b/docs/listings/osu_bench_list_concretized.txt new file mode 100644 index 0000000000..34027c8aaf --- /dev/null +++ b/docs/listings/osu_bench_list_concretized.txt @@ -0,0 +1,68 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+d9b4d32b + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -lC' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUBandwidthTest @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUBandwidthTest @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUBandwidthTest @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +Concretized 22 test case(s) + +Log file(s) saved in '/tmp/rfm-wzss58qz.log' diff --git a/docs/listings/osu_bench_list_concretized_gnu.txt b/docs/listings/osu_bench_list_concretized_gnu.txt new file mode 100644 index 0000000000..34fa3de266 --- /dev/null +++ b/docs/listings/osu_bench_list_concretized_gnu.txt @@ -0,0 +1,32 @@ +[ReFrame 
Setup] + version: 3.10.0-dev.2+d9b4d32b + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -l -p builtin -p gnu' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- OSUAllreduceTest %mpi_tasks=16 + ^OSUBuildTest + ^OSUDownloadTest +- OSUAllreduceTest %mpi_tasks=8 + ^OSUBuildTest + ^OSUDownloadTest +- OSUAllreduceTest %mpi_tasks=4 + ^OSUBuildTest + ^OSUDownloadTest +- OSUAllreduceTest %mpi_tasks=2 + ^OSUBuildTest + ^OSUDownloadTest +- OSUBandwidthTest + ^OSUBuildTest + ^OSUDownloadTest +- OSULatencyTest + ^OSUBuildTest + ^OSUDownloadTest +Found 8 check(s) + +Log file(s) saved in '/tmp/rfm-mizz6g1t.log' diff --git a/docs/listings/osu_latency_list_concretized.txt b/docs/listings/osu_latency_list_concretized.txt deleted file mode 100644 index 349515ca9d..0000000000 --- a/docs/listings/osu_latency_list_concretized.txt +++ /dev/null @@ -1,23 +0,0 @@ -[ReFrame Setup] - version: 3.10.0-dev.2+e8bdbaaa - command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -lC' - launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - -[List of matched checks] -- OSULatencyTest @daint:gpu+gnu - ^OSUBuildTest @daint:gpu+gnu - ^OSUDownloadTest @daint:login+builtin -- OSULatencyTest @daint:gpu+intel - ^OSUBuildTest @daint:gpu+intel - ^OSUDownloadTest @daint:login+builtin -- OSULatencyTest @daint:gpu+pgi - ^OSUBuildTest @daint:gpu+pgi - ^OSUDownloadTest @daint:login+builtin -Concretized 3 test case(s) - -Log file(s) saved in '/tmp/rfm-7jrbzu9s.log' diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst index 8d1e527f03..071ac28194 100644 --- a/docs/tutorial_deps.rst +++ b/docs/tutorial_deps.rst @@ -111,117 +111,8 @@ Here is the output when running the OSU tests with the asynchronous execution po ./bin/reframe -c tutorials/deps/osu_benchmarks.py -r -.. 
code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+4de0fee1 - command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -r' - launched by: user@daint101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [==========] Running 8 check(s) - [==========] Started on Wed Mar 10 20:53:56 2021 - - [----------] started processing OSUDownloadTest (OSU benchmarks download sources) - [ RUN ] OSUDownloadTest on daint:login using builtin - [----------] finished processing OSUDownloadTest (OSU benchmarks download sources) - - [----------] started processing OSUBuildTest (OSU benchmarks build test) - [ RUN ] OSUBuildTest on daint:gpu using gnu - [ DEP ] OSUBuildTest on daint:gpu using gnu - [ RUN ] OSUBuildTest on daint:gpu using intel - [ DEP ] OSUBuildTest on daint:gpu using intel - [ RUN ] OSUBuildTest on daint:gpu using pgi - [ DEP ] OSUBuildTest on daint:gpu using pgi - [----------] finished processing OSUBuildTest (OSU benchmarks build test) - - [----------] started processing OSULatencyTest (OSU latency test) - [ RUN ] OSULatencyTest on daint:gpu using gnu - [ DEP ] OSULatencyTest on daint:gpu using gnu - [ RUN ] OSULatencyTest on daint:gpu using intel - [ DEP ] OSULatencyTest on daint:gpu using intel - [ RUN ] OSULatencyTest on daint:gpu using pgi - [ DEP ] OSULatencyTest on daint:gpu using pgi - [----------] finished processing OSULatencyTest (OSU latency test) - - [----------] started processing OSUBandwidthTest (OSU bandwidth test) - [ RUN ] OSUBandwidthTest on daint:gpu using gnu - [ DEP ] OSUBandwidthTest on daint:gpu using gnu - [ RUN ] OSUBandwidthTest on daint:gpu using intel - [ DEP ] OSUBandwidthTest on daint:gpu using intel - [ RUN ] OSUBandwidthTest on daint:gpu using pgi - [ DEP ] OSUBandwidthTest on daint:gpu using pgi - [----------] finished processing OSUBandwidthTest (OSU bandwidth test) - - [----------] started processing OSUAllreduceTest_2 (OSU Allreduce test) - [ RUN ] OSUAllreduceTest_2 on daint:gpu using gnu - [ DEP ] OSUAllreduceTest_2 on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_2 on daint:gpu using intel - [ DEP ] OSUAllreduceTest_2 on daint:gpu using intel - [ RUN ] OSUAllreduceTest_2 on daint:gpu using pgi - [ DEP ] OSUAllreduceTest_2 on daint:gpu using pgi - [----------] finished processing OSUAllreduceTest_2 (OSU Allreduce test) - - [----------] started processing OSUAllreduceTest_4 (OSU Allreduce test) - [ RUN ] OSUAllreduceTest_4 on daint:gpu using gnu - [ DEP ] OSUAllreduceTest_4 on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_4 on daint:gpu using intel - [ DEP ] OSUAllreduceTest_4 on daint:gpu using intel - [ RUN ] OSUAllreduceTest_4 on daint:gpu using pgi - [ DEP ] OSUAllreduceTest_4 on daint:gpu using pgi - [----------] finished processing OSUAllreduceTest_4 (OSU Allreduce test) - - [----------] started processing OSUAllreduceTest_8 (OSU Allreduce test) - [ RUN ] OSUAllreduceTest_8 on daint:gpu using gnu - [ DEP ] OSUAllreduceTest_8 on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_8 on daint:gpu using intel - [ DEP ] OSUAllreduceTest_8 on daint:gpu using intel - [ RUN ] OSUAllreduceTest_8 on daint:gpu using pgi - [ DEP ] OSUAllreduceTest_8 on daint:gpu using pgi - [----------] finished processing OSUAllreduceTest_8 (OSU Allreduce test) - - [----------] started processing OSUAllreduceTest_16 (OSU Allreduce 
test) - [ RUN ] OSUAllreduceTest_16 on daint:gpu using gnu - [ DEP ] OSUAllreduceTest_16 on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_16 on daint:gpu using intel - [ DEP ] OSUAllreduceTest_16 on daint:gpu using intel - [ RUN ] OSUAllreduceTest_16 on daint:gpu using pgi - [ DEP ] OSUAllreduceTest_16 on daint:gpu using pgi - [----------] finished processing OSUAllreduceTest_16 (OSU Allreduce test) - - [----------] waiting for spawned checks to finish - [ OK ] ( 1/22) OSUDownloadTest on daint:login using builtin [compile: 0.007s run: 2.033s total: 2.078s] - [ OK ] ( 2/22) OSUBuildTest on daint:gpu using gnu [compile: 20.531s run: 0.039s total: 83.089s] - [ OK ] ( 3/22) OSUBuildTest on daint:gpu using pgi [compile: 27.193s run: 55.871s total: 83.082s] - [ OK ] ( 4/22) OSUAllreduceTest_16 on daint:gpu using gnu [compile: 0.007s run: 30.713s total: 33.470s] - [ OK ] ( 5/22) OSUBuildTest on daint:gpu using intel [compile: 35.256s run: 54.218s total: 116.712s] - [ OK ] ( 6/22) OSULatencyTest on daint:gpu using pgi [compile: 0.011s run: 23.738s total: 51.190s] - [ OK ] ( 7/22) OSUAllreduceTest_2 on daint:gpu using gnu [compile: 0.008s run: 31.879s total: 51.187s] - [ OK ] ( 8/22) OSUAllreduceTest_4 on daint:gpu using gnu [compile: 0.006s run: 37.447s total: 51.194s] - [ OK ] ( 9/22) OSUAllreduceTest_8 on daint:gpu using gnu [compile: 0.007s run: 42.914s total: 51.202s] - [ OK ] (10/22) OSUAllreduceTest_16 on daint:gpu using pgi [compile: 0.006s run: 51.172s total: 51.197s] - [ OK ] (11/22) OSULatencyTest on daint:gpu using gnu [compile: 0.007s run: 21.500s total: 51.730s] - [ OK ] (12/22) OSUAllreduceTest_2 on daint:gpu using pgi [compile: 0.007s run: 35.083s total: 51.700s] - [ OK ] (13/22) OSUAllreduceTest_8 on daint:gpu using pgi [compile: 0.007s run: 46.187s total: 51.681s] - [ OK ] (14/22) OSUAllreduceTest_4 on daint:gpu using pgi [compile: 0.007s run: 41.060s total: 52.030s] - [ OK ] (15/22) OSUAllreduceTest_2 on daint:gpu using intel [compile: 0.008s run: 27.401s total: 35.900s] - [ OK ] (16/22) OSUBandwidthTest on daint:gpu using gnu [compile: 0.008s run: 82.553s total: 107.334s] - [ OK ] (17/22) OSUBandwidthTest on daint:gpu using pgi [compile: 0.009s run: 87.559s total: 109.613s] - [ OK ] (18/22) OSUAllreduceTest_16 on daint:gpu using intel [compile: 0.006s run: 99.899s total: 99.924s] - [ OK ] (19/22) OSUBandwidthTest on daint:gpu using intel [compile: 0.007s run: 116.771s total: 128.125s] - [ OK ] (20/22) OSULatencyTest on daint:gpu using intel [compile: 0.008s run: 114.236s total: 128.398s] - [ OK ] (21/22) OSUAllreduceTest_8 on daint:gpu using intel [compile: 0.008s run: 125.541s total: 128.387s] - [ OK ] (22/22) OSUAllreduceTest_4 on daint:gpu using intel [compile: 0.007s run: 123.079s total: 128.651s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 22/22 test case(s) from 8 check(s) (0 failure(s)) - [==========] Finished on Wed Mar 10 20:58:03 2021 - Log file(s) saved in: '/tmp/rfm-q0gd9y6v.log' - +.. literalinclude:: listings/osu_bench_deps.txt + :language: console Before starting running the tests, ReFrame topologically sorts them based on their dependencies and schedules them for running using the selected execution policy. With the serial execution policy, ReFrame simply executes the tests to completion as they "arrive," since the tests are already topologically sorted. @@ -240,27 +131,8 @@ For example, if we select only the :class:`OSULatencyTest` for running, ReFrame ./bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l - -.. 
code-block:: none - - $ ./bin/reframe -C -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l - [ReFrame Setup] - version: 3.3-dev2 (rev: 8ded20cd) - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l' - launched by: user@daint101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [List of matched checks] - - OSUDownloadTest (found in '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py') - - OSUBuildTest (found in '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py') - - OSULatencyTest (found in '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py') - Found 3 check(s) - Log file(s) saved in: '/tmp/rfm-4c15g820.log' - +.. literalinclude:: docs/osu_latency_list.txt + :language: console Finally, when ReFrame cannot resolve a dependency of a test, it will issue a warning and skip completely all the test cases that recursively depend on this one. In the following example, we restrict the run of the :class:`OSULatencyTest` to the ``daint:gpu`` partition. @@ -271,109 +143,37 @@ As a result, its immediate dependency :class:`OSUBuildTest` will be skipped, whi ./bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l -.. code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+4de0fee1 - command: './bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l' - launched by: user@daint101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'gnu') -> 'OSUDownloadTest' - ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'intel') -> 'OSUDownloadTest' - ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'pgi') -> 'OSUDownloadTest' - ./bin/reframe: skipping all dependent test cases - - ('OSUBuildTest', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') - - ('OSUBuildTest', 'daint:gpu', 'pgi') - - ('OSULatencyTest', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_4', 'daint:gpu', 'pgi') - - ('OSULatencyTest', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_8', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') - - ('OSUBandwidthTest', 'daint:gpu', 'pgi') - - ('OSUBuildTest', 'daint:gpu', 'gnu') - - ('OSUBandwidthTest', 'daint:gpu', 'intel') - - ('OSUBandwidthTest', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_2', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') - - ('OSULatencyTest', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_4', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_8', 'daint:gpu', 'gnu') - - [List of matched checks] - - Found 0 check(s) - - Log file(s) saved in: '/tmp/rfm-6cxeil6h.log' - +.. 
literalinclude:: docs/osu_latency_res_error.txt
+   :language: console


 Listing Dependencies
 --------------------

-You can view the dependencies of a test by using the :option:`-L` option:
-
-
-.. code-block:: console
-
-   ./bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L
-
-
-.. code-block:: none
-
-   < ... omitted ... >
-
-   - OSULatencyTest:
-       Description:
-         OSU latency test
-
-       Environment modules:
-
+As shown in the earlier listing of :class:`OSULatencyTest`, the full dependency chain of the test is listed along with the test.
+Each target dependency is printed on a new line prefixed by the ``^`` character and indented proportionally to its level.
+If a target dependency appears in multiple paths, it will only be listed once.

-       Location:
-         /users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py
+The default test listing shows the dependencies at the test level, i.e., the *conceptual* dependencies.
+ReFrame generates multiple test cases from each test depending on the target system configuration.
+We have already seen in :doc:`tutorial_basics` how the STREAM benchmark generated many more test cases when it was run on an HPC system with multiple partitions and programming environments.
+These are the *actual* dependencies and form the actual test case graph that will be executed by the runtime.
+The mapping of a test to its concrete test cases that will be executed on a system is called *test concretization*.
+You can view the exact concretization of the selected tests with :option:`--list=concretized` or simply :option:`-lC`.
+Here is how the OSU benchmarks of this tutorial are concretized on the system ``daint``:

-       Maintainers:
-
-       Node allocation:
-         standard (2 task(s))
-
-       Pipeline hooks:
-         - post_setup: set_executable
-
-       Tags:
-
-       Valid environments:
-         gnu, pgi, intel
-
-       Valid systems:
-         daint:gpu
-
-       Dependencies (conceptual):
-         OSUBuildTest
+.. code-block:: console

-       Dependencies (actual):
-         - ('OSULatencyTest', 'daint:gpu', 'gnu') -> ('OSUBuildTest', 'daint:login', 'gnu')
-         - ('OSULatencyTest', 'daint:gpu', 'intel') -> ('OSUBuildTest', 'daint:login', 'intel')
-         - ('OSULatencyTest', 'daint:gpu', 'pgi') -> ('OSUBuildTest', 'daint:login', 'pgi')
+   ./bin/reframe -c tutorials/deps/osu_benchmarks.py -lC

-       < ... omitted ... >
+.. literalinclude:: listings/osu_bench_list_concretized.txt
+   :language: console

+Notice how the various test cases of the run benchmarks depend on the corresponding test cases of the build tests.

-Dependencies are not only listed conceptually, e.g., "test A depends on test B," but also in a way that shows how they are actually interpreted between the different test cases of the tests.
-The test dependencies do not change conceptually, but their actual interpretation might change from system to system or from programming environment to programming environment.
-The following listing shows how the actual test cases dependencies are formed when we select only the ``gnu`` and ``builtin`` programming environment for running:
+The concretization of test cases changes if a specific partition or programming environment is passed from the command line or, of course, if the test is run on a different system.
+If we scope our programming environments to ``gnu`` and ``builtin`` only, ReFrame will generate only 8 test cases instead of 22:

 .. note::

@@ -384,58 +184,18 @@

   ./bin/reframe -c tutorials/deps/osu_benchmarks.py -l -p builtin -p gnu

-
-.. 
code-block:: none
-   :emphasize-lines: 35
-
-   < ... omitted ... >
-
-   - OSULatencyTest:
-       Description:
-         OSU latency test
-
-       Environment modules:
-
-       Location:
-         /users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py
-
-       Maintainers:
-
-       Node allocation:
-         standard (2 task(s))
-
-       Pipeline hooks:
-         - post_setup: set_executable
-
-       Tags:
-
-       Valid environments:
-         gnu, pgi, intel
-
-       Valid systems:
-         daint:gpu
-
-       Dependencies (conceptual):
-         OSUBuildTest
-
-       Dependencies (actual):
-         - ('OSULatencyTest', 'daint:gpu', 'gnu') -> ('OSUBuildTest', 'daint:login', 'gnu')
-
-       < ... omitted ... >
+.. literalinclude:: docs/listings/osu_bench_list_concretized_gnu.txt

-For more information on test dependencies, you can have a look at :doc:`dependencies`.
+To gain a deeper understanding of how test dependencies work in ReFrame, please refer to :doc:`dependencies`.


Depending on Parameterized Tests
--------------------------------

As shown earlier in this section, tests define their dependencies by referencing the target tests by their unique name.
-This is straightforward when referring to regular tests, where their name matches the class name, but it becomes cumbersome trying to refer to a parameterized tests, since no safe assumption should be as of the variant number of the test or how the parameters are encoded in the name.
-In order to safely and reliably refer to parameterized test, you should use the :func:`get_variant_nums` and :func:`variant_name` class methods as shown in the following example:
+This is straightforward when referring to regular tests, where their name matches the class name, but it becomes cumbersome when trying to refer to a parameterized test, since no safe assumption should be made as to the variant number of the test or how the parameters are encoded in the name.
+In order to safely and reliably refer to a parameterized test, you should use the :func:`get_variant_nums` and :func:`variant_name` class methods as shown in the following example:

.. literalinclude:: ../tutorials/deps/parameterized.py
   :emphasize-lines: 37-

In this example, :class:`TestB` depends only on selected variants of :class:`TestA`.
The :func:`get_variant_nums` method accepts a set of key-value pairs representing the target test parameters and selector functions and returns the list of the variant numbers that correspond to these variants.
Using the :func:`variant_name` subsequently, we can get the actual name of the variant.
-Listing the tests using the `new naming scheme `__ we can easily see the dependency graph:

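+In code, the pattern looks roughly as follows; this is an abridged sketch of ``tutorials/deps/parameterized.py`` (executables, sanity checks and other details are elided here):
+
+.. code-block:: python
+
+   import reframe as rfm
+
+
+   @rfm.simple_test
+   class TestA(rfm.RunOnlyRegressionTest):
+       z = parameter(range(10))
+       ...
+
+
+   @rfm.simple_test
+   class TestB(rfm.RunOnlyRegressionTest):
+       ...
+
+       def __init__(self):
+           # Depend only on the variants of TestA with z > 5
+           for variant in TestA.get_variant_nums(z=lambda x: x > 5):
+               self.depends_on(TestA.variant_name(variant))
+
+Listing the tests reveals the resulting dependency graph:

 .. 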
code-block:: console - export RFM_COMPACT_TEST_NAMES=y ./bin/reframe -c tutorials/deps/parameterized.py -l From 1e1561f776b1ef54c43178307853a9652580b50f Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 20 Jan 2022 10:37:04 +0100 Subject: [PATCH 44/62] Fix listing of tests with multiple partitions and environments --- reframe/frontend/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index eb4b98c661..c6a9fcf75c 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -53,7 +53,7 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): lines = [] if printed is None: - printed = set() + printed = set(unique_checks) adj = u.deps for v in adj: From a1df4e67909c9f8c50797bf8ecfcf203df6133d2 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 20 Jan 2022 11:39:40 +0100 Subject: [PATCH 45/62] WIP: Adapt tutorial --- .../osu_bandwidth_concretized_daint.txt | 23 +++ .../osu_bandwidth_concretized_daint_pgi.txt | 17 ++ docs/listings/osu_bench_fixtures_list.txt | 56 ++++++ docs/listings/osu_bench_fixtures_run.txt | 115 ++++++++++++ docs/listings/param_deps_list.txt | 25 +++ docs/tutorial_deps.rst | 18 +- docs/tutorial_fixtures.rst | 176 +++--------------- 7 files changed, 266 insertions(+), 164 deletions(-) create mode 100644 docs/listings/osu_bandwidth_concretized_daint.txt create mode 100644 docs/listings/osu_bandwidth_concretized_daint_pgi.txt create mode 100644 docs/listings/osu_bench_fixtures_list.txt create mode 100644 docs/listings/osu_bench_fixtures_run.txt create mode 100644 docs/listings/param_deps_list.txt diff --git a/docs/listings/osu_bandwidth_concretized_daint.txt b/docs/listings/osu_bandwidth_concretized_daint.txt new file mode 100644 index 0000000000..050889e064 --- /dev/null +++ b/docs/listings/osu_bandwidth_concretized_daint.txt @@ -0,0 +1,23 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+1e1561f7 + command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- osu_bandwidth_test @daint:gpu+gnu + ^build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu + ^fetch_osu_benchmarks ~daint @daint:gpu+gnu +- osu_bandwidth_test @daint:gpu+intel + ^build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel + ^fetch_osu_benchmarks ~daint @daint:gpu+gnu +- osu_bandwidth_test @daint:gpu+pgi + ^build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi + ^fetch_osu_benchmarks ~daint @daint:gpu+gnu +Concretized 7 test case(s) + +Log file(s) saved in '/tmp/rfm-sew_xghv.log' diff --git a/docs/listings/osu_bandwidth_concretized_daint_pgi.txt b/docs/listings/osu_bandwidth_concretized_daint_pgi.txt new file mode 100644 index 0000000000..e762da3765 --- /dev/null +++ b/docs/listings/osu_bandwidth_concretized_daint_pgi.txt @@ -0,0 +1,17 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+1e1561f7 + command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' 
+ stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- osu_bandwidth_test @daint:gpu+pgi + ^build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi + ^fetch_osu_benchmarks ~daint @daint:gpu+pgi +Concretized 3 test case(s) + +Log file(s) saved in '/tmp/rfm-6cutxv8s.log' diff --git a/docs/listings/osu_bench_fixtures_list.txt b/docs/listings/osu_bench_fixtures_list.txt new file mode 100644 index 0000000000..cfafada8f6 --- /dev/null +++ b/docs/listings/osu_bench_fixtures_list.txt @@ -0,0 +1,56 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+1e1561f7 + command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -l' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- osu_allreduce_test %mpi_tasks=16 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_allreduce_test %mpi_tasks=8 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_allreduce_test %mpi_tasks=4 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_allreduce_test %mpi_tasks=2 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_bandwidth_test + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_latency_test + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +Found 6 check(s) + +Log file(s) saved in '/tmp/rfm-31ywvi49.log' diff --git a/docs/listings/osu_bench_fixtures_run.txt b/docs/listings/osu_bench_fixtures_run.txt new file mode 100644 index 0000000000..e514854885 --- /dev/null +++ b/docs/listings/osu_bench_fixtures_run.txt @@ -0,0 +1,115 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+1e1561f7 + command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -r' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[==========] Running 10 check(s) +[==========] Started on Thu Jan 20 11:25:22 2022 + +[----------] started processing fetch_osu_benchmarks_ba14252c (Fetch OSU benchmarks) +[ RUN  ] fetch_osu_benchmarks_ba14252c on daint:gpu using gnu +[----------] finished processing 
fetch_osu_benchmarks_ba14252c (Fetch OSU benchmarks) + +[----------] started processing build_osu_benchmarks_01b07297 (Build OSU benchmarks) +[ RUN  ] build_osu_benchmarks_01b07297 on daint:gpu using gnu +[  DEP ] build_osu_benchmarks_01b07297 on daint:gpu using gnu +[----------] finished processing build_osu_benchmarks_01b07297 (Build OSU benchmarks) + +[----------] started processing build_osu_benchmarks_90e14d9d (Build OSU benchmarks) +[ RUN  ] build_osu_benchmarks_90e14d9d on daint:gpu using intel +[  DEP ] build_osu_benchmarks_90e14d9d on daint:gpu using intel +[----------] finished processing build_osu_benchmarks_90e14d9d (Build OSU benchmarks) + +[----------] started processing build_osu_benchmarks_845fc6e3 (Build OSU benchmarks) +[ RUN  ] build_osu_benchmarks_845fc6e3 on daint:gpu using pgi +[  DEP ] build_osu_benchmarks_845fc6e3 on daint:gpu using pgi +[----------] finished processing build_osu_benchmarks_845fc6e3 (Build OSU benchmarks) + +[----------] started processing osu_allreduce_test_16 (OSU Allreduce test) +[ RUN  ] osu_allreduce_test_16 on daint:gpu using gnu +[  DEP ] osu_allreduce_test_16 on daint:gpu using gnu +[ RUN  ] osu_allreduce_test_16 on daint:gpu using intel +[  DEP ] osu_allreduce_test_16 on daint:gpu using intel +[ RUN  ] osu_allreduce_test_16 on daint:gpu using pgi +[  DEP ] osu_allreduce_test_16 on daint:gpu using pgi +[----------] finished processing osu_allreduce_test_16 (OSU Allreduce test) + +[----------] started processing osu_allreduce_test_8 (OSU Allreduce test) +[ RUN  ] osu_allreduce_test_8 on daint:gpu using gnu +[  DEP ] osu_allreduce_test_8 on daint:gpu using gnu +[ RUN  ] osu_allreduce_test_8 on daint:gpu using intel +[  DEP ] osu_allreduce_test_8 on daint:gpu using intel +[ RUN  ] osu_allreduce_test_8 on daint:gpu using pgi +[  DEP ] osu_allreduce_test_8 on daint:gpu using pgi +[----------] finished processing osu_allreduce_test_8 (OSU Allreduce test) + +[----------] started processing osu_allreduce_test_4 (OSU Allreduce test) +[ RUN  ] osu_allreduce_test_4 on daint:gpu using gnu +[  DEP ] osu_allreduce_test_4 on daint:gpu using gnu +[ RUN  ] osu_allreduce_test_4 on daint:gpu using intel +[  DEP ] osu_allreduce_test_4 on daint:gpu using intel +[ RUN  ] osu_allreduce_test_4 on daint:gpu using pgi +[  DEP ] osu_allreduce_test_4 on daint:gpu using pgi +[----------] finished processing osu_allreduce_test_4 (OSU Allreduce test) + +[----------] started processing osu_allreduce_test_2 (OSU Allreduce test) +[ RUN  ] osu_allreduce_test_2 on daint:gpu using gnu +[  DEP ] osu_allreduce_test_2 on daint:gpu using gnu +[ RUN  ] osu_allreduce_test_2 on daint:gpu using intel +[  DEP ] osu_allreduce_test_2 on daint:gpu using intel +[ RUN  ] osu_allreduce_test_2 on daint:gpu using pgi +[  DEP ] osu_allreduce_test_2 on daint:gpu using pgi +[----------] finished processing osu_allreduce_test_2 (OSU Allreduce test) + +[----------] started processing osu_bandwidth_test (OSU bandwidth test) +[ RUN  ] osu_bandwidth_test on daint:gpu using gnu +[  DEP ] osu_bandwidth_test on daint:gpu using gnu +[ RUN  ] osu_bandwidth_test on daint:gpu using intel +[  DEP ] osu_bandwidth_test on daint:gpu using intel +[ RUN  ] osu_bandwidth_test on daint:gpu using pgi +[  DEP ] osu_bandwidth_test on daint:gpu using pgi +[----------] finished processing osu_bandwidth_test (OSU bandwidth test) + +[----------] started processing osu_latency_test (OSU latency test) +[ RUN  ] osu_latency_test on daint:gpu using gnu +[  DEP ] osu_latency_test on daint:gpu using gnu +[ RUN  ] osu_latency_test 
on daint:gpu using intel +[  DEP ] osu_latency_test on daint:gpu using intel +[ RUN  ] osu_latency_test on daint:gpu using pgi +[  DEP ] osu_latency_test on daint:gpu using pgi +[----------] finished processing osu_latency_test (OSU latency test) + +[----------] waiting for spawned checks to finish +[  OK ] ( 1/22) fetch_osu_benchmarks ~daint @daint:gpu+gnu [compile: 0.008s run: 3.024s total: 3.064s] +[  OK ] ( 2/22) build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu [compile: 26.745s run: 0.061s total: 127.595s] +[  OK ] ( 3/22) build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi [compile: 35.453s run: 92.081s total: 127.581s] +[  OK ] ( 4/22) build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel [compile: 65.206s run: 80.848s total: 181.596s] +[  OK ] ( 5/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi [compile: 0.011s run: 60.059s total: 85.771s] +[  OK ] ( 6/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi [compile: 0.012s run: 66.442s total: 85.783s] +[  OK ] ( 7/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+pgi [compile: 0.012s run: 72.865s total: 85.793s] +[  OK ] ( 8/22) osu_latency_test @daint:gpu+pgi [compile: 0.012s run: 85.720s total: 85.763s] +[  OK ] ( 9/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu [compile: 0.011s run: 57.628s total: 86.430s] +[  OK ] (10/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu [compile: 0.013s run: 70.200s total: 86.406s] +[  OK ] (11/22) osu_latency_test @daint:gpu+gnu [compile: 0.011s run: 83.179s total: 86.381s] +[  OK ] (12/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu [compile: 0.011s run: 64.432s total: 86.937s] +[  OK ] (13/22) osu_bandwidth_test @daint:gpu+gnu [compile: 0.012s run: 122.812s total: 132.304s] +[  OK ] (14/22) osu_bandwidth_test @daint:gpu+pgi [compile: 0.011s run: 130.589s total: 136.884s] +[  OK ] (15/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel [compile: 0.012s run: 136.997s total: 155.368s] +[  OK ] (16/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel [compile: 0.011s run: 132.810s total: 157.931s] +[  OK ] (17/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi [compile: 0.011s run: 180.444s total: 212.537s] +[  OK ] (18/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu [compile: 0.013s run: 177.875s total: 213.031s] +[  OK ] (19/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel [compile: 0.013s run: 131.221s total: 159.503s] +[  OK ] (20/22) osu_latency_test @daint:gpu+intel [compile: 0.010s run: 159.878s total: 159.917s] +[  OK ] (21/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel [compile: 0.011s run: 139.273s total: 160.941s] +[  OK ] (22/22) osu_bandwidth_test @daint:gpu+intel [compile: 0.012s run: 205.774s total: 220.374s] +[----------] all spawned checks have finished + +[  PASSED  ] Ran 22/22 test case(s) from 10 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Thu Jan 20 11:32:08 2022 +Run report saved in '/users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-4e8yn_rh.log' diff --git a/docs/listings/param_deps_list.txt b/docs/listings/param_deps_list.txt new file mode 100644 index 0000000000..94232d5379 --- /dev/null +++ b/docs/listings/param_deps_list.txt @@ -0,0 +1,25 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+1e1561f7 + command: './bin/reframe -c tutorials/deps/parameterized.py -l' + launched by: user@host + working directory: '/users/user/Devel/reframe' + settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/users/user/Devel/reframe/tutorials/deps/parameterized.py' + stage directory: 
'/users/user/Devel/reframe/stage' + output directory: '/users/user/Devel/reframe/output' + +[List of matched checks] +- TestB + ^TestA %z=9 + ^TestA %z=8 + ^TestA %z=7 + ^TestA %z=6 +- TestA %z=5 +- TestA %z=4 +- TestA %z=3 +- TestA %z=2 +- TestA %z=1 +- TestA %z=0 +Found 11 check(s) + +Log file(s) saved in '/tmp/rfm-79hx1u2k.log' diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst index 071ac28194..2f518e96c7 100644 --- a/docs/tutorial_deps.rst +++ b/docs/tutorial_deps.rst @@ -209,19 +209,5 @@ Using the :func:`variant_name` subsequently, we can get the actual name of the v ./bin/reframe -c tutorials/deps/parameterized.py -l - -.. code-block:: console - - [List of matched checks] - - TestB - ^TestA %z=9 - ^TestA %z=8 - ^TestA %z=7 - ^TestA %z=6 - - TestA %z=5 - - TestA %z=4 - - TestA %z=3 - - TestA %z=2 - - TestA %z=1 - - TestA %z=0 - Found 11 check(s) +.. literalinclude:: listings/param_deps_list.txt + :language: console diff --git a/docs/tutorial_fixtures.rst b/docs/tutorial_fixtures.rst index e8410f461f..920ea04863 100644 --- a/docs/tutorial_fixtures.rst +++ b/docs/tutorial_fixtures.rst @@ -90,190 +90,70 @@ It is now time to run the new tests, but let us first list them: .. code-block:: bash - export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py reframe -c tutorials/fixtures/osu_benchmarks.py -l -.. code-block:: console +.. literalinclude:: listings/osu_bench_fixtures_list.txt + :language: console - [ReFrame Setup] - version: 3.9.0 - command: 'reframe -c tutorials/fixtures/osu_benchmarks.py -l' - launched by: user@daint106 - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - [List of matched checks] - - osu_latency_test (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_8 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_2 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_4 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_bandwidth_test (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_16 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - Found 6 check(s) - - Log file(s) saved in '/tmp/rfm-dlkc1vb_.log' - -Notice that only the leaf tests are listed and not their fixtures. -Listing the tests in detailed mode, however, using the :option:`-L` option, you will see all the generated fixtures: - - -.. code-block:: bash - - reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -L - -ReFrame will generate 4 fixtures for this test based on the partition and environment configurations for the current system. -The following figure shows the generated fixtures as well as their conceptual dependencies. +Notice how the :class:`build_osu_benchmarks` fixture is populated three times, once for each partition and environment combination, and the :class:`fetch_osu_benchmarks` is generated only once. +The following figure shows visually the conceptual dependencies of the :class:`osu_bandwidth_test`. .. 
figure:: _static/img/fixtures-conceptual-deps.svg
    :align: center

    :sub:`Expanded fixtures and dependencies for the OSU benchmarks example.`

-Notice how the :class:`build_osu_benchmarks` fixture is populated three times, once for each partition and environment combination, and the :class:`fetch_osu_benchmarks` is generated only once.
 Tests in a single ReFrame session must have unique names, so the fixture class name is mangled by the framework to generate a unique name in the test dependency DAG.
 A *scope* part is added to the base name of the fixture, which in this figure is indicated with red color.

 Under the hood, fixtures use the test dependency mechanism which is described in :doc:`dependencies`.
-The dependencies shown in the previous figure are conceptual.
-A single test in ReFrame generates a series of test cases for all the combinations of valid systems and valid programming environments and the actual dependencies are expressed in this more fine-grained layer, which is also the layer at which the execution of tests is scheduled.
+The dependencies listed by default and shown in the previous figure are conceptual.
+Depending on the available partitions and environments, tests and fixtures can be concretized differently.
+Fixtures, in particular, are more flexible in how they can be concretized, depending on their scope.
+The following listing and figure show the concretization of the :class:`osu_bandwidth_test`:
+
+.. code-block:: bash
+
+   reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC
+
+.. literalinclude:: listings/osu_bandwidth_concretized_daint.txt
+   :language: console


 .. figure:: _static/img/fixtures-actual-deps.svg
    :align: center

    :sub:`The actual dependencies for the OSU benchmarks example using fixtures.`

 The first thing to notice here is how the individual test cases of :class:`osu_bandwidth_test` depend only on the specific fixtures for their scope: when :class:`osu_bandwidth_test` runs on the ``daint:gpu`` partition using the ``gnu`` compiler it will only depend on the :class:`build_osu_benchmarks~daint:gpu+gnu` fixture.
 The second thing to notice is where the :class:`fetch_osu_benchmarks~daint` fixture will run.
 Since this is a *session* fixture, ReFrame has arbitrarily chosen to run it on ``daint:gpu`` using the ``gnu`` environment.
 A session fixture can run on any combination of valid partitions and environments.
-The following figure shows how the test dependency DAG is concretized when we scope the valid programming environments from the command line using ``-p pgi``.
+The following listing and figure show how the test dependency DAG is concretized when we scope the valid programming environments from the command line using ``-p pgi``.

+.. code-block:: bash
+
+   reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi
+
+.. literalinclude:: listings/osu_bandwidth_concretized_pgi_daint.txt
+   :language: console

 .. figure:: _static/img/fixtures-actual-deps-scoped.svg

+   :align: center

    :sub:`The dependency graph concretized for the 'pgi' environment only.`

 Notice how the :class:`fetch_osu_benchmarks~daint` fixture is selected to run in the only valid partition/environment combination. 
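+As a rough sketch, the fixtures in this example are requested as follows (abridged from ``tutorials/fixtures/osu_benchmarks.py``; everything unrelated to the fixture declarations is elided):
+
+.. code-block:: python
+
+   class build_osu_benchmarks(rfm.CompileOnlyRegressionTest):
+       # Fetch the sources only once for the whole session; any single
+       # valid partition/environment combination may run this fixture
+       osu_benchmarks = fixture(fetch_osu_benchmarks, scope='session')
+       ...
+
+
+   class osu_bandwidth_test(rfm.RunOnlyRegressionTest):
+       # Build the benchmarks once per partition/environment combination
+       osu_binaries = fixture(build_osu_benchmarks, scope='environment')
+       ...
+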
+The free placement of the session fixture is an important difference from the same example written with raw dependencies in :doc:`dependencies`: there, in order to avoid unresolved dependencies, we had to specify explicitly the valid programming environment of the test that fetches the sources.
+Fixtures do not need that, since you can impose less strict constraints by setting their scope accordingly.

-The following listing shows the output of running the tutorial examples.
-
-.. code-block:: console
-
-   [==========] Running 10 check(s)
-   [==========] Started on Sun Oct 31 22:00:28 2021
-
-   [----------] started processing fetch_osu_benchmarks~daint (Fetch OSU benchmarks)
-   [ RUN      ] fetch_osu_benchmarks~daint on daint:gpu using gnu
-   [----------] finished processing fetch_osu_benchmarks~daint (Fetch OSU benchmarks)
-
-   [----------] started processing build_osu_benchmarks~daint:gpu+gnu (Build OSU benchmarks)
-   [ RUN      ] build_osu_benchmarks~daint:gpu+gnu on daint:gpu using gnu
-   [ DEP      ] build_osu_benchmarks~daint:gpu+gnu on daint:gpu using gnu
-   [----------] finished processing build_osu_benchmarks~daint:gpu+gnu (Build OSU benchmarks)
-
-   [----------] started processing build_osu_benchmarks~daint:gpu+intel (Build OSU benchmarks)
-   [ RUN      ] build_osu_benchmarks~daint:gpu+intel on daint:gpu using intel
-   [ DEP      ] build_osu_benchmarks~daint:gpu+intel on daint:gpu using intel
-   [----------] finished processing build_osu_benchmarks~daint:gpu+intel (Build OSU benchmarks)
-
-   [----------] started processing build_osu_benchmarks~daint:gpu+pgi (Build OSU benchmarks)
-   [ RUN      ] build_osu_benchmarks~daint:gpu+pgi on daint:gpu using pgi
-   [ DEP      ] build_osu_benchmarks~daint:gpu+pgi on daint:gpu using pgi
-   [----------] finished processing build_osu_benchmarks~daint:gpu+pgi (Build OSU benchmarks)
-
-   [----------] started processing osu_allreduce_test_16 (OSU Allreduce test)
-   [ RUN      ] osu_allreduce_test_16 on daint:gpu using gnu
-   [ DEP      ] osu_allreduce_test_16 on daint:gpu using gnu
-   [ RUN      ] osu_allreduce_test_16 on daint:gpu using intel
-   [ DEP      ] osu_allreduce_test_16 on daint:gpu using intel
-   [ RUN      ] osu_allreduce_test_16 on daint:gpu using pgi
-   [ DEP      ] osu_allreduce_test_16 on daint:gpu using pgi
-   [----------] finished processing osu_allreduce_test_16 (OSU Allreduce test)
-
-   [----------] started processing osu_allreduce_test_8 (OSU Allreduce test)
-   [ RUN      ] osu_allreduce_test_8 on daint:gpu using gnu
-   [ DEP      ] osu_allreduce_test_8 on daint:gpu using gnu
-   [ RUN      ] osu_allreduce_test_8 on daint:gpu using intel
-   [ DEP      ] osu_allreduce_test_8 on daint:gpu using intel
-   [ RUN      ] osu_allreduce_test_8 on daint:gpu using pgi
-   [ DEP      ] osu_allreduce_test_8 on daint:gpu using pgi
-   [----------] finished processing osu_allreduce_test_8 (OSU Allreduce test)
-
-   [----------] started processing osu_allreduce_test_4 (OSU Allreduce test)
-   [ RUN      ] osu_allreduce_test_4 on daint:gpu using gnu
-   [ DEP      ] osu_allreduce_test_4 on daint:gpu using gnu
-   [ RUN      ] osu_allreduce_test_4 on daint:gpu using intel
-   [ DEP      ] osu_allreduce_test_4 on daint:gpu using intel
-   [ RUN      ] osu_allreduce_test_4 on daint:gpu using pgi
-   [ DEP      ] osu_allreduce_test_4 on daint:gpu using pgi
-   [----------] finished processing osu_allreduce_test_4 (OSU Allreduce test)
-
-   [----------] started processing osu_allreduce_test_2 (OSU Allreduce test)
-   [ RUN      ] osu_allreduce_test_2 on daint:gpu using gnu
-   [ DEP      ] osu_allreduce_test_2 on daint:gpu using gnu
-   [ RUN      ] osu_allreduce_test_2 on daint:gpu using intel
-   [ DEP      ] osu_allreduce_test_2 on daint:gpu using intel
-   [ RUN      ] 
osu_allreduce_test_2 on daint:gpu using pgi - [ DEP ] osu_allreduce_test_2 on daint:gpu using pgi - [----------] finished processing osu_allreduce_test_2 (OSU Allreduce test) - - [----------] started processing osu_bandwidth_test (OSU bandwidth test) - [ RUN ] osu_bandwidth_test on daint:gpu using gnu - [ DEP ] osu_bandwidth_test on daint:gpu using gnu - [ RUN ] osu_bandwidth_test on daint:gpu using intel - [ DEP ] osu_bandwidth_test on daint:gpu using intel - [ RUN ] osu_bandwidth_test on daint:gpu using pgi - [ DEP ] osu_bandwidth_test on daint:gpu using pgi - [----------] finished processing osu_bandwidth_test (OSU bandwidth test) - - [----------] started processing osu_latency_test (OSU latency test) - [ RUN ] osu_latency_test on daint:gpu using gnu - [ DEP ] osu_latency_test on daint:gpu using gnu - [ RUN ] osu_latency_test on daint:gpu using intel - [ DEP ] osu_latency_test on daint:gpu using intel - [ RUN ] osu_latency_test on daint:gpu using pgi - [ DEP ] osu_latency_test on daint:gpu using pgi - [----------] finished processing osu_latency_test (OSU latency test) - - [----------] waiting for spawned checks to finish - [ OK ] ( 1/22) fetch_osu_benchmarks~daint on daint:gpu using gnu [compile: 0.009s run: 2.761s total: 2.802s] - [ OK ] ( 2/22) build_osu_benchmarks~daint:gpu+gnu on daint:gpu using gnu [compile: 25.758s run: 0.056s total: 104.626s] - [ OK ] ( 3/22) build_osu_benchmarks~daint:gpu+pgi on daint:gpu using pgi [compile: 33.936s run: 70.452s total: 104.473s] - [ OK ] ( 4/22) build_osu_benchmarks~daint:gpu+intel on daint:gpu using intel [compile: 44.565s run: 65.010s total: 143.664s] - [ OK ] ( 5/22) osu_allreduce_test_4 on daint:gpu using gnu [compile: 0.011s run: 78.717s total: 101.428s] - [ OK ] ( 6/22) osu_allreduce_test_2 on daint:gpu using pgi [compile: 0.014s run: 88.060s total: 101.409s] - [ OK ] ( 7/22) osu_latency_test on daint:gpu using pgi [compile: 0.009s run: 101.325s total: 101.375s] - [ OK ] ( 8/22) osu_allreduce_test_8 on daint:gpu using pgi [compile: 0.013s run: 76.031s total: 102.005s] - [ OK ] ( 9/22) osu_allreduce_test_2 on daint:gpu using gnu [compile: 0.011s run: 85.525s total: 101.974s] - [ OK ] (10/22) osu_allreduce_test_4 on daint:gpu using pgi [compile: 0.011s run: 82.847s total: 102.407s] - [ OK ] (11/22) osu_allreduce_test_8 on daint:gpu using gnu [compile: 0.010s run: 77.818s total: 106.993s] - [ OK ] (12/22) osu_latency_test on daint:gpu using gnu [compile: 0.012s run: 103.641s total: 106.858s] - [ OK ] (13/22) osu_bandwidth_test on daint:gpu using pgi [compile: 0.011s run: 157.129s total: 164.087s] - [ OK ] (14/22) osu_bandwidth_test on daint:gpu using gnu [compile: 0.010s run: 154.343s total: 164.540s] - [ OK ] (15/22) osu_allreduce_test_8 on daint:gpu using intel [compile: 0.010s run: 194.643s total: 207.980s] - [ OK ] (16/22) osu_allreduce_test_2 on daint:gpu using intel [compile: 0.013s run: 201.145s total: 207.983s] - [ OK ] (17/22) osu_allreduce_test_4 on daint:gpu using intel [compile: 0.016s run: 198.143s total: 208.335s] - [ OK ] (18/22) osu_latency_test on daint:gpu using intel [compile: 0.010s run: 208.271s total: 208.312s] - [ OK ] (19/22) osu_allreduce_test_16 on daint:gpu using pgi [compile: 0.013s run: 215.854s total: 248.101s] - [ OK ] (20/22) osu_allreduce_test_16 on daint:gpu using gnu [compile: 0.010s run: 213.190s total: 248.731s] - [ OK ] (21/22) osu_allreduce_test_16 on daint:gpu using intel [compile: 0.010s run: 194.339s total: 210.962s] - [ OK ] (22/22) osu_bandwidth_test on daint:gpu using intel [compile: 0.022s run: 
267.171s total: 270.475s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 22/22 test case(s) from 10 check(s) (0 failure(s), 0 skipped) - [==========] Finished on Sun Oct 31 22:07:25 2021 - Run report saved in '/users/user/.reframe/reports/run-report.json' - Log file(s) saved in '/tmp/rfm-qst7lvou.log' +Finally, let's run all the benchmarks at once: +.. literalinclude:: listings/osu_bench_fixtures_run.txt + :language: console .. tip:: A reasonable question is how to choose between fixtures and dependencies? From cb5edd8bbc2ee4081fdc3608b2c498cb561b6d82 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 20 Jan 2022 11:51:48 +0100 Subject: [PATCH 46/62] Fix listing of unresolved deps in docs --- ...or.txt => osu_latency_unresolved_deps.txt} | 42 +++++++++---------- docs/tutorial_deps.rst | 2 +- 2 files changed, 22 insertions(+), 22 deletions(-) rename docs/listings/{osu_latency_res_error.txt => osu_latency_unresolved_deps.txt} (75%) diff --git a/docs/listings/osu_latency_res_error.txt b/docs/listings/osu_latency_unresolved_deps.txt similarity index 75% rename from docs/listings/osu_latency_res_error.txt rename to docs/listings/osu_latency_unresolved_deps.txt index e3aa0c95a1..3b09140588 100644 --- a/docs/listings/osu_latency_res_error.txt +++ b/docs/listings/osu_latency_unresolved_deps.txt @@ -1,5 +1,5 @@ [ReFrame Setup] - version: 3.10.0-dev.2+e8bdbaaa + version: 3.10.0-dev.2+a1df4e67 command: './bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l' launched by: user@host working directory: '/users/user/Devel/reframe' @@ -8,33 +8,33 @@ stage directory: '/users/user/Devel/reframe/stage' output directory: '/users/user/Devel/reframe/output' -./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'gnu') -> 'OSUDownloadTest' -./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'intel') -> 'OSUDownloadTest' -./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'pgi') -> 'OSUDownloadTest' -./bin/reframe: skipping all dependent test cases - - ('OSUBuildTest', 'daint:gpu', 'intel') - - ('OSUBandwidthTest', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'gnu') -> 'OSUDownloadTest' +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'intel') -> 'OSUDownloadTest' +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'pgi') -> 'OSUDownloadTest' +./bin/reframe: skipping all dependent test cases - ('OSUBuildTest', 'daint:gpu', 'gnu') - - ('OSUBandwidthTest', 'daint:gpu', 'gnu') - - ('OSULatencyTest', 'daint:gpu', 'intel') - - ('OSUBuildTest', 'daint:gpu', 'pgi') - ('OSUAllreduceTest_4', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') - - ('OSULatencyTest', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') - ('OSUAllreduceTest_8', 'daint:gpu', 'gnu') - ('OSULatencyTest', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_8', 'daint:gpu', 'pgi') + - ('OSUBuildTest', 'daint:gpu', 'intel') + - ('OSUBuildTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') + - ('OSUBandwidthTest', 'daint:gpu', 'intel') 
+ - ('OSUBandwidthTest', 'daint:gpu', 'gnu') - ('OSUAllreduceTest_4', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') - ('OSUBandwidthTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_8', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_16', 'daint:gpu', 'pgi') - ('OSUAllreduceTest_2', 'daint:gpu', 'pgi') - + - ('OSULatencyTest', 'daint:gpu', 'intel') + - ('OSULatencyTest', 'daint:gpu', 'pgi') + [List of matched checks] Found 0 check(s) -Log file(s) saved in '/tmp/rfm-o8qctq3o.log' +Log file(s) saved in '/tmp/rfm-3a27s3qw.log' diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst index 2f518e96c7..e47c09a813 100644 --- a/docs/tutorial_deps.rst +++ b/docs/tutorial_deps.rst @@ -143,7 +143,7 @@ As a result, its immediate dependency :class:`OSUBuildTest` will be skipped, whi ./bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l -.. literalinclude:: docs/osu_latency_res_error.txt +.. literalinclude:: docs/osu_latency_unresolved_deps.txt :language: console From 42b8a1504be5f26e8f7523c2fe5d95eec8e93b8a Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Thu, 20 Jan 2022 12:11:43 +0100 Subject: [PATCH 47/62] Adapt tutorials --- docs/listings/hello2_typo.txt | 18 ++++++ docs/listings/hello2_typo_stacktrace.txt | 43 +++++++++++++ docs/listings/hello2_verbose_load.txt | 80 ++++++++++++++++++++++++ docs/tutorial_deps.rst | 9 +-- docs/tutorial_fixtures.rst | 3 +- docs/tutorial_tips_tricks.rst | 46 ++------------ 6 files changed, 151 insertions(+), 48 deletions(-) create mode 100644 docs/listings/hello2_typo.txt create mode 100644 docs/listings/hello2_typo_stacktrace.txt create mode 100644 docs/listings/hello2_verbose_load.txt diff --git a/docs/listings/hello2_typo.txt b/docs/listings/hello2_typo.txt new file mode 100644 index 0000000000..a8f21aa2a9 --- /dev/null +++ b/docs/listings/hello2_typo.txt @@ -0,0 +1,18 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+cb5edd8b + command: './bin/reframe -c tutorials/basics/hello -R -l' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: (R) '/Users/user/Repositories/reframe/tutorials/basics/hello' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +./bin/reframe: skipping test file '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined + lang = paramter(['c', 'cpp']) + (rerun with '-v' for more information) +[List of matched checks] +- HelloTest +Found 1 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-rt_ds_vp.log' diff --git a/docs/listings/hello2_typo_stacktrace.txt b/docs/listings/hello2_typo_stacktrace.txt new file mode 100644 index 0000000000..5114639732 --- /dev/null +++ b/docs/listings/hello2_typo_stacktrace.txt @@ -0,0 +1,43 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+cb5edd8b + command: './bin/reframe -c tutorials/basics/hello -R -l -v' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: (R) '/Users/user/Repositories/reframe/tutorials/basics/hello' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +./bin/reframe: skipping test file 
'/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined
+    lang = paramter(['c', 'cpp'])
+  (rerun with '-v' for more information)
+Traceback (most recent call last):
+  File "/Users/user/Repositories/reframe/reframe/frontend/loader.py", line 237, in load_from_file
+    util.import_module_from_file(filename, force)
+  File "/Users/user/Repositories/reframe/reframe/utility/__init__.py", line 103, in import_module_from_file
+    return importlib.import_module(module_name)
+  File "/usr/local/Cellar/python@3.9/3.9.1_6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module
+    return _bootstrap._gcd_import(name[level:], package, level)
+  File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
+  File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
+  File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
+  File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
+  File "<frozen importlib._bootstrap_external>", line 790, in exec_module
+  File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
+  File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 12, in <module>
+    class HelloMultiLangTest(rfm.RegressionTest):
+  File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 13, in HelloMultiLangTest
+    lang = paramter(['c', 'cpp'])
+NameError: name 'paramter' is not defined
+
+Loaded 1 test(s)
+Generated 1 test case(s)
+Filtering test cases(s) by name: 1 remaining
+Filtering test cases(s) by tags: 1 remaining
+Filtering test cases(s) by other attributes: 1 remaining
+Final number of test cases: 1
+[List of matched checks]
+- HelloTest
+Found 1 check(s)
+
+Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-do69ki6t.log'
diff --git a/docs/listings/hello2_verbose_load.txt b/docs/listings/hello2_verbose_load.txt
new file mode 100644
index 0000000000..b97ee20a93
--- /dev/null
+++ b/docs/listings/hello2_verbose_load.txt
@@ -0,0 +1,80 @@
+Loading user configuration
+Loading configuration file: 'tutorials/config/settings.py'
+Detecting system
+Looking for a matching configuration entry for system '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa'
+Configuration found: picking system 'generic'
+Selecting subconfig for 'generic'
+Initializing runtime
+Selecting subconfig for 'generic:default'
+Initializing system partition 'default'
+Selecting subconfig for 'generic'
+Initializing system 'generic'
+Initializing modules system 'nomod'
+detecting topology info for generic:default
+> found topology file '/Users/user/.reframe/topology/generic-default/processor.json'; loading... 
+> device auto-detection is not supported
+[ReFrame Environment]
+  RFM_CHECK_SEARCH_PATH=<not set>
+  RFM_CHECK_SEARCH_RECURSIVE=<not set>
+  RFM_CLEAN_STAGEDIR=<not set>
+  RFM_COLORIZE=<not set>
+  RFM_COMPACT_TEST_NAMES=<not set>
+  RFM_CONFIG_FILE=tutorials/config/settings.py
+  RFM_GIT_TIMEOUT=<not set>
+  RFM_GRAYLOG_ADDRESS=<not set>
+  RFM_HTTPJSON_URL=<not set>
+  RFM_IGNORE_CHECK_CONFLICTS=<not set>
+  RFM_IGNORE_REQNODENOTAVAIL=<not set>
+  RFM_INSTALL_PREFIX=/Users/user/Repositories/reframe
+  RFM_KEEP_STAGE_FILES=<not set>
+  RFM_MODULE_MAPPINGS=<not set>
+  RFM_MODULE_MAP_FILE=<not set>
+  RFM_NON_DEFAULT_CRAYPE=<not set>
+  RFM_OUTPUT_DIR=<not set>
+  RFM_PERFLOG_DIR=<not set>
+  RFM_PREFIX=<not set>
+  RFM_PURGE_ENVIRONMENT=<not set>
+  RFM_REMOTE_DETECT=<not set>
+  RFM_REMOTE_WORKDIR=<not set>
+  RFM_REPORT_FILE=<not set>
+  RFM_REPORT_JUNIT=<not set>
+  RFM_RESOLVE_MODULE_CONFLICTS=<not set>
+  RFM_SAVE_LOG_FILES=<not set>
+  RFM_STAGE_DIR=<not set>
+  RFM_SYSLOG_ADDRESS=<not set>
+  RFM_SYSTEM=<not set>
+  RFM_TIMESTAMP_DIRS=<not set>
+  RFM_TRAP_JOB_ERRORS=<not set>
+  RFM_UNLOAD_MODULES=<not set>
+  RFM_USER_MODULES=<not set>
+  RFM_USE_LOGIN_SHELL=<not set>
+  RFM_VERBOSE=<not set>
+[ReFrame Setup]
+  version:           3.10.0-dev.2+cb5edd8b
+  command:           './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv'
+  launched by:       user@host
+  working directory: '/Users/user/Repositories/reframe'
+  settings file:     'tutorials/config/settings.py'
+  check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py'
+  stage directory:   '/Users/user/Repositories/reframe/stage'
+  output directory:  '/Users/user/Repositories/reframe/output'
+
+Looking for tests in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py'
+Validating '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': OK
+  > Loaded 2 test(s)
+Loaded 2 test(s)
+Generated 2 test case(s)
+Filtering test cases(s) by name: 2 remaining
+Filtering test cases(s) by tags: 2 remaining
+Filtering test cases(s) by other attributes: 2 remaining
+Building and validating the full test DAG
+Full test DAG:
+  ('HelloMultiLangTest_cpp', 'generic:default', 'builtin') -> []
+  ('HelloMultiLangTest_c', 'generic:default', 'builtin') -> []
+Final number of test cases: 2
+[List of matched checks]
+- HelloMultiLangTest %lang=cpp
+- HelloMultiLangTest %lang=c
+Found 2 check(s)
+
+Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-fpjj5gru.log'
diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst
index e47c09a813..44444c6faa 100644
--- a/docs/tutorial_deps.rst
+++ b/docs/tutorial_deps.rst
@@ -131,7 +131,7 @@ For example, if we select only the :class:`OSULatencyTest` for running, ReFrame

    ./bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l

-.. literalinclude:: docs/osu_latency_list.txt
+.. literalinclude:: listings/osu_latency_list.txt
    :language: console

 Finally, when ReFrame cannot resolve a dependency of a test, it will issue a warning and skip completely all the test cases that recursively depend on this one.
@@ -143,7 +143,7 @@ As a result, its immediate dependency :class:`OSUBuildTest` will be skipped, whi

    ./bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l

-.. literalinclude:: docs/osu_latency_unresolved_deps.txt
+.. literalinclude:: listings/osu_latency_unresolved_deps.txt
    :language: console

@@ -159,7 +159,7 @@ ReFrame generates multiple test cases from each test depending on the target sys
 We have already seen in :doc:`tutorial_basics` how the STREAM benchmark generated many more test cases when it was run on an HPC system with multiple partitions and programming environments.
 These are the *actual* dependencies and form the actual test case graph that will be executed by the runtime. 
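+For instance, a hypothetical test like the following (``TestX`` is not part of the tutorial; it is shown purely for illustration) would be concretized into three test cases on ``daint``, namely ``('TestX', 'daint:gpu', 'gnu')``, ``('TestX', 'daint:gpu', 'intel')`` and ``('TestX', 'daint:gpu', 'pgi')``:
+
+.. code-block:: python
+
+   import reframe as rfm
+   import reframe.utility.sanity as sn
+
+
+   @rfm.simple_test
+   class TestX(rfm.RunOnlyRegressionTest):
+       # One test case is generated per valid (partition, environment) pair
+       valid_systems = ['daint:gpu']
+       valid_prog_environs = ['gnu', 'intel', 'pgi']
+       executable = 'echo'
+
+       @sanity_function
+       def validate(self):
+           return sn.assert_true(True)
+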
The mapping of a test to its concrete test cases that will be executed on a system is called *test concretization*.
-You can view the exact concretization of the selected tests with :option:`--list=concretized` or simply :option:`-lC`.
+You can view the exact concretization of the selected tests with ``--list=concretized`` or simply ``-lC``.
 Here is how the OSU benchmarks of this tutorial are concretized on the system ``daint``:
@@ -184,7 +184,8 @@ If we scope our programming environments to ``gnu`` and ``builtin`` only, ReFram
 
    ./bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu
 
-.. literalinclude:: docs/listings/osu_bench_list_concretized_gnu.txt
+.. literalinclude:: listings/osu_bench_list_concretized_gnu.txt
+   :language: console
 
 To gain a deeper understanding of how test dependencies work in ReFrame, please refer to :doc:`dependencies`.
diff --git a/docs/tutorial_fixtures.rst b/docs/tutorial_fixtures.rst
index 920ea04863..1ccbe30e06 100644
--- a/docs/tutorial_fixtures.rst
+++ b/docs/tutorial_fixtures.rst
@@ -136,11 +136,10 @@ The following listing and figure show how the test dependency DAG is concretized
 
    reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi
 
-.. literalinclude:: listings/osu_bandwidth_concretized_pgi_daint.txt
+.. literalinclude:: listings/osu_bandwidth_concretized_daint_pgi.txt
    :language: console
 
 .. figure:: _static/img/fixtures-actual-deps-scoped.svg
-   :align: center
 
    :sub:`The dependency graph concretized for the 'pgi' environment only.`
diff --git a/docs/tutorial_tips_tricks.rst b/docs/tutorial_tips_tricks.rst
index 304a87bf5e..43ebe276c7 100644
--- a/docs/tutorial_tips_tricks.rst
+++ b/docs/tutorial_tips_tricks.rst
@@ -19,14 +19,8 @@ In the following, we have inserted a small typo in the ``hello2.py`` tutorial ex
 
    ./bin/reframe -c tutorials/basics/hello -R -l
 
-.. code-block:: none
-
-   ./bin/reframe: skipping test file '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:17: name 's' is not defined
-       sanity_patterns = s.assert_found(r'Hello, World\!', 'hello.out')
-   (rerun with '-v' for more information)
-   [List of matched checks]
-   - HelloTest (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py')
-   Found 1 check(s)
+.. literalinclude:: listings/hello2_typo.txt
+   :language: console
 
 Notice how ReFrame also prints the source code line that caused the error.
 This is not always the case, however.
@@ -48,40 +42,8 @@ As suggested by the warning message, passing :option:`-v` will give you the stac
 
    ./bin/reframe -c tutorials/basics/hello -R -lv
 
-..
code-block:: none
-
-   ./bin/reframe: skipping test file '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:17: name 's' is not defined
-       sanity_patterns = s.assert_found(r'Hello, World\!', 'hello.out')
-   (rerun with '-v' for more information)
-   Traceback (most recent call last):
-     File "/Users/user/Repositories/reframe/reframe/frontend/loader.py", line 172, in load_from_file
-       util.import_module_from_file(filename, force)
-     File "/Users/user/Repositories/reframe/reframe/utility/__init__.py", line 101, in import_module_from_file
-       return importlib.import_module(module_name)
-     File "/usr/local/Cellar/python@3.9/3.9.1_6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module
-       return _bootstrap._gcd_import(name[level:], package, level)
-     File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
-     File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
-     File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
-     File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
-     File "<frozen importlib._bootstrap_external>", line 790, in exec_module
-     File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
-     File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 11, in <module>
-       class HelloMultiLangTest(rfm.RegressionTest):
-     File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 17, in HelloMultiLangTest
-       sanity_patterns = s.assert_found(r'Hello, World\!', 'hello.out')
-   NameError: name 's' is not defined
-
-   Loaded 1 test(s)
-   Generated 1 test case(s)
-   Filtering test cases(s) by name: 1 remaining
-   Filtering test cases(s) by tags: 1 remaining
-   Filtering test cases(s) by other attributes: 1 remaining
-   Final number of test cases: 1
-   [List of matched checks]
-   - HelloTest (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py')
-   Found 1 check(s)
-
+.. literalinclude:: listings/hello2_typo_stacktrace.txt
+   :language: console
 
 .. tip::
    The :option:`-v` option can be given multiple times to increase the verbosity level further.
From 71f32c78b279235a1508e84c29b49d5681172f7c Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Fri, 21 Jan 2022 11:33:51 +0100
Subject: [PATCH 48/62] WIP: Update docs
---
 docs/manpage.rst             | 59 ++++++++++++++++++++++----
 docs/regression_test_api.rst | 80 ++----------------------------------
 reframe/core/meta.py         | 43 +++++++++++++------
 reframe/core/pipeline.py     | 38 +++++++++++++++++
 4 files changed, 123 insertions(+), 97 deletions(-)

diff --git a/docs/manpage.rst b/docs/manpage.rst
index 698bd4d6d0..26ef36bf55 100644
--- a/docs/manpage.rst
+++ b/docs/manpage.rst
@@ -114,11 +114,25 @@ This happens recursively so that if test ``T1`` depends on ``T2`` and ``T2`` dep
 
    Filter tests by name.
 
    ``NAME`` is interpreted as a `Python Regular Expression <https://docs.python.org/3/library/re.html>`__;
-   any test whose name matches ``NAME`` will be selected.
+   any test whose *display name* matches ``NAME`` will be selected.
+   The display name of a test also encodes any parameterization information.
+   See XXX for more details on the test naming scheme.
+
+   Before matching, any whitespace will be removed from the display name of the test.
 
    This option may be specified multiple times, in which case tests with *any* of the specified names will be selected:
    ``-n NAME1 -n NAME2`` is therefore equivalent to ``-n 'NAME1|NAME2'``.
+
+   If the special notation ``<test_name>@<variant_num>`` is passed as the ``NAME`` argument, then an exact match will be performed selecting the variant ``variant_num`` of the test ``test_name``.
+
+   .. note::
+
+      Fixtures cannot be selected.
+
+   ..
versionchanged:: 3.10.0
+
+      The option's behaviour was adapted and extended in order to work with the updated test naming scheme.
+
 .. option:: -p, --prgenv=NAME
 
    Filter tests by programming environment.
@@ -189,15 +203,42 @@ An action must always be specified.
 
    .. versionadded:: 3.4.1
 
-.. option:: -L, --list-detailed
+.. option:: --describe
+
+   Print a detailed description of the `selected tests <#test-filtering>`__ in JSON format and exit.
+
+   .. note::
+      The generated description corresponds to the state of the test right after it has been initialized.
+      If any of its attributes are changed or set during its execution, their updated values will not be shown by this listing.
+
+   .. versionadded:: 3.10.0
+
+
+.. option:: -L, --list-detailed[=T|C]
+
+   List selected tests providing more details for each test.
+
+   The unique id of each test (see also :attr:`~reframe.core.pipeline.RegressionTest.unique_name`) as well as the file where each test is defined are printed.
 
-   List selected tests providing detailed information per test.
+   This option optionally accepts a single argument denoting what type of listing is requested.
+   Please refer to :option:`-l` for an explanation of this argument.
 
-.. option:: -l, --list
+   .. versionadded:: 3.10.0
+      Support for different types of listing is added.
 
-   List selected tests.
+.. option:: -l, --list[=T|C]
 
-   A single line per test is printed.
+   List selected tests and their dependencies.
+
+   This option optionally accepts a single argument denoting what type of listing is requested.
+   There are two types of possible listings:
+
+   - *Regular test listing* (``T``, the default): This type of listing lists the tests and their dependencies or fixtures using their :attr:`~reframe.core.pipeline.RegressionTest.display_name`. A test that is listed as a dependency of another test will not be listed separately.
+   - *Concretized test case listing* (``C``): This type of listing lists the exact test cases and their dependencies as they have been concretized for the current system and environment combinations.
+     This listing effectively shows the exact test DAG that will be executed.
+
+   .. versionadded:: 3.10.0
+      Support for different types of listing is added.
 
 .. option:: --list-tags
 
@@ -211,7 +252,11 @@ An action must always be specified.
 
    Execute the selected tests.
 
-If more than one action options are specified, :option:`-l` precedes :option:`-L`, which in turn precedes :option:`-r`.
+If more than one action option is specified, the precedence order is the following:
+
+   .. code-block:: console
+
+      --describe > --list-detailed > --list > --list-tags > --ci-generate
 
 
 ----------------------------------
diff --git a/docs/regression_test_api.rst b/docs/regression_test_api.rst
index d0f686e771..4c5cb5711c 100644
--- a/docs/regression_test_api.rst
+++ b/docs/regression_test_api.rst
@@ -657,86 +657,12 @@ Therefore, classes that derive from the base :class:`~reframe.core.pipeline.Regr
 
 .. py:attribute:: RegressionMixin.num_variants
 
-   Total number of unique test variants in a class.
+   Total number of variants of the test.
 
+.. automethod:: reframe.core.pipeline.RegressionMixin.get_variant_nums
 
-.. py:function:: RegressionMixin.get_variant_info(cls, variant_num, *, recurse=False, max_depth=None)
+.. automethod:: reframe.core.pipeline.RegressionMixin.variant_name
 
-   Get the raw variant data for a given variant index.
-   This function returns a dictionary with the variant data such as the parameter values and the fixture variants.
- The parameter space information is presented in a sub-dictionary under the ``'params'`` key, gathering all the parameter values associated with the given variant number. - Similarly, the information on the test's fixtures is gathered in another sub-dictionary under the ``'fixtures'`` key. - By default, this sub-dictionary shows a tuple for each fixture, containing the respective fixture variants associated with the given ``variant_num``. - These tuples may only contain more than one fixture variant index if the fixture was declared with a `join` action (see the :func:`~RegressionMixin.fixture` documentation for more information). - However, when ``recurse`` is set to ``True``, each fixture entry with a single-element tuple will be expanded to show the full fixture variant information. - By default, the recursion will traverse the full fixture tree, but this recursion depth can be limited with the ``max_depth`` argument. - See the example below. - - .. code:: python - - class Foo(rfm.RegressionTest): - p0 = parameter(range(2)) - ... - - class Bar(rfm.RegressionTest): - p0 = parameter(range(3)) - ... - - class MyTest(rfm.RegressionTest): - p1 = parameter(['a', 'b']) - f0 = fixture(Foo, action='fork') - f1 = fixture(Bar, action='join') - ... - - # Get the raw info for variant 0 - without recursion - MyTest.get_variant_info(0, recursive=False) - # { - # 'params': {'p1': 'a'}, - # 'fixtures': { - # 'f0': (0,), - # 'f1': (0, 1, 2,) - # } - # } - - # Get the raw info for variant 0 - show the full tree - MyTest.get_variant_info(0, recursive=True) - # { - # 'params': {'p1': 'a'}, - # 'fixtures': { - # 'f0': { - # 'params': {'p0': 0}, - # 'fixtures': {} - # }, - # 'f1': (0, 1, 2,) - # } - # } - - :param variant_num: An integer in the range of [0, cls.num_variants). - :param recurse: Flag to control the recursion through the fixture space. - :param max_depth: Set the recursion limit. When the ``recurse`` argument is set to ``False``, this option has no effect. - - -.. py:function:: RegressionMixin.get_variant_nums(cls, **conditions) - - Get the variant numbers that meet the specified conditions. - The given conditions enable filtering the parameter space of the test. - These can be specified by passing key-value pairs with the parameter name to filter and an associated callable that returns ``True`` when the filtering condition is met. Multiple conditions are supported. - However, filtering the fixture space is not allowed. - - .. code-block:: python - - # Get the variant numbers where my_param is lower than 4 - cls.get_variant_nums(my_param=lambda x: x < 4) - - :param conditions: keyword arguments where the key is the test parameter name and the value is a unary function that evaluates a bool condition on the parameter value. - - -.. py:function:: RegressionMixin.fullname(cls, variant_num=None) - - Return the full unique name of a test for a given test variant number. - If no ``variant_num`` is provided, this function returns the qualified class name. - - :param variant_num: An integer in the range of [0, cls.num_variants). ------------------------ diff --git a/reframe/core/meta.py b/reframe/core/meta.py index 2b9b639b9d..e2e535e0e5 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -662,7 +662,7 @@ class attribute. 
This behavior does not apply when the assigned value
 
 @property
 def num_variants(cls):
-    '''Number of unique tests that can be instantiated from this class.'''
+    '''Total number of variants of the test.'''
     return len(cls._rfm_param_space) * len(cls._rfm_fixture_space)
 
 def _map_variant_num(cls, variant_num=None):
@@ -689,15 +689,32 @@ def get_variant_nums(cls, **conditions):
     '''Get the variant numbers that meet the specified conditions.
 
     The given conditions enable filtering the parameter space of the test.
-    These can be specified by passing key-value pairs with the parameter
-    name to filter and an associated callable that returns ``True`` when
-    the filtering condition is met. Multiple conditions are supported.
-    However, filtering the fixture space is not allowed.
+    Filtering the fixture space is not allowed.
 
     .. code-block:: python
 
        # Filter out the test variants where my_param is greater than 3
       cls.get_variant_nums(my_param=lambda x: x < 4)
+
+    The returned list of variant numbers can be passed to
+    :func:`variant_name` in order to retrieve the actual test name.
+
+    :param conditions: keyword arguments where the key is the test
+        parameter name and the value is either a single value or a unary
+        function that evaluates to :obj:`True` if the parameter point must
+        be kept, :obj:`False` otherwise. If a single value is passed, this
+        is implicitly converted to the equality function, such that
+
+        .. code-block:: python
+
+           get_variant_nums(p=10)
+
+        is equivalent to
+
+        .. code-block:: python
+
+           get_variant_nums(p=lambda x: x == 10)
+
    '''
    if not conditions:
        return list(range(cls.num_variants))
@@ -766,6 +783,12 @@ class MyTest(rfm.RegressionTest):
    #     }
    #     }
 
+    :param variant_num: An integer in the range of [0, cls.num_variants).
+    :param recurse: Flag to control the recursion through the fixture
+        space.
+    :param max_depth: Set the recursion limit. When the ``recurse``
+        argument is set to ``False``, this option has no effect.
+
    '''
    pid, fid = cls._map_variant_num(variant_num)
@@ -823,15 +846,9 @@ def is_abstract(cls):
     return cls.num_variants == 0
 
 def variant_name(cls, variant_num=None):
-    '''Return the name of a test variant for a given variant number.
-
-    This function returns a unique name for each of the provided variant
-    numbers. If no ``variant_num`` is provided, this function returns the
-    qualified class name.
+    '''Return the name of the test variant with a specific variant number.
 
-    :param variant_num: An integer in the range of [0, cls.num_variants).
-
-    :meta private:
+    :param variant_num: An integer in the range of ``[0, cls.num_variants)``.
    '''
     name = cls.__name__
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index d4e534f88c..25966a8851 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -119,6 +119,44 @@ def _wrapped(*args, **kwargs):
 
 def make_test(name, bases, body, **kwargs):
+    '''Define a new test class programmatically.
+
+    Using this method is completely equivalent to using the :keyword:`class`
+    keyword to define the test class. More specifically, the following:
+
+    .. code-block:: python
+
+        hello_cls = rfm.make_test(
+            'HelloTest', (rfm.RunOnlyRegressionTest,),
+            {
+                'valid_systems': ['*'],
+                'valid_prog_environs': ['*'],
+                'executable': 'echo',
+                'sanity_patterns': sn.assert_true(1)
+            }
+        )
+
+    is completely equivalent to
+
+    ..
code-block:: python
+
+        class HelloTest(rfm.RunOnlyRegressionTest):
+            valid_systems = ['*']
+            valid_prog_environs = ['*']
+            executable = 'echo'
+            sanity_patterns = sn.assert_true(1)
+
+        hello_cls = HelloTest
+
+    :param name: The name of the new test class.
+    :param bases: A tuple of the base classes of the class that is being
+        created.
+    :param body: A mapping of key/value pairs that will be inserted as class
+        attributes in the newly created class.
+    :param kwargs: Any keyword arguments to be passed to the
+        :class:`RegressionTestMeta` metaclass.
+
+    '''
     namespace = RegressionTestMeta.__prepare__(name, bases, **kwargs)
     namespace.update(body)
     cls = RegressionTestMeta(name, bases, namespace, **kwargs)
From 8b6291f3d5a529e72ded055d35653b8c5c37dd28 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Fri, 21 Jan 2022 13:40:58 +0100
Subject: [PATCH 49/62] Move make_test() to meta.py
---
 reframe/core/meta.py           | 45 ++++++++++++++++++++++++++++++++
 reframe/core/pipeline.py       | 47 +---------------------------------
 unittests/test_dependencies.py | 32 ++++++++++-------------
 unittests/test_pipeline.py     | 11 ++++----
 unittests/utility.py           |  3 ++-
 5 files changed, 67 insertions(+), 71 deletions(-)

diff --git a/reframe/core/meta.py b/reframe/core/meta.py
index e2e535e0e5..32388932f8 100644
--- a/reframe/core/meta.py
+++ b/reframe/core/meta.py
@@ -871,3 +871,48 @@ def variant_name(cls, variant_num=None):
         name += f'_{fid}'
 
     return name
+
+
+def make_test(name, bases, body, **kwargs):
+    '''Define a new test class programmatically.
+
+    Using this method is completely equivalent to using the :keyword:`class`
+    keyword to define the test class. More specifically, the following:
+
+    .. code-block:: python
+
+        hello_cls = rfm.make_test(
+            'HelloTest', (rfm.RunOnlyRegressionTest,),
+            {
+                'valid_systems': ['*'],
+                'valid_prog_environs': ['*'],
+                'executable': 'echo',
+                'sanity_patterns': sn.assert_true(1)
+            }
+        )
+
+    is completely equivalent to
+
+    .. code-block:: python
+
+        class HelloTest(rfm.RunOnlyRegressionTest):
+            valid_systems = ['*']
+            valid_prog_environs = ['*']
+            executable = 'echo'
+            sanity_patterns = sn.assert_true(1)
+
+        hello_cls = HelloTest
+
+    :param name: The name of the new test class.
+    :param bases: A tuple of the base classes of the class that is being
+        created.
+    :param body: A mapping of key/value pairs that will be inserted as class
+        attributes in the newly created class.
+    :param kwargs: Any keyword arguments to be passed to the
+        :class:`RegressionTestMeta` metaclass.
+
+    '''
+    namespace = RegressionTestMeta.__prepare__(name, bases, **kwargs)
+    namespace.update(body)
+    cls = RegressionTestMeta(name, bases, namespace, **kwargs)
+    return cls
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 25966a8851..6fa8de0703 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -10,7 +10,7 @@
 __all__ = [
     'CompileOnlyRegressionTest', 'RegressionTest', 'RunOnlyRegressionTest',
     'DEPEND_BY_ENV', 'DEPEND_EXACT', 'DEPEND_FULLY', 'final',
-    'RegressionMixin', 'make_test'
+    'RegressionMixin'
 ]
@@ -118,51 +118,6 @@ def _wrapped(*args, **kwargs):
 _RFM_TEST_KIND_RUN = 2
 
-def make_test(name, bases, body, **kwargs):
-    '''Define a new test class programmatically.
-
-    Using this method is completely equivalent to using the :keyword:`class`
-    keyword to define the test class. More specifically, the following:
-
-    ..
code-block:: python
-
-        hello_cls = rfm.make_test(
-            'HelloTest', (rfm.RunOnlyRegressionTest,),
-            {
-                'valid_systems': ['*'],
-                'valid_prog_environs': ['*'],
-                'executable': 'echo',
-                'sanity_patterns': sn.assert_true(1)
-            }
-        )
-
-    is completely equivalent to
-
-    .. code-block:: python
-
-        class HelloTest(rfm.RunOnlyRegressionTest):
-            valid_systems = ['*']
-            valid_prog_environs = ['*']
-            executable = 'echo'
-            sanity_patterns = sn.assert_true(1)
-
-        hello_cls = HelloTest
-
-    :param name: The name of the new test class.
-    :param bases: A tuple of the base classes of the class that is being
-        created.
-    :param body: A mapping of key/value pairs that will be inserted as class
-        attributes in the newly created class.
-    :param kwargs: Any keyword arguments to be passed to the
-        :class:`RegressionTestMeta` metaclass.
-
-    '''
-    namespace = RegressionTestMeta.__prepare__(name, bases, **kwargs)
-    namespace.update(body)
-    cls = RegressionTestMeta(name, bases, namespace, **kwargs)
-    return cls
-
-
 class RegressionMixin(metaclass=RegressionTestMeta):
     '''Base mixin class for regression tests.
diff --git a/unittests/test_dependencies.py b/unittests/test_dependencies.py
index 1e14c75a75..350382ea37 100644
--- a/unittests/test_dependencies.py
+++ b/unittests/test_dependencies.py
@@ -13,6 +13,7 @@
 import reframe.utility as util
 import reframe.utility.sanity as sn
 import reframe.utility.udeps as udeps
+import unittests.utility as test_util
 
 from reframe.core.environments import Environment
 from reframe.core.exceptions import DependencyError
@@ -515,21 +516,14 @@ def test_build_deps_empty(default_exec_ctx):
     assert {} == dependencies.build_deps([])[0]
 
-@pytest.fixture
-def make_test():
-    def _make_test(test_name):
-        class _Test(rfm.RegressionTest):
-            valid_systems = ['*']
-            valid_prog_environs = ['*']
-            executable = 'echo'
-            executable_opts = [test_name]
-
-        return rfm.make_test(test_name, (_Test,), {})()
-
-    return _make_test
+def make_test(name):
+    return test_util.make_check(rfm.RegressionTest,
+                                alt_name=name,
+                                valid_systems=['*'],
+                                valid_prog_environs=['*'])
 
-def test_valid_deps(make_test, default_exec_ctx):
+def test_valid_deps(default_exec_ctx):
     #
     #       t0       +-->t5<--+
     #       ^        |        |
@@ -567,7 +561,7 @@
     )
 
-def test_cyclic_deps(make_test, default_exec_ctx):
+def test_cyclic_deps(default_exec_ctx):
     #
     #       t0       +-->t5<--+
     #       ^        |        |
@@ -613,7 +607,7 @@
     't3->t1->t4->t3' in str(exc_info.value))
 
-def test_cyclic_deps_by_env(make_test, default_exec_ctx):
+def test_cyclic_deps_by_env(default_exec_ctx):
     t0 = make_test('t0')
     t1 = make_test('t1')
     t1.depends_on('t0', udeps.env_is('e0'))
@@ -632,7 +626,7 @@
     dependencies.validate_deps({})
 
-def test_skip_unresolved_deps(make_test, make_exec_ctx):
+def test_skip_unresolved_deps(make_exec_ctx):
     #
     # t0     t4
     #  ^    ^  ^
@@ -698,7 +692,7 @@
     assert cases_order in valid_orderings
 
-def test_prune_deps(make_test, default_exec_ctx):
+def test_prune_deps(default_exec_ctx):
     #
     #       t0       +-->t5<--+
     #       ^        |        |
@@ -753,7 +747,7 @@
     assert len(pruned_deps[node('t0')]) == 0
 
-def test_toposort(make_test, default_exec_ctx):
+def test_toposort(default_exec_ctx):
     #
     #       t0       +-->t5<--+
     #       ^        |        |
@@ -803,7 +797,7 @@
     assert cases_by_level[4] == {'t4'}
 
-def test_toposort_subgraph(make_test, default_exec_ctx):
+def
test_toposort_subgraph(default_exec_ctx): # # t0 # ^ diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 53b18b1138..0635a791cd 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -18,6 +18,7 @@ from reframe.core.exceptions import (BuildError, PipelineError, ReframeError, PerformanceError, SanityError, SkipTestError, ReframeSyntaxError) +from reframe.core.meta import make_test def _run(test, partition, prgenv): @@ -140,9 +141,9 @@ def _container_exec_ctx(platform): def test_eq(): - T0 = rfm.make_test('T0', (rfm.RegressionTest,), {}) - T1 = rfm.make_test('T1', (rfm.RegressionTest,), {}) - T2 = rfm.make_test('T1', (rfm.RegressionTest,), {}) + T0 = make_test('T0', (rfm.RegressionTest,), {}) + T1 = make_test('T1', (rfm.RegressionTest,), {}) + T2 = make_test('T1', (rfm.RegressionTest,), {}) t0, t1, t2 = T0(), T1(), T2() assert t0 != t1 @@ -1491,7 +1492,7 @@ def access_topo(self): def test_make_test_without_builtins(local_exec_ctx): - hello_cls = rfm.make_test( + hello_cls = make_test( 'HelloTest', (rfm.RunOnlyRegressionTest,), { 'valid_systems': ['*'], @@ -1520,7 +1521,7 @@ def set_message(self): def validate(self): return sn.assert_found(self.message, self.stdout) - hello_cls = rfm.make_test('HelloTest', (_X,), {}) + hello_cls = make_test('HelloTest', (_X,), {}) hello_cls.setvar('message', 'hello') assert hello_cls.__name__ == 'HelloTest' _run(hello_cls(), *local_exec_ctx) diff --git a/unittests/utility.py b/unittests/utility.py index 823e0791f3..0e82d8d487 100644 --- a/unittests/utility.py +++ b/unittests/utility.py @@ -16,6 +16,7 @@ import reframe.core.config as config import reframe.core.modules as modules import reframe.core.runtime as rt +from reframe.core.meta import make_test TEST_RESOURCES = os.path.join( @@ -171,7 +172,7 @@ def make_check(cls, *, alt_name=None, **vars): ''' if alt_name: - cls = rfm.make_test(alt_name, (cls,), {}) + cls = make_test(alt_name, (cls,), {}) for k, v in vars.items(): cls.setvar(k, v) From 5f5e9a37bbdc0c2549b6c9c587bb7ea670235002 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Fri, 21 Jan 2022 17:48:27 +0100 Subject: [PATCH 50/62] Update docs --- docs/manpage.rst | 119 ++++++++++++++++++++++++++++++++++- docs/regression_test_api.rst | 8 ++- docs/tutorial_advanced.rst | 2 +- docs/tutorial_deps.rst | 4 +- reframe/core/pipeline.py | 2 +- 5 files changed, 129 insertions(+), 6 deletions(-) diff --git a/docs/manpage.rst b/docs/manpage.rst index 26ef36bf55..3d91639078 100644 --- a/docs/manpage.rst +++ b/docs/manpage.rst @@ -116,7 +116,7 @@ This happens recursively so that if test ``T1`` depends on ``T2`` and ``T2`` dep ``NAME`` is interpreted as a `Python Regular Expression `__; any test whose *display name* matches ``NAME`` will be selected. The display name of a test encodes also any parameterization information. - See XXX for more details on the test naming scheme. + See :ref:`test_naming_scheme` for more details on how the tests are automatically named by the framework. Before matching, any whitespace will be removed from the display name of the test. @@ -796,6 +796,121 @@ Miscellaneous options This option can also be set using the :envvar:`RFM_VERBOSE` environment variable or the :js:attr:`verbose` general configuration parameter. +.. _test_naming_scheme: + +Test Naming Scheme +------------------ + +.. versionadded:: 3.10.0 + +This section describes the new test naming scheme which will replace the current one in ReFrame 4.0. 
+It can be enabled by setting the :envvar:`RFM_COMPACT_TEST_NAMES` environment variable.
+
+Each ReFrame test is assigned a unique name, which will be used internally by the framework to reference the test.
+Any test-specific path component will use that name, too.
+It is formed as follows for the various types of tests:
+
+- *Regular tests*: The unique name is simply the test class name.
+  This implies that you cannot load two tests with the same class name within the same run session even if these tests reside in separate directories.
+- *Parameterized tests*: The unique name is formed by the test class name followed by an ``_`` and the variant number of the test.
+  Each point in the parameter space of the test is assigned a unique variant number.
+- *Fixtures*: The unique name is formed by the test class name followed by an ``_`` and a hash.
+  The hash is constructed by combining the information of the fixture variant (if the fixture is parameterized), the fixture's scope and any fixture variables that were explicitly set.
+
+Since unique names can be cryptic, they are not listed by the :option:`-l` option, but are listed when a detailed listing is requested by using the :option:`-L` option.
+
+A human readable version of the test name, which is called the *display name*, is also constructed for each test.
+This name encodes all the parameterization information as well as the fixture-specific information (scopes, variables).
+The format of the display name is the following in BNF notation:
+
+.. code-block:: bnf
+
+   <display_name> ::= <test_class_name> (<params>)* (<scope>)?
+   <params> ::= "%" <parametrization> "=" <pvalue>
+   <parametrization> ::= (<fname> ".")* <pname>
+   <scope> ::= "~" <scope_descr>
+   <scope_descr> ::= <first> ("+" <second>)*
+
+   <test_class_name> ::= (* as in Python *)
+   <fname> ::= (* string *)
+   <pname> ::= (* string *)
+   <pvalue> ::= (* string *)
+   <first> ::= (* string *)
+   <second> ::= (* string *)
+
+The following is an example of a fictitious complex test that is itself parameterized and depends on parameterized fixtures as well.
+
+.. code-block:: python
+
+   import reframe as rfm
+
+
+   class MyFixture(rfm.RunOnlyRegressionTest):
+       p = parameter([1, 2])
+
+
+   class X(rfm.RunOnlyRegressionTest):
+       foo = variable(int, value=1)
+
+
+   @rfm.simple_test
+   class TestA(rfm.RunOnlyRegressionTest):
+       f = fixture(MyFixture, scope='test', action='join')
+       x = parameter([3, 4])
+       t = fixture(MyFixture, scope='test')
+       l = fixture(X, scope='environment', variables={'foo': 10})
+       valid_systems = ['*']
+       valid_prog_environs = ['*']
+
+
+Here is how this test is listed where the various components of the display name can be seen:
+
+.. code-block:: console
+
+   - TestA %x=4 %l.foo=10 %t.p=2
+       ^MyFixture %p=1 ~TestA_4_1
+       ^MyFixture %p=2 ~TestA_4_1
+       ^X %foo=10 ~generic:default+builtin
+   - TestA %x=3 %l.foo=10 %t.p=2
+       ^MyFixture %p=1 ~TestA_3_1
+       ^MyFixture %p=2 ~TestA_3_1
+       ^X %foo=10 ~generic:default+builtin
+   - TestA %x=4 %l.foo=10 %t.p=1
+       ^MyFixture %p=2 ~TestA_4_0
+       ^MyFixture %p=1 ~TestA_4_0
+       ^X %foo=10 ~generic:default+builtin
+   - TestA %x=3 %l.foo=10 %t.p=1
+       ^MyFixture %p=2 ~TestA_3_0
+       ^MyFixture %p=1 ~TestA_3_0
+       ^X %foo=10 ~generic:default+builtin
+   Found 4 check(s)
+
+Display names may not always be unique.
+In the following example:
+
+.. code-block:: python
+
+   class MyTest(RegressionTest):
+       p = parameter([1, 1, 1])
+
+This generates three different tests with different unique names, but their display name is the same for all: ``MyTest %p=1``.
+Notice that this example leads to a name conflict with the old naming scheme, since all tests would be named ``MyTest_1``.
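+
+In such cases, you can still select a single variant unambiguously by using the exact match notation of the :option:`-n` option described above and referring to the test by its variant number.
+For instance, assuming the above test is defined in a file named ``mytest.py``, you could list only its second variant as follows:
+
+.. code-block:: console
+
+   ./bin/reframe -c mytest.py -n 'MyTest@1' -l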
+
+
+--------------------------------------
+Differences from the old naming scheme
+--------------------------------------
+
+Prior to version 3.10, ReFrame used to encode the parameter values of an instance of a parameterized test in its name.
+It did so by taking the string representation of the value and replacing any non-alphanumeric character with an underscore.
+This could lead to very long and hard-to-read names when a test defined multiple parameters or the parameter type was more complex.
+Very long test names also meant very long path names, which could lead to problems and random failures.
+Fixtures followed a similar naming pattern, making them hard to debug.
+
+The old naming scheme is still the default for parameterized tests (but not for fixtures) and will remain so until ReFrame 4.0, in order to ensure backward compatibility.
+However, users are advised to enable the new naming scheme by setting the :envvar:`RFM_COMPACT_TEST_NAMES` environment variable.
+
 
 Environment
 -----------
@@ -867,7 +982,7 @@ Here is an alphabetical list of the environment variables recognized by ReFrame:
 
 .. envvar:: RFM_COMPACT_TEST_NAMES
 
-   Enable the compact test naming scheme.
+   Enable the new test naming scheme.
 
    .. table::
      :align: left
diff --git a/docs/regression_test_api.rst b/docs/regression_test_api.rst
index 4c5cb5711c..4e62e14c3a 100644
--- a/docs/regression_test_api.rst
+++ b/docs/regression_test_api.rst
@@ -51,7 +51,7 @@ In essence, these builtins exert control over the test creation, and they allow
 
       p1 = [parameter([1, 2])]  # Undefined behavior
 
-.. py:function:: RegressionMixin.parameter(values=None, inherit_params=False, filter_params=None)
+.. py:function:: RegressionMixin.parameter(values=None, inherit_params=False, filter_params=None, fmt=None)
 
    Inserts or modifies a regression test parameter.
    At the class level, these parameters are stored in a separate namespace referred to as the *parameter space*.
@@ -125,6 +125,12 @@ In essence, these builtins exert control over the test creation, and they allow
      This function must accept a single iterable argument and return an iterable.
      It will be called with the inherited parameter values and it must return the filtered set of parameter values.
      This function will only have an effect if used with ``inherit_params=True``.
+   :param fmt: A formatting function that will be used to format the values of this parameter in the test's :attr:`~reframe.core.pipeline.RegressionTest.display_name`.
+      This function should take the parameter value as its argument and return a string representation of the value.
+      If the returned value is not a string, it will be converted using the :py:func:`str` function.
+
+   .. versionadded:: 3.10.0
+      The ``fmt`` argument is added.
 
 .. py:function:: RegressionMixin.variable(*types, value=None, field=None, **kwargs)
diff --git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst
index 8057e60ca7..a24dbc73b2 100644
--- a/docs/tutorial_advanced.rst
+++ b/docs/tutorial_advanced.rst
@@ -50,7 +50,7 @@ Let's try listing the generated tests:
 
 ReFrame generates 11 tests from the single parameterized test.
 When listing parameterized tests, ReFrame adds the list of parameters after the base test name using the notation ``%<param_name>=<param_value>``.
 Each generated test also gets a unique name.
-For more details on how the test names are generated for various types of tests, please refer to :doc:`test-naming-scheme`.
+For more details on how the test names are generated for various types of tests, please refer to :ref:`test_naming_scheme`.
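+
+For instance, the two variants of the ``HelloMultiLangTest`` test from the basics tutorial, which defines ``lang = parameter(['c', 'cpp'])``, show up in the test listing as follows:
+
+.. code-block:: console
+
+   - HelloMultiLangTest %lang=cpp
+   - HelloMultiLangTest %lang=c
+   Found 2 check(s)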
Test parameterization in ReFrame is very powerful since you can parameterize your tests on anything and you can create complex parameterization spaces.
 A common pattern is to parameterize a test on the environment module that loads a software package, in order to test different versions of it.
diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst
index 44444c6faa..c32a085a3c 100644
--- a/docs/tutorial_deps.rst
+++ b/docs/tutorial_deps.rst
@@ -191,12 +191,14 @@ If we scope our programming environments to ``gnu`` and ``builtin`` only, ReFram
 To gain a deeper understanding of how test dependencies work in ReFrame, please refer to :doc:`dependencies`.
 
+.. _param_deps:
+
 Depending on Parameterized Tests
 --------------------------------
 
 As shown earlier in this section, tests define their dependencies by referencing the target tests by their unique name.
 This is straightforward when referring to regular tests, where their name matches the class name, but it becomes cumbersome trying to refer to a parameterized test, since no safe assumption should be made as to the variant number of the test or how the parameters are encoded in the name.
-In order to safely and reliably refer to a parameterized test, you should use the :func:`get_variant_nums` and :func:`variant_name` class methods as shown in the following example:
+In order to safely and reliably refer to a parameterized test, you should use the :func:`~reframe.core.pipeline.RegressionMixin.get_variant_nums` and :func:`~reframe.core.pipeline.RegressionMixin.variant_name` class methods as shown in the following example:
 
 .. literalinclude:: ../tutorials/deps/parameterized.py
    :emphasize-lines: 37-
diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 6fa8de0703..0cb63c5ee3 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -218,7 +218,7 @@ def pipeline_hooks(cls):
 #: Setting the name of a test is deprecated and will be disabled in the
 #: future. If you were setting the name of a test to circumvent the old
 #: long parameterized test names in order to reference them in
-#: dependency chains, please refer to :doc:`xxx` for more details on how
+#: dependency chains, please refer to :ref:`param_deps` for more details on how
 #: to achieve this.
 #:
 #: ..
versionchanged:: 3.10.0
From c22440c13910b6fae1ca9eda7aa8f04b64c27dc8 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sat, 22 Jan 2022 10:55:51 +0100
Subject: [PATCH 51/62] Adapt info message
---
 reframe/frontend/executors/__init__.py |  7 +++++++
 reframe/frontend/executors/policies.py | 12 +++++-------
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py
index b376be944a..f98ec9c0b6 100644
--- a/reframe/frontend/executors/__init__.py
+++ b/reframe/frontend/executors/__init__.py
@@ -389,6 +389,13 @@ def abort(self, cause=None):
         else:
             self.fail((type(exc), exc, None))
 
+    def info(self):
+        '''Return an info string about this task.'''
+        name = self.check.display_name
+        part = self.testcase.partition.fullname
+        env = self.testcase.environ.name
+        return f'{name} @{part}+{env}'
+
 
 class TaskEventListener(abc.ABC):
     @abc.abstractmethod
diff --git a/reframe/frontend/executors/policies.py b/reframe/frontend/executors/policies.py
index 438cb6fb06..3c34780129 100644
--- a/reframe/frontend/executors/policies.py
+++ b/reframe/frontend/executors/policies.py
@@ -437,11 +437,9 @@ def _advance_startup(self, task):
             return 1
         elif self.deps_succeeded(task):
             try:
-                self.printer.status(
-                    'RUN', f'{task.check.name} on '
-                    f'{task.testcase.partition.fullname} using '
-                    f'{task.testcase.environ.name}'
-                )
+                part = task.testcase.partition
+                env = task.testcase.environ.name
+                self.printer.status('RUN', task.info())
                 task.setup(task.testcase.partition,
                            task.testcase.environ,
                            sched_flex_alloc_nodes=self.sched_flex_alloc_nodes,
@@ -592,7 +590,7 @@ def on_task_failure(self, task):
         timings = task.pipeline_timings(['compile_complete',
                                          'run_complete',
                                          'total'])
-        msg = f'{task.check.info()} [{timings}]'
+        msg = f'{task.info()} [{timings}]'
         if task.failed_stage == 'cleanup':
             self.printer.status('ERROR', msg, just='right')
         else:
@@ -616,7 +614,7 @@ def on_task_success(self, task):
         timings = task.pipeline_timings(['compile_complete',
                                          'run_complete',
                                          'total'])
-        msg = f'{task.check.info()} [{timings}]'
+        msg = f'{task.info()} [{timings}]'
         self.printer.status('OK', msg, just='right')
 
         timings = task.pipeline_timings(['setup',
                                          'compile_complete',
From 56bba6cc031755b686a23459dc0b750397488f17 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sat, 22 Jan 2022 13:25:30 +0100
Subject: [PATCH 52/62] WIP: Add tool for generating the output listings
---
 tools/gendoclistings.py | 129 ++++++++++++++++++++++++++++++++
 1 file changed, 129 insertions(+)
 create mode 100755 tools/gendoclistings.py

diff --git a/tools/gendoclistings.py b/tools/gendoclistings.py
new file mode 100755
index 0000000000..fcb81ec696
--- /dev/null
+++ b/tools/gendoclistings.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+
+import collections
+import functools
+import os
+import re
+import socket
+import sys
+import reframe.utility.osext as osext
+
+
+def print_usage():
+    print(f'Usage: {sys.argv[0]} [local|remote|all|<name>]')
+
+
+ListingInfo = collections.namedtuple(
+    'ListingInfo',
+    ['command', 'filename', 'tags', 'filters', 'env', 'xfail'],
+    defaults=[None, None, False]
+)
+
+
+def remove_nocolor_opt(s):
+    return s.replace(' --nocolor', '')
+
+
+def remove_system_opt(s):
+    return s.replace(' --system=catalina', '')
+
+
+def replace_paths(s):
+    cwd = os.getcwd()
+    return s.replace(cwd, '/path/to/reframe').replace(os.getenv('HOME'), '/home/user')
+
+
+def replace_user(s):
+    user = osext.osuser()
+    return s.replace(user, 'user')
+
+
+def replace_hostname(s):
+    host
= socket.getfqdn()
+    return s.replace(host, 'host')
+
+
+DEFAULT_FILTERS = [remove_nocolor_opt, remove_system_opt,
+                   replace_paths, replace_user, replace_hostname]
+
+
+LISTINGS = {
+    'hello1': ListingInfo(
+        './bin/reframe --nocolor -c tutorials/basics/hello/hello1.py -r',
+        'docs/listings/hello1.txt',
+        {'local'},
+        DEFAULT_FILTERS
+    ),
+    'run-report': ListingInfo(
+        f'cat {os.getenv("HOME")}/.reframe/reports/run-report.json',
+        'docs/listings/run-report.json',
+        {'local'},
+        DEFAULT_FILTERS
+    ),
+    'hello2': ListingInfo(
+        './bin/reframe --nocolor -c tutorials/basics/hello/hello2.py -r',
+        'docs/listings/hello2.txt',
+        {'local'},
+        DEFAULT_FILTERS,
+        xfail=True
+    ),
+    'hello2_catalina': ListingInfo(
+        './bin/reframe -C tutorials/config/settings.py --system=catalina --nocolor -c tutorials/basics/hello/hello2.py -r',
+        'docs/listings/hello2_catalina.txt',
+        {'local'},
+        DEFAULT_FILTERS
+    ),
+    'hellomp1': ListingInfo(
+        './bin/reframe --system=catalina --nocolor -c tutorials/basics/hellomp/hellomp1.py -r',
+        'docs/listings/hellomp1.txt',
+        {'local'},
+        DEFAULT_FILTERS,
+        env={
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py')
+        }
+    ),
+    'hellomp2': ListingInfo(
+        './bin/reframe --system=catalina --nocolor -c tutorials/basics/hellomp/hellomp2.py -r',
+        'docs/listings/hellomp2.txt',
+        {'local'},
+        DEFAULT_FILTERS,
+        env={
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py')
+        },
+        xfail=True
+    )
+}
+
+
+runcmd = functools.partial(osext.run_command, log=False)
+
+if __name__ == '__main__':
+    try:
+        choice = sys.argv[1]
+    except IndexError:
+        choice = 'all'
+
+    for name, info in LISTINGS.items():
+        if choice != 'all' and choice != name:
+            continue
+
+        print(f'Generating listing {name}...')
+
+        # Set up the environment
+        if info.env:
+            for k, v in info.env.items():
+                os.environ[k] = v
+
+        completed = runcmd(info.command, check=not info.xfail)
+        if info.xfail and completed.returncode == 0:
+            print(f'{info.command} should have failed, but it did not; skipping...')
+            continue
+
+        # Apply filters
+        output = completed.stdout
+        for f in info.filters:
+            output = f(output)
+
+        # Write the listing
+        with open(info.filename, 'w') as fp:
+            fp.write(output)
From 53a23375d7dbf5149e1e8a854df6cd805d76ec2c Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sat, 22 Jan 2022 13:25:51 +0100
Subject: [PATCH 53/62] Update local listings of Tutorial 1
---
 docs/listings/hello1.txt          | 27 +++++++---------
 docs/listings/hello2.txt          | 40 ++++++++++-------------
 docs/listings/hello2_catalina.txt | 46 ++++++++++++---------------
 docs/listings/hellomp1.txt        | 33 +++++++++----------
 docs/listings/hellomp2.txt        | 53 +++++++++++++++----------------
 docs/listings/run-report.json     | 36 ++++++++++-----------
 6 files changed, 107 insertions(+), 128 deletions(-)

diff --git a/docs/listings/hello1.txt b/docs/listings/hello1.txt
index e302ed7ee3..ebc7eabcf5 100644
--- a/docs/listings/hello1.txt
+++ b/docs/listings/hello1.txt
@@ -1,25 +1,22 @@
 [ReFrame Setup]
-  version:           3.10.0-dev.2+072543f3
+  version:           3.10.0-dev.3+c22440c1
   command:           './bin/reframe -c tutorials/basics/hello/hello1.py -r'
   launched by:       user@host
-  working directory: '/Users/user/Repositories/reframe'
+  working directory: '/path/to/reframe'
   settings file:     '<builtin>'
-  check search path:
'/path/to/reframe/tutorials/basics/hello/hello1.py'
+  stage directory:   '/path/to/reframe/stage'
+  output directory:  '/path/to/reframe/output'
 
 [==========] Running 1 check(s)
-[==========] Started on Tue Jan 18 23:54:45 2022
+[==========] Started on Sat Jan 22 13:21:50 2022
 
-[----------] started processing HelloTest (HelloTest)
-[ RUN      ] HelloTest on generic:default using builtin
-[----------] finished processing HelloTest (HelloTest)
-
-[----------] waiting for spawned checks to finish
-[       OK ] (1/1) HelloTest @generic:default+builtin [compile: 0.149s run: 0.163s total: 0.338s]
+[----------] start processing checks
+[ RUN      ] HelloTest @generic:default+builtin
+[       OK ] (1/1) HelloTest @generic:default+builtin [compile: 0.272s run: 0.359s total: 0.784s]
 [----------] all spawned checks have finished
 
 [  PASSED  ] Ran 1/1 test case(s) from 1 check(s) (0 failure(s), 0 skipped)
-[==========] Finished on Tue Jan 18 23:54:45 2022
-Run report saved in '/Users/user/.reframe/reports/run-report.json'
-Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-nv8jqh00.log'
+[==========] Finished on Sat Jan 22 13:21:51 2022
+Run report saved in '/home/user/.reframe/reports/run-report.json'
+Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-8c6ybdvg.log'
diff --git a/docs/listings/hello2.txt b/docs/listings/hello2.txt
index beba15a8e9..8056ae9797 100644
--- a/docs/listings/hello2.txt
+++ b/docs/listings/hello2.txt
@@ -1,32 +1,26 @@
 [ReFrame Setup]
-  version:           3.10.0-dev.2+bf404ae1
+  version:           3.10.0-dev.3+c22440c1
   command:           './bin/reframe -c tutorials/basics/hello/hello2.py -r'
-  launched by:       karakasv@vpn-39.cscs.ch
+  launched by:       user@host
-  working directory: '/Users/karakasv/Repositories/reframe'
+  working directory: '/path/to/reframe'
   settings file:     '<builtin>'
-  check search path: '/Users/karakasv/Repositories/reframe/tutorials/basics/hello/hello2.py'
+  check search path: '/path/to/reframe/tutorials/basics/hello/hello2.py'
-  stage directory:   '/Users/karakasv/Repositories/reframe/stage'
+  stage directory:   '/path/to/reframe/stage'
-  output directory:  '/Users/karakasv/Repositories/reframe/output'
+  output directory:  '/path/to/reframe/output'
 
 [==========] Running 2 check(s)
-[==========] Started on Wed Jan 19 14:50:11 2022
+[==========] Started on Sat Jan 22 13:21:51 2022
 
-[----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp)
-[ RUN      ] HelloMultiLangTest_cpp on generic:default using builtin
-[     FAIL ] (1/2) HelloMultiLangTest %lang=cpp @generic:default+builtin [compile: 0.011s run: n/a total: 0.028s]
-==> test failed during 'compile': test staged in '/Users/karakasv/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp'
-[----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp)
-
-[----------] started processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c)
-[ RUN      ] HelloMultiLangTest_c on generic:default using builtin
-[----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c)
-
-[----------] waiting for spawned checks to finish
-[       OK ] (2/2) HelloMultiLangTest %lang=c @generic:default+builtin [compile: 0.437s run: 0.434s total: 0.896s]
+[----------] start processing checks
+[ RUN      ] HelloMultiLangTest %lang=cpp @generic:default+builtin
+[ RUN      ] HelloMultiLangTest %lang=c @generic:default+builtin
+[     FAIL ] (1/2) HelloMultiLangTest %lang=cpp @generic:default+builtin [compile: 0.006s run: n/a total: 0.043s]
+==> test failed during 'compile': test staged in
'/path/to/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp' +[ OK ] (2/2) HelloMultiLangTest %lang=c @generic:default+builtin [compile: 0.268s run: 0.368s total: 0.813s] [----------] all spawned checks have finished [ FAILED ] Ran 2/2 test case(s) from 2 check(s) (1 failure(s), 0 skipped) -[==========] Finished on Wed Jan 19 14:50:12 2022 +[==========] Finished on Sat Jan 22 13:21:52 2022 ============================================================================== SUMMARY OF FAILURES @@ -36,7 +30,7 @@ FAILURE INFO for HelloMultiLangTest_cpp * Description: HelloMultiLangTest %lang=cpp * System partition: generic:default * Environment: builtin - * Stage directory: /Users/karakasv/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp + * Stage directory: /path/to/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp * Node list: * Job type: local (id=None) * Dependencies (conceptual): [] @@ -46,5 +40,5 @@ FAILURE INFO for HelloMultiLangTest_cpp * Rerun with '-n HelloMultiLangTest_cpp -p builtin --system generic:default -r' * Reason: build system error: I do not know how to compile a C++ program ------------------------------------------------------------------------------ -Run report saved in '/Users/karakasv/.reframe/reports/run-report.json' -Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-82sxn1an.log' +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-tse_opq0.log' diff --git a/docs/listings/hello2_catalina.txt b/docs/listings/hello2_catalina.txt index 079ed51d61..36ca1a3dd3 100644 --- a/docs/listings/hello2_catalina.txt +++ b/docs/listings/hello2_catalina.txt @@ -1,34 +1,28 @@ [ReFrame Setup] - version: 3.10.0-dev.2+bf404ae1 - command: './bin/reframe -C tutorials/config/mysettings.py -c tutorials/basics/hello/hello2.py -r' + version: 3.10.0-dev.3+c22440c1 + command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r' launched by: user@host - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/mysettings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' + working directory: '/path/to/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/path/to/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' [==========] Running 2 check(s) -[==========] Started on Wed Jan 19 14:56:30 2022 +[==========] Started on Sat Jan 22 13:21:53 2022 -[----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp) -[ RUN ] HelloMultiLangTest_cpp on catalina:default using gnu -[ RUN ] HelloMultiLangTest_cpp on catalina:default using clang -[----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp) - -[----------] started processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c) -[ RUN ] HelloMultiLangTest_c on catalina:default using gnu -[ RUN ] HelloMultiLangTest_c on catalina:default using clang -[----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c) - -[----------] waiting for spawned checks to finish -[ OK ] (1/4) HelloMultiLangTest %lang=c @catalina:default+gnu [compile: 0.249s run: 0.348s total: 0.623s] -[ OK ] (2/4) HelloMultiLangTest 
%lang=cpp @catalina:default+gnu [compile: 0.777s run: 1.270s total: 2.076s] -[ OK ] (3/4) HelloMultiLangTest %lang=cpp @catalina:default+clang [compile: 0.563s run: 0.841s total: 1.431s] -[ OK ] (4/4) HelloMultiLangTest %lang=c @catalina:default+clang [compile: 0.236s run: 0.430s total: 0.692s] +[----------] start processing checks +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+clang +[ RUN ] HelloMultiLangTest %lang=c @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=c @catalina:default+clang +[ OK ] (1/4) HelloMultiLangTest %lang=c @catalina:default+gnu [compile: 0.360s run: 0.511s total: 1.135s] +[ OK ] (2/4) HelloMultiLangTest %lang=c @catalina:default+clang [compile: 0.359s run: 0.514s total: 1.139s] +[ OK ] (3/4) HelloMultiLangTest %lang=cpp @catalina:default+gnu [compile: 0.563s run: 0.549s total: 1.343s] +[ OK ] (4/4) HelloMultiLangTest %lang=cpp @catalina:default+clang [compile: 0.564s run: 0.551s total: 1.346s] [----------] all spawned checks have finished [ PASSED ] Ran 4/4 test case(s) from 2 check(s) (0 failure(s), 0 skipped) -[==========] Finished on Wed Jan 19 14:56:33 2022 -Run report saved in '/Users/user/.reframe/reports/run-report.json' -Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-ia9qxjzo.log' +[==========] Finished on Sat Jan 22 13:21:54 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-iehz9eub.log' diff --git a/docs/listings/hellomp1.txt b/docs/listings/hellomp1.txt index 71e238a538..b032b8decf 100644 --- a/docs/listings/hellomp1.txt +++ b/docs/listings/hellomp1.txt @@ -1,27 +1,24 @@ [ReFrame Setup] - version: 3.10.0-dev.2+bf404ae1 + version: 3.10.0-dev.3+c22440c1 command: './bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r' launched by: user@host - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/mysettings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp1.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' + working directory: '/path/to/reframe' + settings file: '/path/to/reframe/tutorials/config/settings.py' + check search path: '/path/to/reframe/tutorials/basics/hellomp/hellomp1.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' [==========] Running 1 check(s) -[==========] Started on Wed Jan 19 17:04:06 2022 +[==========] Started on Sat Jan 22 13:21:54 2022 -[----------] started processing HelloThreadedTest (HelloThreadedTest) -[ RUN ] HelloThreadedTest on catalina:default using gnu -[ RUN ] HelloThreadedTest on catalina:default using clang -[----------] finished processing HelloThreadedTest (HelloThreadedTest) - -[----------] waiting for spawned checks to finish -[ OK ] (1/2) HelloThreadedTest @catalina:default+gnu [compile: 1.764s run: 1.566s total: 3.355s] -[ OK ] (2/2) HelloThreadedTest @catalina:default+clang [compile: 1.481s run: 0.469s total: 1.975s] +[----------] start processing checks +[ RUN ] HelloThreadedTest @catalina:default+gnu +[ RUN ] HelloThreadedTest @catalina:default+clang +[ OK ] (1/2) HelloThreadedTest @catalina:default+gnu [compile: 0.963s run: 0.296s total: 1.418s] +[ OK ] (2/2) HelloThreadedTest @catalina:default+clang [compile: 0.760s run: 0.434s total: 1.421s] [----------] all spawned checks have finished [ PASSED ] Ran 2/2 test 
case(s) from 1 check(s) (0 failure(s), 0 skipped) -[==========] Finished on Wed Jan 19 17:04:09 2022 -Run report saved in '/Users/user/.reframe/reports/run-report.json' -Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-r26o0go9.log' +[==========] Finished on Sat Jan 22 13:21:56 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-chq08zds.log' diff --git a/docs/listings/hellomp2.txt b/docs/listings/hellomp2.txt index 303f52e331..c8a4b3918e 100644 --- a/docs/listings/hellomp2.txt +++ b/docs/listings/hellomp2.txt @@ -1,30 +1,27 @@ [ReFrame Setup] - version: 3.10.0-dev.2+bf404ae1 + version: 3.10.0-dev.3+c22440c1 command: './bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r' launched by: user@host - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/mysettings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' + working directory: '/path/to/reframe' + settings file: '/path/to/reframe/tutorials/config/settings.py' + check search path: '/path/to/reframe/tutorials/basics/hellomp/hellomp2.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' [==========] Running 1 check(s) -[==========] Started on Wed Jan 19 17:06:19 2022 +[==========] Started on Sat Jan 22 13:21:56 2022 -[----------] started processing HelloThreadedExtendedTest (HelloThreadedExtendedTest) -[ RUN ] HelloThreadedExtendedTest on catalina:default using gnu -[ RUN ] HelloThreadedExtendedTest on catalina:default using clang -[----------] finished processing HelloThreadedExtendedTest (HelloThreadedExtendedTest) - -[----------] waiting for spawned checks to finish -[ FAIL ] (1/2) HelloThreadedExtendedTest @catalina:default+gnu [compile: 1.259s run: 0.904s total: 2.190s] -==> test failed during 'sanity': test staged in '/Users/user/Repositories/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest' -[ FAIL ] (2/2) HelloThreadedExtendedTest @catalina:default+clang [compile: 0.827s run: 0.296s total: 1.148s] -==> test failed during 'sanity': test staged in '/Users/user/Repositories/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest' +[----------] start processing checks +[ RUN ] HelloThreadedExtendedTest @catalina:default+gnu +[ RUN ] HelloThreadedExtendedTest @catalina:default+clang +[ FAIL ] (1/2) HelloThreadedExtendedTest @catalina:default+clang [compile: 0.761s run: 0.413s total: 1.401s] +==> test failed during 'sanity': test staged in '/path/to/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest' +[ FAIL ] (2/2) HelloThreadedExtendedTest @catalina:default+gnu [compile: 0.962s run: 0.412s total: 1.538s] +==> test failed during 'sanity': test staged in '/path/to/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest' [----------] all spawned checks have finished [ FAILED ] Ran 2/2 test case(s) from 1 check(s) (2 failure(s), 0 skipped) -[==========] Finished on Wed Jan 19 17:06:21 2022 +[==========] Finished on Sat Jan 22 13:21:58 2022 ============================================================================== SUMMARY OF FAILURES @@ -34,30 +31,30 @@ FAILURE INFO for HelloThreadedExtendedTest * Description: HelloThreadedExtendedTest * System partition: catalina:default * Environment: gnu - * Stage directory: 
/Users/user/Repositories/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest - * Node list: vpn-39 - * Job type: local (id=34268) + * Stage directory: /path/to/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest + * Node list: tresa.localNone + * Job type: local (id=43387) * Dependencies (conceptual): [] * Dependencies (actual): [] * Maintainers: [] * Failing phase: sanity * Rerun with '-n HelloThreadedExtendedTest -p gnu --system catalina:default -r' - * Reason: sanity error: 10 != 16 + * Reason: sanity error: 7 != 16 ------------------------------------------------------------------------------ FAILURE INFO for HelloThreadedExtendedTest * Expanded name: HelloThreadedExtendedTest * Description: HelloThreadedExtendedTest * System partition: catalina:default * Environment: clang - * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest - * Node list: vpn-39 - * Job type: local (id=34279) + * Stage directory: /path/to/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest + * Node list: tresa.localNone + * Job type: local (id=43384) * Dependencies (conceptual): [] * Dependencies (actual): [] * Maintainers: [] * Failing phase: sanity * Rerun with '-n HelloThreadedExtendedTest -p clang --system catalina:default -r' - * Reason: sanity error: 13 != 16 + * Reason: sanity error: 11 != 16 ------------------------------------------------------------------------------ -Run report saved in '/Users/user/.reframe/reports/run-report.json' -Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-uoew5foo.log' +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-31lkxfie.log' diff --git a/docs/listings/run-report.json b/docs/listings/run-report.json index 7d2f8be19b..a31169ecd1 100644 --- a/docs/listings/run-report.json +++ b/docs/listings/run-report.json @@ -4,14 +4,14 @@ "config_file": "", "data_version": "2.0", "hostname": "host", - "prefix_output": "/Users/user/Repositories/reframe/output", - "prefix_stage": "/Users/user/Repositories/reframe/stage", + "prefix_output": "/path/to/reframe/output", + "prefix_stage": "/path/to/reframe/stage", "user": "user", - "version": "3.10.0-dev.2+072543f3", - "workdir": "/Users/user/Repositories/reframe", - "time_start": "2022-01-18T23:57:17+0100", - "time_end": "2022-01-18T23:57:17+0100", - "time_elapsed": 0.5832219123840332, + "version": "3.10.0-dev.3+c22440c1", + "workdir": "/path/to/reframe", + "time_start": "2022-01-22T13:21:50+0100", + "time_end": "2022-01-22T13:21:51+0100", + "time_elapsed": 0.8124568462371826, "num_cases": 1, "num_failures": 0 }, @@ -30,11 +30,11 @@ "dependencies_conceptual": [], "description": "HelloTest", "display_name": "HelloTest", - "filename": "/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py", + "filename": "/path/to/reframe/tutorials/basics/hello/hello1.py", "environment": "builtin", "fail_phase": null, "fail_reason": null, - "jobid": "27101", + "jobid": "43152", "job_stderr": "rfm_HelloTest_job.err", "job_stdout": "rfm_HelloTest_job.out", "maintainers": [], @@ -42,20 +42,20 @@ "nodelist": [ "tresa.local" ], - "outputdir": "/Users/user/Repositories/reframe/output/generic/default/builtin/HelloTest", + "outputdir": "/path/to/reframe/output/generic/default/builtin/HelloTest", "perfvars": null, - "prefix": "/Users/user/Repositories/reframe/tutorials/basics/hello", + "prefix": "/path/to/reframe/tutorials/basics/hello", "result": "success", - 
"stagedir": "/Users/user/Repositories/reframe/stage/generic/default/builtin/HelloTest", + "stagedir": "/path/to/reframe/stage/generic/default/builtin/HelloTest", "scheduler": "local", "system": "generic:default", "tags": [], - "time_compile": 0.367156982421875, - "time_performance": 8.7738037109375e-05, - "time_run": 0.1748819351196289, - "time_sanity": 0.0006799697875976562, - "time_setup": 0.009120941162109375, - "time_total": 0.5680220127105713, + "time_compile": 0.27164483070373535, + "time_performance": 0.00010180473327636719, + "time_run": 0.3764667510986328, + "time_sanity": 0.0006909370422363281, + "time_setup": 0.007919073104858398, + "time_total": 0.8006880283355713, "unique_name": "HelloTest" } ] From 76e0266728032b9ccd0758b918beddc3d9f4b07a Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sat, 22 Jan 2022 20:29:53 +0100 Subject: [PATCH 54/62] Update tutorial listings --- docs/listings/alltests_daint.txt | 226 +++++++++--------- .../osu_bandwidth_concretized_daint.txt | 14 +- .../osu_bandwidth_concretized_daint_pgi.txt | 14 +- docs/listings/osu_bench_deps.txt | 155 +++++------- docs/listings/osu_bench_fixtures_list.txt | 14 +- docs/listings/osu_bench_fixtures_run.txt | 163 +++++-------- docs/listings/osu_bench_list_concretized.txt | 14 +- .../osu_bench_list_concretized_gnu.txt | 39 +-- docs/listings/osu_latency_list.txt | 18 +- docs/listings/osu_latency_unresolved_deps.txt | 48 ++-- docs/listings/param_deps_list.txt | 14 +- docs/listings/stream4_daint.txt | 169 +++++++------ tools/gendoclistings.py | 184 ++++++++++++-- 13 files changed, 539 insertions(+), 533 deletions(-) diff --git a/docs/listings/alltests_daint.txt b/docs/listings/alltests_daint.txt index f38cc18bcf..48192eadaa 100644 --- a/docs/listings/alltests_daint.txt +++ b/docs/listings/alltests_daint.txt @@ -1,117 +1,105 @@ [ReFrame Setup] - version: 3.10.0-dev.2+b7a6e14a + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/basics/ -R -n HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest --performance-report -r' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: (R) '/users/user/Devel/reframe/tutorials/basics' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: (R) '/home/user/Devel/reframe/tutorials/basics' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [==========] Running 4 check(s) -[==========] Started on Wed Jan 19 18:20:56 2022 +[==========] Started on Sat Jan 22 22:43:38 2022 -[----------] started processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp) -[ RUN ] HelloMultiLangTest_cpp on daint:login using builtin -[ RUN ] HelloMultiLangTest_cpp on daint:login using gnu -[ RUN ] HelloMultiLangTest_cpp on daint:login using intel -[ RUN ] HelloMultiLangTest_cpp on daint:login using pgi -[ RUN ] HelloMultiLangTest_cpp on daint:login using cray -[ RUN ] HelloMultiLangTest_cpp on daint:gpu using gnu -[ RUN ] HelloMultiLangTest_cpp on daint:gpu using intel -[ RUN ] HelloMultiLangTest_cpp on daint:gpu using pgi -[ RUN ] HelloMultiLangTest_cpp on daint:gpu using cray -[ RUN ] HelloMultiLangTest_cpp on daint:mc using gnu -[ RUN ] HelloMultiLangTest_cpp on daint:mc using intel -[ RUN ] HelloMultiLangTest_cpp on 
daint:mc using pgi -[ RUN ] HelloMultiLangTest_cpp on daint:mc using cray -[----------] finished processing HelloMultiLangTest_cpp (HelloMultiLangTest %lang=cpp) - -[----------] started processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c) -[ RUN ] HelloMultiLangTest_c on daint:login using builtin -[ RUN ] HelloMultiLangTest_c on daint:login using gnu -[ RUN ] HelloMultiLangTest_c on daint:login using intel -[ RUN ] HelloMultiLangTest_c on daint:login using pgi -[ RUN ] HelloMultiLangTest_c on daint:login using cray -[ RUN ] HelloMultiLangTest_c on daint:gpu using gnu -[ RUN ] HelloMultiLangTest_c on daint:gpu using intel -[ RUN ] HelloMultiLangTest_c on daint:gpu using pgi -[ RUN ] HelloMultiLangTest_c on daint:gpu using cray -[ RUN ] HelloMultiLangTest_c on daint:mc using gnu -[ RUN ] HelloMultiLangTest_c on daint:mc using intel -[ RUN ] HelloMultiLangTest_c on daint:mc using pgi -[ RUN ] HelloMultiLangTest_c on daint:mc using cray -[----------] finished processing HelloMultiLangTest_c (HelloMultiLangTest %lang=c) - -[----------] started processing HelloThreadedExtended2Test (HelloThreadedExtended2Test) -[ RUN ] HelloThreadedExtended2Test on daint:login using builtin -[ RUN ] HelloThreadedExtended2Test on daint:login using gnu -[ RUN ] HelloThreadedExtended2Test on daint:login using intel -[ RUN ] HelloThreadedExtended2Test on daint:login using pgi -[ RUN ] HelloThreadedExtended2Test on daint:login using cray -[ RUN ] HelloThreadedExtended2Test on daint:gpu using gnu -[ RUN ] HelloThreadedExtended2Test on daint:gpu using intel -[ RUN ] HelloThreadedExtended2Test on daint:gpu using pgi -[ RUN ] HelloThreadedExtended2Test on daint:gpu using cray -[ RUN ] HelloThreadedExtended2Test on daint:mc using gnu -[ RUN ] HelloThreadedExtended2Test on daint:mc using intel -[ RUN ] HelloThreadedExtended2Test on daint:mc using pgi -[ RUN ] HelloThreadedExtended2Test on daint:mc using cray -[----------] finished processing HelloThreadedExtended2Test (HelloThreadedExtended2Test) - -[----------] started processing StreamWithRefTest (StreamWithRefTest) -[ RUN ] StreamWithRefTest on daint:login using gnu -[ RUN ] StreamWithRefTest on daint:gpu using gnu -[ RUN ] StreamWithRefTest on daint:mc using gnu -[----------] finished processing StreamWithRefTest (StreamWithRefTest) - -[----------] waiting for spawned checks to finish -[ OK ] ( 1/42) HelloThreadedExtended2Test @daint:login+cray [compile: 0.869s run: 105.884s total: 106.789s] -[ OK ] ( 2/42) HelloThreadedExtended2Test @daint:login+intel [compile: 2.418s run: 111.171s total: 121.396s] -[ OK ] ( 3/42) HelloMultiLangTest %lang=c @daint:login+cray [compile: 0.194s run: 176.455s total: 176.687s] -[ OK ] ( 4/42) HelloMultiLangTest %lang=cpp @daint:login+pgi [compile: 2.373s run: 238.350s total: 240.758s] -[ OK ] ( 5/42) HelloThreadedExtended2Test @daint:gpu+pgi [compile: 3.084s run: 90.799s total: 93.923s] -[ OK ] ( 6/42) HelloThreadedExtended2Test @daint:gpu+gnu [compile: 2.380s run: 103.562s total: 105.977s] -[ OK ] ( 7/42) HelloMultiLangTest %lang=c @daint:gpu+pgi [compile: 1.614s run: 163.384s total: 165.042s] -[ OK ] ( 8/42) HelloMultiLangTest %lang=c @daint:gpu+gnu [compile: 1.571s run: 174.946s total: 176.552s] -[ OK ] ( 9/42) HelloMultiLangTest %lang=cpp @daint:gpu+intel [compile: 2.124s run: 229.839s total: 231.999s] -[ OK ] (10/42) HelloMultiLangTest %lang=c @daint:mc+pgi [compile: 1.626s run: 136.895s total: 138.567s] -[ OK ] (11/42) HelloMultiLangTest %lang=c @daint:mc+gnu [compile: 1.540s run: 147.386s total: 148.961s] -[ OK ] (12/42) 
HelloMultiLangTest %lang=cpp @daint:mc+intel [compile: 2.158s run: 209.736s total: 211.936s] -[ OK ] (13/42) HelloThreadedExtended2Test @daint:login+builtin [compile: 0.825s run: 124.587s total: 125.455s] -[ OK ] (14/42) HelloMultiLangTest %lang=c @daint:login+pgi [compile: 1.599s run: 177.615s total: 179.539s] -[ OK ] (15/42) HelloMultiLangTest %lang=c @daint:login+builtin [compile: 0.163s run: 184.760s total: 184.963s] -[ OK ] (16/42) HelloMultiLangTest %lang=cpp @daint:login+gnu [compile: 1.874s run: 244.668s total: 246.575s] -[ OK ] (17/42) HelloMultiLangTest %lang=cpp @daint:gpu+pgi [compile: 2.239s run: 224.340s total: 226.619s] -[ OK ] (18/42) HelloMultiLangTest %lang=cpp @daint:gpu+gnu [compile: 1.884s run: 235.554s total: 237.470s] -[ OK ] (19/42) HelloMultiLangTest %lang=cpp @daint:mc+pgi [compile: 2.121s run: 191.871s total: 194.032s] -[ OK ] (20/42) HelloMultiLangTest %lang=cpp @daint:mc+gnu [compile: 1.788s run: 215.189s total: 217.017s] -[ OK ] (21/42) HelloMultiLangTest %lang=c @daint:login+gnu [compile: 1.571s run: 183.268s total: 184.878s] -[ OK ] (22/42) HelloMultiLangTest %lang=cpp @daint:login+cray [compile: 0.516s run: 237.593s total: 238.140s] -[ OK ] (23/42) HelloMultiLangTest %lang=cpp @daint:login+intel [compile: 2.124s run: 242.000s total: 244.157s] -[ OK ] (24/42) HelloMultiLangTest %lang=cpp @daint:login+builtin [compile: 0.517s run: 246.791s total: 247.343s] -[ OK ] (25/42) HelloThreadedExtended2Test @daint:login+pgi [compile: 3.005s run: 108.904s total: 111.944s] -[ OK ] (26/42) HelloMultiLangTest %lang=c @daint:login+intel [compile: 2.135s run: 181.436s total: 183.606s] -[ OK ] (27/42) HelloThreadedExtended2Test @daint:gpu+cray [compile: 0.886s run: 87.642s total: 88.565s] -[ OK ] (28/42) HelloMultiLangTest %lang=c @daint:gpu+cray [compile: 0.170s run: 153.574s total: 153.780s] -[ OK ] (29/42) HelloMultiLangTest %lang=cpp @daint:gpu+cray [compile: 0.519s run: 220.703s total: 221.262s] -[ OK ] (30/42) HelloMultiLangTest %lang=c @daint:mc+cray [compile: 0.271s run: 133.432s total: 133.746s] -[ OK ] (31/42) HelloMultiLangTest %lang=cpp @daint:mc+cray [compile: 0.504s run: 188.941s total: 189.483s] -[ OK ] (32/42) HelloThreadedExtended2Test @daint:login+gnu [compile: 2.138s run: 124.264s total: 126.440s] -[ OK ] (33/42) HelloMultiLangTest %lang=c @daint:gpu+intel [compile: 2.186s run: 170.732s total: 172.956s] -[ OK ] (34/42) HelloMultiLangTest %lang=c @daint:mc+intel [compile: 2.097s run: 143.858s total: 145.996s] -[ OK ] (35/42) StreamWithRefTest @daint:login+gnu [compile: 1.923s run: 16.452s total: 18.410s] -[ OK ] (36/42) HelloThreadedExtended2Test @daint:gpu+intel [compile: 2.447s run: 99.826s total: 102.307s] -[ OK ] (37/42) HelloThreadedExtended2Test @daint:mc+pgi [compile: 2.723s run: 172.072s total: 174.835s] -[ OK ] (38/42) HelloThreadedExtended2Test @daint:mc+gnu [compile: 2.187s run: 216.338s total: 218.569s] -[ OK ] (39/42) HelloThreadedExtended2Test @daint:mc+intel [compile: 2.351s run: 210.326s total: 212.711s] -[ OK ] (40/42) HelloThreadedExtended2Test @daint:mc+cray [compile: 0.827s run: 243.227s total: 244.100s] -[ OK ] (41/42) StreamWithRefTest @daint:mc+gnu [compile: 1.797s run: 228.275s total: 230.113s] -[ OK ] (42/42) StreamWithRefTest @daint:gpu+gnu [compile: 1.887s run: 400.080s total: 402.012s] +[----------] start processing checks +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+builtin +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+intel +[ RUN ] HelloMultiLangTest %lang=cpp 
@daint:login+pgi +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+cray +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+intel +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+pgi +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+cray +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+intel +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+pgi +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+cray +[ RUN ] HelloMultiLangTest %lang=c @daint:login+builtin +[ RUN ] HelloMultiLangTest %lang=c @daint:login+gnu +[ RUN ] HelloMultiLangTest %lang=c @daint:login+intel +[ RUN ] HelloMultiLangTest %lang=c @daint:login+pgi +[ RUN ] HelloMultiLangTest %lang=c @daint:login+cray +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+gnu +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+intel +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+pgi +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+cray +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+gnu +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+intel +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+pgi +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+cray +[ RUN ] HelloThreadedExtended2Test @daint:login+builtin +[ RUN ] HelloThreadedExtended2Test @daint:login+gnu +[ RUN ] HelloThreadedExtended2Test @daint:login+intel +[ RUN ] HelloThreadedExtended2Test @daint:login+pgi +[ RUN ] HelloThreadedExtended2Test @daint:login+cray +[ RUN ] HelloThreadedExtended2Test @daint:gpu+gnu +[ RUN ] HelloThreadedExtended2Test @daint:gpu+intel +[ RUN ] HelloThreadedExtended2Test @daint:gpu+pgi +[ RUN ] HelloThreadedExtended2Test @daint:gpu+cray +[ RUN ] HelloThreadedExtended2Test @daint:mc+gnu +[ RUN ] HelloThreadedExtended2Test @daint:mc+intel +[ RUN ] HelloThreadedExtended2Test @daint:mc+pgi +[ RUN ] HelloThreadedExtended2Test @daint:mc+cray +[ RUN ] StreamWithRefTest @daint:login+gnu +[ RUN ] StreamWithRefTest @daint:gpu+gnu +[ RUN ] StreamWithRefTest @daint:mc+gnu +[ OK ] ( 1/42) HelloMultiLangTest %lang=cpp @daint:login+builtin [compile: 4.053s run: 36.016s total: 43.208s] +[ OK ] ( 2/42) HelloMultiLangTest %lang=cpp @daint:login+gnu [compile: 4.047s run: 36.009s total: 43.203s] +[ OK ] ( 3/42) HelloMultiLangTest %lang=cpp @daint:login+intel [compile: 3.431s run: 35.376s total: 43.206s] +[ OK ] ( 4/42) HelloMultiLangTest %lang=cpp @daint:login+pgi [compile: 2.758s run: 34.675s total: 43.208s] +[ OK ] ( 5/42) HelloMultiLangTest %lang=cpp @daint:login+cray [compile: 2.149s run: 34.052s total: 43.211s] +[ OK ] ( 6/42) HelloMultiLangTest %lang=cpp @daint:gpu+gnu [compile: 2.139s run: 60.830s total: 69.995s] +[ OK ] ( 7/42) HelloMultiLangTest %lang=cpp @daint:gpu+intel [compile: 8.863s run: 55.184s total: 70.004s] +[ OK ] ( 8/42) HelloMultiLangTest %lang=c @daint:login+builtin [compile: 32.460s run: 18.053s total: 69.949s] +[ OK ] ( 9/42) HelloMultiLangTest %lang=c @daint:login+gnu [compile: 27.081s run: 18.051s total: 69.954s] +[ OK ] (10/42) HelloMultiLangTest %lang=c @daint:login+intel [compile: 39.615s run: 32.065s total: 87.922s] +[ OK ] (11/42) HelloMultiLangTest %lang=c @daint:login+pgi [compile: 38.873s run: 31.356s total: 87.926s] +[ OK ] (12/42) HelloMultiLangTest %lang=c @daint:login+cray [compile: 38.265s run: 30.731s total: 87.931s] +[ OK ] (13/42) HelloThreadedExtended2Test @daint:login+builtin [compile: 12.837s run: 7.254s total: 92.404s] +[ OK ] (14/42) HelloThreadedExtended2Test @daint:login+gnu [compile: 31.377s run: 31.894s total: 119.747s] +[ OK ] (15/42) HelloThreadedExtended2Test 
@daint:login+intel [compile: 30.708s run: 31.252s total: 119.749s] +[ OK ] (16/42) HelloThreadedExtended2Test @daint:login+pgi [compile: 18.581s run: 30.571s total: 119.753s] +[ OK ] (17/42) HelloThreadedExtended2Test @daint:login+cray [compile: 17.981s run: 29.963s total: 119.756s] +[ OK ] (18/42) HelloMultiLangTest %lang=cpp @daint:mc+intel [compile: 33.792s run: 87.427s total: 130.572s] +[ OK ] (19/42) HelloMultiLangTest %lang=cpp @daint:mc+pgi [compile: 33.120s run: 84.192s total: 130.591s] +[ OK ] (20/42) HelloMultiLangTest %lang=cpp @daint:mc+cray [compile: 32.474s run: 81.119s total: 130.609s] +[ OK ] (21/42) HelloMultiLangTest %lang=c @daint:mc+pgi [compile: 13.468s run: 51.389s total: 130.540s] +[ OK ] (22/42) HelloMultiLangTest %lang=c @daint:mc+cray [compile: 12.847s run: 48.146s total: 130.559s] +[ OK ] (23/42) HelloMultiLangTest %lang=cpp @daint:gpu+pgi [compile: 8.167s run: 120.870s total: 138.874s] +[ OK ] (24/42) HelloMultiLangTest %lang=cpp @daint:gpu+cray [compile: 7.412s run: 109.470s total: 138.883s] +[ OK ] (25/42) HelloMultiLangTest %lang=c @daint:gpu+gnu [compile: 13.293s run: 81.519s total: 138.729s] +[ OK ] (26/42) HelloMultiLangTest %lang=c @daint:gpu+cray [compile: 11.378s run: 74.651s total: 138.736s] +[ OK ] (27/42) HelloMultiLangTest %lang=c @daint:mc+gnu [compile: 25.399s run: 65.789s total: 138.749s] +[ OK ] (28/42) HelloMultiLangTest %lang=c @daint:gpu+intel [compile: 12.677s run: 79.097s total: 139.421s] +[ OK ] (29/42) HelloMultiLangTest %lang=c @daint:gpu+pgi [compile: 23.579s run: 69.505s total: 139.432s] +[ OK ] (30/42) HelloThreadedExtended2Test @daint:gpu+gnu [compile: 22.616s run: 46.878s total: 139.268s] +[ OK ] (31/42) HelloThreadedExtended2Test @daint:gpu+pgi [compile: 21.265s run: 40.181s total: 139.267s] +[ OK ] (32/42) HelloThreadedExtended2Test @daint:gpu+cray [compile: 20.642s run: 37.158s total: 139.275s] +[ OK ] (33/42) HelloThreadedExtended2Test @daint:mc+gnu [compile: 4.691s run: 30.273s total: 139.280s] +[ OK ] (34/42) HelloThreadedExtended2Test @daint:mc+intel [compile: 28.304s run: 19.597s total: 139.281s] +[ OK ] (35/42) StreamWithRefTest @daint:login+gnu [compile: 24.257s run: 10.594s total: 139.286s] +[ OK ] (36/42) HelloMultiLangTest %lang=c @daint:mc+intel [compile: 14.135s run: 70.976s total: 146.961s] +[ OK ] (37/42) HelloMultiLangTest %lang=cpp @daint:mc+gnu [compile: 7.397s run: 194.065s total: 229.737s] +[ OK ] (38/42) HelloThreadedExtended2Test @daint:gpu+intel [compile: 21.956s run: 133.885s total: 229.342s] +[ OK ] (39/42) HelloThreadedExtended2Test @daint:mc+pgi [compile: 27.596s run: 106.403s total: 229.264s] +[ OK ] (40/42) HelloThreadedExtended2Test @daint:mc+cray [compile: 26.958s run: 103.318s total: 229.274s] +[ OK ] (41/42) StreamWithRefTest @daint:gpu+gnu [compile: 38.940s run: 98.873s total: 229.279s] +[ OK ] (42/42) StreamWithRefTest @daint:mc+gnu [compile: 38.304s run: 94.811s total: 229.299s] [----------] all spawned checks have finished [ PASSED ] Ran 42/42 test case(s) from 4 check(s) (0 failure(s), 0 skipped) -[==========] Finished on Wed Jan 19 18:31:31 2022 +[==========] Finished on Sat Jan 22 22:47:28 2022 ============================================================================== PERFORMANCE REPORT ------------------------------------------------------------------------------ @@ -119,24 +107,24 @@ StreamWithRefTest - daint:login - gnu * num_tasks: 1 - * Copy: 65583.2 MB/s - * Scale: 36908.7 MB/s - * Add: 38514.7 MB/s - * Triad: 38648.2 MB/s + * Copy: 67915.3 MB/s + * Scale: 37485.6 MB/s + * Add: 
39545.5 MB/s + * Triad: 39906.2 MB/s - daint:gpu - gnu * num_tasks: 1 - * Copy: 50946.8 MB/s - * Scale: 35096.1 MB/s - * Add: 38841.5 MB/s - * Triad: 38729.8 MB/s + * Copy: 50553.4 MB/s + * Scale: 34780.1 MB/s + * Add: 38043.6 MB/s + * Triad: 38522.2 MB/s - daint:mc - gnu * num_tasks: 1 - * Copy: 48686.5 MB/s - * Scale: 31394.5 MB/s - * Add: 33423.7 MB/s - * Triad: 33520.9 MB/s + * Copy: 48200.9 MB/s + * Scale: 31370.4 MB/s + * Add: 33000.2 MB/s + * Triad: 33205.5 MB/s ------------------------------------------------------------------------------ -Run report saved in '/users/user/.reframe/reports/run-report.json' -Log file(s) saved in '/tmp/rfm-63nptdxz.log' +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-n3d18lq9.log' diff --git a/docs/listings/osu_bandwidth_concretized_daint.txt b/docs/listings/osu_bandwidth_concretized_daint.txt index 050889e064..de00dc2e9f 100644 --- a/docs/listings/osu_bandwidth_concretized_daint.txt +++ b/docs/listings/osu_bandwidth_concretized_daint.txt @@ -1,12 +1,12 @@ [ReFrame Setup] - version: 3.10.0-dev.2+1e1561f7 + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [List of matched checks] - osu_bandwidth_test @daint:gpu+gnu @@ -20,4 +20,4 @@ ^fetch_osu_benchmarks ~daint @daint:gpu+gnu Concretized 7 test case(s) -Log file(s) saved in '/tmp/rfm-sew_xghv.log' +Log file(s) saved in '/tmp/rfm-uza91jj1.log' diff --git a/docs/listings/osu_bandwidth_concretized_daint_pgi.txt b/docs/listings/osu_bandwidth_concretized_daint_pgi.txt index e762da3765..ab6c51f915 100644 --- a/docs/listings/osu_bandwidth_concretized_daint_pgi.txt +++ b/docs/listings/osu_bandwidth_concretized_daint_pgi.txt @@ -1,12 +1,12 @@ [ReFrame Setup] - version: 3.10.0-dev.2+1e1561f7 + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [List of matched checks] - osu_bandwidth_test @daint:gpu+pgi @@ -14,4 +14,4 @@ ^fetch_osu_benchmarks ~daint @daint:gpu+pgi Concretized 3 test case(s) -Log file(s) saved in '/tmp/rfm-6cutxv8s.log' +Log file(s) saved in '/tmp/rfm-dnfdagj8.log' diff --git a/docs/listings/osu_bench_deps.txt 
b/docs/listings/osu_bench_deps.txt index 1c4ea1be94..4f54bba5a8 100644 --- a/docs/listings/osu_bench_deps.txt +++ b/docs/listings/osu_bench_deps.txt @@ -1,109 +1,64 @@ [ReFrame Setup] - version: 3.10.0-dev.2+e8bdbaaa + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -r' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [==========] Running 8 check(s) -[==========] Started on Wed Jan 19 22:01:19 2022 +[==========] Started on Sat Jan 22 22:49:00 2022 -[----------] started processing OSUDownloadTest (OSU benchmarks download sources) -[ RUN ] OSUDownloadTest on daint:login using builtin -[----------] finished processing OSUDownloadTest (OSU benchmarks download sources) - -[----------] started processing OSUBuildTest (OSU benchmarks build test) -[ RUN ] OSUBuildTest on daint:gpu using gnu -[ DEP ] OSUBuildTest on daint:gpu using gnu -[ RUN ] OSUBuildTest on daint:gpu using intel -[ DEP ] OSUBuildTest on daint:gpu using intel -[ RUN ] OSUBuildTest on daint:gpu using pgi -[ DEP ] OSUBuildTest on daint:gpu using pgi -[----------] finished processing OSUBuildTest (OSU benchmarks build test) - -[----------] started processing OSUAllreduceTest_16 (OSU Allreduce test) -[ RUN ] OSUAllreduceTest_16 on daint:gpu using gnu -[ DEP ] OSUAllreduceTest_16 on daint:gpu using gnu -[ RUN ] OSUAllreduceTest_16 on daint:gpu using intel -[ DEP ] OSUAllreduceTest_16 on daint:gpu using intel -[ RUN ] OSUAllreduceTest_16 on daint:gpu using pgi -[ DEP ] OSUAllreduceTest_16 on daint:gpu using pgi -[----------] finished processing OSUAllreduceTest_16 (OSU Allreduce test) - -[----------] started processing OSUAllreduceTest_8 (OSU Allreduce test) -[ RUN ] OSUAllreduceTest_8 on daint:gpu using gnu -[ DEP ] OSUAllreduceTest_8 on daint:gpu using gnu -[ RUN ] OSUAllreduceTest_8 on daint:gpu using intel -[ DEP ] OSUAllreduceTest_8 on daint:gpu using intel -[ RUN ] OSUAllreduceTest_8 on daint:gpu using pgi -[ DEP ] OSUAllreduceTest_8 on daint:gpu using pgi -[----------] finished processing OSUAllreduceTest_8 (OSU Allreduce test) - -[----------] started processing OSUAllreduceTest_4 (OSU Allreduce test) -[ RUN ] OSUAllreduceTest_4 on daint:gpu using gnu -[ DEP ] OSUAllreduceTest_4 on daint:gpu using gnu -[ RUN ] OSUAllreduceTest_4 on daint:gpu using intel -[ DEP ] OSUAllreduceTest_4 on daint:gpu using intel -[ RUN ] OSUAllreduceTest_4 on daint:gpu using pgi -[ DEP ] OSUAllreduceTest_4 on daint:gpu using pgi -[----------] finished processing OSUAllreduceTest_4 (OSU Allreduce test) - -[----------] started processing OSUAllreduceTest_2 (OSU Allreduce test) -[ RUN ] OSUAllreduceTest_2 on daint:gpu using gnu -[ DEP ] OSUAllreduceTest_2 on daint:gpu using gnu -[ RUN ] OSUAllreduceTest_2 on daint:gpu using intel -[ DEP ] OSUAllreduceTest_2 on daint:gpu using intel -[ RUN ] OSUAllreduceTest_2 on daint:gpu using pgi -[ DEP ] OSUAllreduceTest_2 on daint:gpu using pgi -[----------] finished processing 
OSUAllreduceTest_2 (OSU Allreduce test) - -[----------] started processing OSUBandwidthTest (OSU bandwidth test) -[ RUN ] OSUBandwidthTest on daint:gpu using gnu -[ DEP ] OSUBandwidthTest on daint:gpu using gnu -[ RUN ] OSUBandwidthTest on daint:gpu using intel -[ DEP ] OSUBandwidthTest on daint:gpu using intel -[ RUN ] OSUBandwidthTest on daint:gpu using pgi -[ DEP ] OSUBandwidthTest on daint:gpu using pgi -[----------] finished processing OSUBandwidthTest (OSU bandwidth test) - -[----------] started processing OSULatencyTest (OSU latency test) -[ RUN ] OSULatencyTest on daint:gpu using gnu -[ DEP ] OSULatencyTest on daint:gpu using gnu -[ RUN ] OSULatencyTest on daint:gpu using intel -[ DEP ] OSULatencyTest on daint:gpu using intel -[ RUN ] OSULatencyTest on daint:gpu using pgi -[ DEP ] OSULatencyTest on daint:gpu using pgi -[----------] finished processing OSULatencyTest (OSU latency test) - -[----------] waiting for spawned checks to finish -[ OK ] ( 1/22) OSUDownloadTest @daint:login+builtin [compile: 0.008s run: 1.125s total: 1.159s] -[ OK ] ( 2/22) OSUBuildTest @daint:gpu+gnu [compile: 25.387s run: 0.052s total: 99.859s] -[ OK ] ( 3/22) OSUBuildTest @daint:gpu+pgi [compile: 31.630s run: 67.980s total: 99.737s] -[ OK ] ( 4/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi [compile: 0.009s run: 34.229s total: 48.473s] -[ OK ] ( 5/22) OSULatencyTest @daint:gpu+gnu [compile: 0.009s run: 44.246s total: 48.462s] -[ OK ] ( 6/22) OSUBuildTest @daint:gpu+intel [compile: 42.458s run: 74.164s total: 148.541s] -[ OK ] ( 7/22) OSULatencyTest @daint:gpu+intel [compile: 0.009s run: 26.194s total: 26.229s] -[ OK ] ( 8/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu [compile: 0.009s run: 40.997s total: 75.008s] -[ OK ] ( 9/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu [compile: 0.009s run: 48.053s total: 75.012s] -[ OK ] (10/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu [compile: 0.009s run: 57.616s total: 75.014s] -[ OK ] (11/22) OSULatencyTest @daint:gpu+pgi [compile: 0.009s run: 74.928s total: 74.963s] -[ OK ] (12/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel [compile: 0.012s run: 20.491s total: 26.871s] -[ OK ] (13/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi [compile: 0.009s run: 38.207s total: 75.629s] -[ OK ] (14/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi [compile: 0.009s run: 52.350s total: 75.599s] -[ OK ] (15/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi [compile: 0.011s run: 45.749s total: 75.990s] -[ OK ] (16/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu [compile: 0.010s run: 34.136s total: 76.337s] -[ OK ] (17/22) OSUBandwidthTest @daint:gpu+pgi [compile: 0.009s run: 84.226s total: 91.675s] -[ OK ] (18/22) OSUBandwidthTest @daint:gpu+gnu [compile: 0.009s run: 81.048s total: 92.037s] -[ OK ] (19/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel [compile: 0.009s run: 33.734s total: 50.373s] -[ OK ] (20/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel [compile: 0.010s run: 39.285s total: 50.622s] -[ OK ] (21/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel [compile: 0.009s run: 30.307s total: 50.925s] -[ OK ] (22/22) OSUBandwidthTest @daint:gpu+intel [compile: 0.009s run: 82.258s total: 85.500s] +[----------] start processing checks +[ RUN ] OSUDownloadTest @daint:login+builtin +[ OK ] ( 1/22) OSUDownloadTest @daint:login+builtin [compile: 0.017s run: 1.547s total: 1.594s] +[ RUN ] OSUBuildTest @daint:gpu+gnu +[ RUN ] OSUBuildTest @daint:gpu+intel +[ RUN ] OSUBuildTest @daint:gpu+pgi +[ OK ] ( 2/22) OSUBuildTest @daint:gpu+gnu [compile: 28.351s run: 
2.614s total: 31.045s] +[ RUN ] OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu +[ RUN ] OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu +[ RUN ] OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu +[ RUN ] OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu +[ RUN ] OSUBandwidthTest @daint:gpu+gnu +[ RUN ] OSULatencyTest @daint:gpu+gnu +[ OK ] ( 3/22) OSUBuildTest @daint:gpu+intel [compile: 56.259s run: 0.294s total: 57.548s] +[ OK ] ( 4/22) OSUBuildTest @daint:gpu+pgi [compile: 55.287s run: 0.274s total: 57.549s] +[ RUN ] OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi +[ RUN ] OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi +[ RUN ] OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi +[ RUN ] OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi +[ RUN ] OSUBandwidthTest @daint:gpu+intel +[ RUN ] OSUBandwidthTest @daint:gpu+pgi +[ RUN ] OSULatencyTest @daint:gpu+intel +[ RUN ] OSULatencyTest @daint:gpu+pgi +[ OK ] ( 5/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu [compile: 0.019s run: 62.714s total: 66.672s] +[ OK ] ( 6/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu [compile: 0.021s run: 66.653s total: 67.092s] +[ OK ] ( 7/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu [compile: 0.019s run: 59.875s total: 67.058s] +[ OK ] ( 8/22) OSULatencyTest @daint:gpu+gnu [compile: 0.022s run: 81.297s total: 102.720s] +[ OK ] ( 9/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu [compile: 0.023s run: 97.213s total: 107.661s] +[ OK ] (10/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel [compile: 0.017s run: 80.743s total: 81.586s] +[ OK ] (11/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi [compile: 0.017s run: 141.746s total: 145.957s] +[ OK ] (12/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel [compile: 0.016s run: 138.667s total: 145.944s] +[ OK ] (13/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi [compile: 0.017s run: 135.257s total: 145.938s] +[ OK ] (14/22) OSUBandwidthTest @daint:gpu+gnu [compile: 0.034s run: 156.112s total: 172.474s] +[ OK ] (15/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel [compile: 0.017s run: 173.876s total: 187.629s] +[ OK ] (16/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi [compile: 0.016s run: 171.544s total: 194.752s] +[ OK ] (17/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel [compile: 0.017s run: 175.095s total: 195.082s] +[ OK ] (18/22) OSULatencyTest @daint:gpu+pgi [compile: 0.017s run: 159.422s total: 195.672s] +[ OK ] (19/22) OSULatencyTest @daint:gpu+intel [compile: 0.017s run: 163.070s total: 196.207s] +[ OK ] (20/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi [compile: 0.016s run: 180.370s total: 197.379s] +[ OK ] (21/22) OSUBandwidthTest @daint:gpu+intel [compile: 0.017s run: 240.385s total: 266.772s] +[ OK ] (22/22) OSUBandwidthTest @daint:gpu+pgi [compile: 0.018s run: 236.944s total: 266.766s] [----------] all spawned checks have finished [ PASSED ] Ran 22/22 test case(s) from 8 check(s) (0 failure(s), 0 skipped) -[==========] Finished on Wed Jan 19 22:05:15 2022 -Run report saved in '/users/user/.reframe/reports/run-report.json' -Log file(s) saved in '/tmp/rfm-8xfehbvy.log' +[==========] Finished on Sat Jan 22 22:54:26 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-15ghvao1.log' diff --git a/docs/listings/osu_bench_fixtures_list.txt b/docs/listings/osu_bench_fixtures_list.txt index 
cfafada8f6..c863cdcc63 100644 --- a/docs/listings/osu_bench_fixtures_list.txt +++ b/docs/listings/osu_bench_fixtures_list.txt @@ -1,12 +1,12 @@ [ReFrame Setup] - version: 3.10.0-dev.2+1e1561f7 + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -l' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [List of matched checks] - osu_allreduce_test %mpi_tasks=16 @@ -53,4 +53,4 @@ ^fetch_osu_benchmarks ~daint Found 6 check(s) -Log file(s) saved in '/tmp/rfm-31ywvi49.log' +Log file(s) saved in '/tmp/rfm-eopdze64.log' diff --git a/docs/listings/osu_bench_fixtures_run.txt b/docs/listings/osu_bench_fixtures_run.txt index e514854885..694cb29dd6 100644 --- a/docs/listings/osu_bench_fixtures_run.txt +++ b/docs/listings/osu_bench_fixtures_run.txt @@ -1,115 +1,64 @@ [ReFrame Setup] - version: 3.10.0-dev.2+1e1561f7 + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -r' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [==========] Running 10 check(s) -[==========] Started on Thu Jan 20 11:25:22 2022 +[==========] Started on Sat Jan 22 22:54:32 2022 -[----------] started processing fetch_osu_benchmarks_ba14252c (Fetch OSU benchmarks) -[ RUN  ] fetch_osu_benchmarks_ba14252c on daint:gpu using gnu -[----------] finished processing fetch_osu_benchmarks_ba14252c (Fetch OSU benchmarks) - -[----------] started processing build_osu_benchmarks_01b07297 (Build OSU benchmarks) -[ RUN  ] build_osu_benchmarks_01b07297 on daint:gpu using gnu -[  DEP ] build_osu_benchmarks_01b07297 on daint:gpu using gnu -[----------] finished processing build_osu_benchmarks_01b07297 (Build OSU benchmarks) - -[----------] started processing build_osu_benchmarks_90e14d9d (Build OSU benchmarks) -[ RUN  ] build_osu_benchmarks_90e14d9d on daint:gpu using intel -[  DEP ] build_osu_benchmarks_90e14d9d on daint:gpu using intel -[----------] finished processing build_osu_benchmarks_90e14d9d (Build OSU benchmarks) - -[----------] started processing build_osu_benchmarks_845fc6e3 (Build OSU benchmarks) -[ RUN  ] build_osu_benchmarks_845fc6e3 on daint:gpu using pgi -[  DEP ] build_osu_benchmarks_845fc6e3 on daint:gpu using pgi -[----------] finished processing build_osu_benchmarks_845fc6e3 (Build OSU benchmarks) - -[----------] started processing osu_allreduce_test_16 (OSU 
Allreduce test) -[ RUN  ] osu_allreduce_test_16 on daint:gpu using gnu -[  DEP ] osu_allreduce_test_16 on daint:gpu using gnu -[ RUN  ] osu_allreduce_test_16 on daint:gpu using intel -[  DEP ] osu_allreduce_test_16 on daint:gpu using intel -[ RUN  ] osu_allreduce_test_16 on daint:gpu using pgi -[  DEP ] osu_allreduce_test_16 on daint:gpu using pgi -[----------] finished processing osu_allreduce_test_16 (OSU Allreduce test) - -[----------] started processing osu_allreduce_test_8 (OSU Allreduce test) -[ RUN  ] osu_allreduce_test_8 on daint:gpu using gnu -[  DEP ] osu_allreduce_test_8 on daint:gpu using gnu -[ RUN  ] osu_allreduce_test_8 on daint:gpu using intel -[  DEP ] osu_allreduce_test_8 on daint:gpu using intel -[ RUN  ] osu_allreduce_test_8 on daint:gpu using pgi -[  DEP ] osu_allreduce_test_8 on daint:gpu using pgi -[----------] finished processing osu_allreduce_test_8 (OSU Allreduce test) - -[----------] started processing osu_allreduce_test_4 (OSU Allreduce test) -[ RUN  ] osu_allreduce_test_4 on daint:gpu using gnu -[  DEP ] osu_allreduce_test_4 on daint:gpu using gnu -[ RUN  ] osu_allreduce_test_4 on daint:gpu using intel -[  DEP ] osu_allreduce_test_4 on daint:gpu using intel -[ RUN  ] osu_allreduce_test_4 on daint:gpu using pgi -[  DEP ] osu_allreduce_test_4 on daint:gpu using pgi -[----------] finished processing osu_allreduce_test_4 (OSU Allreduce test) - -[----------] started processing osu_allreduce_test_2 (OSU Allreduce test) -[ RUN  ] osu_allreduce_test_2 on daint:gpu using gnu -[  DEP ] osu_allreduce_test_2 on daint:gpu using gnu -[ RUN  ] osu_allreduce_test_2 on daint:gpu using intel -[  DEP ] osu_allreduce_test_2 on daint:gpu using intel -[ RUN  ] osu_allreduce_test_2 on daint:gpu using pgi -[  DEP ] osu_allreduce_test_2 on daint:gpu using pgi -[----------] finished processing osu_allreduce_test_2 (OSU Allreduce test) - -[----------] started processing osu_bandwidth_test (OSU bandwidth test) -[ RUN  ] osu_bandwidth_test on daint:gpu using gnu -[  DEP ] osu_bandwidth_test on daint:gpu using gnu -[ RUN  ] osu_bandwidth_test on daint:gpu using intel -[  DEP ] osu_bandwidth_test on daint:gpu using intel -[ RUN  ] osu_bandwidth_test on daint:gpu using pgi -[  DEP ] osu_bandwidth_test on daint:gpu using pgi -[----------] finished processing osu_bandwidth_test (OSU bandwidth test) - -[----------] started processing osu_latency_test (OSU latency test) -[ RUN  ] osu_latency_test on daint:gpu using gnu -[  DEP ] osu_latency_test on daint:gpu using gnu -[ RUN  ] osu_latency_test on daint:gpu using intel -[  DEP ] osu_latency_test on daint:gpu using intel -[ RUN  ] osu_latency_test on daint:gpu using pgi -[  DEP ] osu_latency_test on daint:gpu using pgi -[----------] finished processing osu_latency_test (OSU latency test) - -[----------] waiting for spawned checks to finish -[  OK ] ( 1/22) fetch_osu_benchmarks ~daint @daint:gpu+gnu [compile: 0.008s run: 3.024s total: 3.064s] -[  OK ] ( 2/22) build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu [compile: 26.745s run: 0.061s total: 127.595s] -[  OK ] ( 3/22) build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi [compile: 35.453s run: 92.081s total: 127.581s] -[  OK ] ( 4/22) build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel [compile: 65.206s run: 80.848s total: 181.596s] -[  OK ] ( 5/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi [compile: 0.011s run: 60.059s total: 85.771s] -[  OK ] ( 6/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi [compile: 0.012s run: 66.442s total: 85.783s] -[  OK ] ( 7/22) osu_allreduce_test 
%mpi_tasks=2 @daint:gpu+pgi [compile: 0.012s run: 72.865s total: 85.793s] -[  OK ] ( 8/22) osu_latency_test @daint:gpu+pgi [compile: 0.012s run: 85.720s total: 85.763s] -[  OK ] ( 9/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu [compile: 0.011s run: 57.628s total: 86.430s] -[  OK ] (10/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu [compile: 0.013s run: 70.200s total: 86.406s] -[  OK ] (11/22) osu_latency_test @daint:gpu+gnu [compile: 0.011s run: 83.179s total: 86.381s] -[  OK ] (12/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu [compile: 0.011s run: 64.432s total: 86.937s] -[  OK ] (13/22) osu_bandwidth_test @daint:gpu+gnu [compile: 0.012s run: 122.812s total: 132.304s] -[  OK ] (14/22) osu_bandwidth_test @daint:gpu+pgi [compile: 0.011s run: 130.589s total: 136.884s] -[  OK ] (15/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel [compile: 0.012s run: 136.997s total: 155.368s] -[  OK ] (16/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel [compile: 0.011s run: 132.810s total: 157.931s] -[  OK ] (17/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi [compile: 0.011s run: 180.444s total: 212.537s] -[  OK ] (18/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu [compile: 0.013s run: 177.875s total: 213.031s] -[  OK ] (19/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel [compile: 0.013s run: 131.221s total: 159.503s] -[  OK ] (20/22) osu_latency_test @daint:gpu+intel [compile: 0.010s run: 159.878s total: 159.917s] -[  OK ] (21/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel [compile: 0.011s run: 139.273s total: 160.941s] -[  OK ] (22/22) osu_bandwidth_test @daint:gpu+intel [compile: 0.012s run: 205.774s total: 220.374s] +[----------] start processing checks +[ RUN ] fetch_osu_benchmarks ~daint @daint:gpu+gnu +[ OK ] ( 1/22) fetch_osu_benchmarks ~daint @daint:gpu+gnu [compile: 0.019s run: 2.664s total: 2.716s] +[ RUN ] build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu +[ RUN ] build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel +[ RUN ] build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi +[ OK ] ( 2/22) build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu [compile: 30.544s run: 2.874s total: 33.480s] +[ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu +[ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu +[ RUN ] osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu +[ RUN ] osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu +[ RUN ] osu_bandwidth_test @daint:gpu+gnu +[ RUN ] osu_latency_test @daint:gpu+gnu +[ OK ] ( 3/22) build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel [compile: 55.178s run: 0.340s total: 56.194s] +[ OK ] ( 4/22) build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi [compile: 54.528s run: 0.299s total: 56.200s] +[ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi +[ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi +[ RUN ] osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi +[ RUN ] osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=2 @daint:gpu+pgi +[ RUN ] osu_bandwidth_test @daint:gpu+intel +[ RUN ] osu_bandwidth_test @daint:gpu+pgi +[ RUN ] osu_latency_test @daint:gpu+intel +[ RUN ] osu_latency_test @daint:gpu+pgi +[ OK ] ( 5/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu [compile: 0.025s run: 84.808s total: 85.279s] +[ OK ] ( 6/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel [compile: 0.021s run: 61.746s total: 
62.582s] +[ OK ] ( 7/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi [compile: 0.020s run: 58.596s total: 62.578s] +[ OK ] ( 8/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu [compile: 0.024s run: 81.570s total: 85.360s] +[ OK ] ( 9/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu [compile: 0.027s run: 78.317s total: 85.373s] +[ OK ] (10/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu [compile: 0.025s run: 74.995s total: 85.378s] +[ OK ] (11/22) osu_latency_test @daint:gpu+gnu [compile: 0.023s run: 67.544s total: 85.354s] +[ OK ] (12/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel [compile: 0.020s run: 81.284s total: 88.299s] +[ OK ] (13/22) osu_bandwidth_test @daint:gpu+gnu [compile: 0.027s run: 103.550s total: 117.202s] +[ OK ] (14/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel [compile: 0.021s run: 74.634s total: 107.857s] +[ OK ] (15/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel [compile: 0.020s run: 68.403s total: 108.389s] +[ OK ] (16/22) osu_latency_test @daint:gpu+pgi [compile: 0.023s run: 53.269s total: 109.726s] +[ OK ] (17/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi [compile: 0.020s run: 95.593s total: 110.404s] +[ OK ] (18/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi [compile: 0.022s run: 74.523s total: 110.862s] +[ OK ] (19/22) osu_latency_test @daint:gpu+intel [compile: 0.020s run: 74.264s total: 127.566s] +[ OK ] (20/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+pgi [compile: 0.021s run: 85.042s total: 128.693s] +[ OK ] (21/22) osu_bandwidth_test @daint:gpu+intel [compile: 0.020s run: 126.015s total: 172.912s] +[ OK ] (22/22) osu_bandwidth_test @daint:gpu+pgi [compile: 0.023s run: 135.990s total: 185.967s] [----------] all spawned checks have finished -[  PASSED  ] Ran 22/22 test case(s) from 10 check(s) (0 failure(s), 0 skipped) -[==========] Finished on Thu Jan 20 11:32:08 2022 -Run report saved in '/users/user/.reframe/reports/run-report.json' -Log file(s) saved in '/tmp/rfm-4e8yn_rh.log' +[ PASSED ] Ran 22/22 test case(s) from 10 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 22:58:37 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-ii5sv1zr.log' diff --git a/docs/listings/osu_bench_list_concretized.txt b/docs/listings/osu_bench_list_concretized.txt index 34027c8aaf..cd4482f761 100644 --- a/docs/listings/osu_bench_list_concretized.txt +++ b/docs/listings/osu_bench_list_concretized.txt @@ -1,12 +1,12 @@ [ReFrame Setup] - version: 3.10.0-dev.2+d9b4d32b + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -lC' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [List of matched checks] - OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu @@ -65,4 +65,4 @@ ^OSUDownloadTest @daint:login+builtin Concretized 22 test case(s) -Log file(s) saved in '/tmp/rfm-wzss58qz.log' +Log file(s) saved in '/tmp/rfm-l3eamaiy.log' diff --git 
a/docs/listings/osu_bench_list_concretized_gnu.txt b/docs/listings/osu_bench_list_concretized_gnu.txt index 34fa3de266..230add0428 100644 --- a/docs/listings/osu_bench_list_concretized_gnu.txt +++ b/docs/listings/osu_bench_list_concretized_gnu.txt @@ -1,32 +1,17 @@ [ReFrame Setup] - version: 3.10.0-dev.2+d9b4d32b - command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -l -p builtin -p gnu' + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [List of matched checks] -- OSUAllreduceTest %mpi_tasks=16 - ^OSUBuildTest - ^OSUDownloadTest -- OSUAllreduceTest %mpi_tasks=8 - ^OSUBuildTest - ^OSUDownloadTest -- OSUAllreduceTest %mpi_tasks=4 - ^OSUBuildTest - ^OSUDownloadTest -- OSUAllreduceTest %mpi_tasks=2 - ^OSUBuildTest - ^OSUDownloadTest -- OSUBandwidthTest - ^OSUBuildTest - ^OSUDownloadTest -- OSULatencyTest - ^OSUBuildTest - ^OSUDownloadTest -Found 8 check(s) +- OSULatencyTest [id: OSULatencyTest, file: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py'] + ^OSUBuildTest [id: OSUBuildTest, file: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py'] + ^OSUDownloadTest [id: OSUDownloadTest, file: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py'] +Found 3 check(s) -Log file(s) saved in '/tmp/rfm-mizz6g1t.log' +Log file(s) saved in '/tmp/rfm-klltwsex.log' diff --git a/docs/listings/osu_latency_list.txt b/docs/listings/osu_latency_list.txt index 7aeb18a96d..c6e133c3ea 100644 --- a/docs/listings/osu_latency_list.txt +++ b/docs/listings/osu_latency_list.txt @@ -1,21 +1,17 @@ [ReFrame Setup] - version: 3.10.0-dev.2+e8bdbaaa + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [List of matched checks] - OSULatencyTest ^OSUBuildTest ^OSUDownloadTest - ^OSUBuildTest - ^OSUDownloadTest - ^OSUBuildTest - ^OSUDownloadTest Found 3 check(s) -Log file(s) saved in '/tmp/rfm-qrs60mvh.log' +Log file(s) saved in '/tmp/rfm-zc483csf.log' diff --git a/docs/listings/osu_latency_unresolved_deps.txt b/docs/listings/osu_latency_unresolved_deps.txt index 3b09140588..75718b9060 100644 --- a/docs/listings/osu_latency_unresolved_deps.txt +++ 
b/docs/listings/osu_latency_unresolved_deps.txt @@ -1,40 +1,40 @@ [ReFrame Setup] - version: 3.10.0-dev.2+a1df4e67 - command: './bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l' + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest --system=daint:gpu -l' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'gnu') -> 'OSUDownloadTest' ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'intel') -> 'OSUDownloadTest' ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'pgi') -> 'OSUDownloadTest' ./bin/reframe: skipping all dependent test cases - - ('OSUBuildTest', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_4', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_8', 'daint:gpu', 'gnu') - - ('OSULatencyTest', 'daint:gpu', 'gnu') - - ('OSUBuildTest', 'daint:gpu', 'intel') - ('OSUBuildTest', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') - - ('OSUBandwidthTest', 'daint:gpu', 'intel') - - ('OSUBandwidthTest', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_4', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') - - ('OSUBandwidthTest', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') + - ('OSUBuildTest', 'daint:gpu', 'intel') - ('OSUAllreduceTest_8', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') - ('OSUAllreduceTest_16', 'daint:gpu', 'pgi') + - ('OSUBuildTest', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_4', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') + - ('OSULatencyTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_8', 'daint:gpu', 'gnu') - ('OSUAllreduceTest_2', 'daint:gpu', 'pgi') + - ('OSUBandwidthTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') + - ('OSUBandwidthTest', 'daint:gpu', 'intel') - ('OSULatencyTest', 'daint:gpu', 'intel') - - ('OSULatencyTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_4', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') + - ('OSUBandwidthTest', 'daint:gpu', 'gnu') + - ('OSULatencyTest', 'daint:gpu', 'gnu') [List of matched checks] Found 0 check(s) -Log file(s) saved in '/tmp/rfm-3a27s3qw.log' +Log file(s) saved in '/tmp/rfm-k1w20m9z.log' diff --git a/docs/listings/param_deps_list.txt b/docs/listings/param_deps_list.txt index 94232d5379..eb11b3968b 100644 --- a/docs/listings/param_deps_list.txt +++ b/docs/listings/param_deps_list.txt @@ -1,12 +1,12 @@ [ReFrame Setup] - version: 3.10.0-dev.2+1e1561f7 + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/deps/parameterized.py -l' 
launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/parameterized.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/parameterized.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [List of matched checks] - TestB @@ -22,4 +22,4 @@ - TestA %z=0 Found 11 check(s) -Log file(s) saved in '/tmp/rfm-79hx1u2k.log' +Log file(s) saved in '/tmp/rfm-iey58chw.log' diff --git a/docs/listings/stream4_daint.txt b/docs/listings/stream4_daint.txt index fa5517d485..1b800a70bb 100644 --- a/docs/listings/stream4_daint.txt +++ b/docs/listings/stream4_daint.txt @@ -1,48 +1,45 @@ [ReFrame Setup] - version: 3.10.0-dev.2+2a281443 + version: 3.10.0-dev.3+605af31a command: './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report' launched by: user@host - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/basics/stream/stream4.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/basics/stream/stream4.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' [==========] Running 1 check(s) -[==========] Started on Wed Jan 19 18:40:59 2022 +[==========] Started on Sat Jan 22 22:47:28 2022 -[----------] started processing StreamMultiSysTest (StreamMultiSysTest) -[ RUN ] StreamMultiSysTest on daint:login using gnu -[ RUN ] StreamMultiSysTest on daint:login using intel -[ RUN ] StreamMultiSysTest on daint:login using pgi -[ RUN ] StreamMultiSysTest on daint:login using cray -[ RUN ] StreamMultiSysTest on daint:gpu using gnu -[ RUN ] StreamMultiSysTest on daint:gpu using intel -[ RUN ] StreamMultiSysTest on daint:gpu using pgi -[ RUN ] StreamMultiSysTest on daint:gpu using cray -[ RUN ] StreamMultiSysTest on daint:mc using gnu -[ RUN ] StreamMultiSysTest on daint:mc using intel -[ RUN ] StreamMultiSysTest on daint:mc using pgi -[ RUN ] StreamMultiSysTest on daint:mc using cray -[----------] finished processing StreamMultiSysTest (StreamMultiSysTest) - -[----------] waiting for spawned checks to finish -[ OK ] ( 1/12) StreamMultiSysTest @daint:login+pgi [compile: 10.793s run: 49.861s total: 60.881s] -[ OK ] ( 2/12) StreamMultiSysTest @daint:login+gnu [compile: 1.928s run: 72.907s total: 74.867s] -[ OK ] ( 3/12) StreamMultiSysTest @daint:login+intel [compile: 7.659s run: 63.146s total: 71.139s] -[ OK ] ( 4/12) StreamMultiSysTest @daint:login+cray [compile: 0.582s run: 48.996s total: 49.614s] -[ OK ] ( 5/12) StreamMultiSysTest @daint:mc+pgi [compile: 1.969s run: 71.561s total: 73.560s] -[ OK ] ( 6/12) StreamMultiSysTest @daint:mc+gnu [compile: 1.962s run: 87.247s total: 89.240s] -[ OK ] ( 7/12) StreamMultiSysTest @daint:mc+intel [compile: 2.176s run: 78.063s total: 80.268s] -[ OK ] ( 8/12) StreamMultiSysTest @daint:mc+cray [compile: 0.564s run: 
68.645s total: 69.238s] -[ OK ] ( 9/12) StreamMultiSysTest @daint:gpu+pgi [compile: 2.529s run: 244.848s total: 247.413s] -[ OK ] (10/12) StreamMultiSysTest @daint:gpu+gnu [compile: 3.108s run: 257.580s total: 260.723s] -[ OK ] (11/12) StreamMultiSysTest @daint:gpu+intel [compile: 2.348s run: 251.428s total: 253.808s] -[ OK ] (12/12) StreamMultiSysTest @daint:gpu+cray [compile: 0.802s run: 241.325s total: 242.159s] +[----------] start processing checks +[ RUN ] StreamMultiSysTest @daint:login+gnu +[ RUN ] StreamMultiSysTest @daint:login+intel +[ RUN ] StreamMultiSysTest @daint:login+pgi +[ RUN ] StreamMultiSysTest @daint:login+cray +[ RUN ] StreamMultiSysTest @daint:gpu+gnu +[ RUN ] StreamMultiSysTest @daint:gpu+intel +[ RUN ] StreamMultiSysTest @daint:gpu+pgi +[ RUN ] StreamMultiSysTest @daint:gpu+cray +[ RUN ] StreamMultiSysTest @daint:mc+gnu +[ RUN ] StreamMultiSysTest @daint:mc+intel +[ RUN ] StreamMultiSysTest @daint:mc+pgi +[ RUN ] StreamMultiSysTest @daint:mc+cray +[ OK ] ( 1/12) StreamMultiSysTest @daint:login+gnu [compile: 4.024s run: 21.615s total: 28.185s] +[ OK ] ( 2/12) StreamMultiSysTest @daint:login+intel [compile: 3.410s run: 20.976s total: 28.208s] +[ OK ] ( 3/12) StreamMultiSysTest @daint:login+pgi [compile: 2.734s run: 20.235s total: 28.226s] +[ OK ] ( 4/12) StreamMultiSysTest @daint:login+cray [compile: 2.104s run: 19.571s total: 28.242s] +[ OK ] ( 5/12) StreamMultiSysTest @daint:gpu+gnu [compile: 2.102s run: 30.129s total: 38.813s] +[ OK ] ( 6/12) StreamMultiSysTest @daint:gpu+pgi [compile: 8.695s run: 22.117s total: 38.826s] +[ OK ] ( 7/12) StreamMultiSysTest @daint:gpu+cray [compile: 8.083s run: 19.050s total: 38.852s] +[ OK ] ( 8/12) StreamMultiSysTest @daint:gpu+intel [compile: 9.369s run: 37.641s total: 50.212s] +[ OK ] ( 9/12) StreamMultiSysTest @daint:mc+gnu [compile: 7.970s run: 28.955s total: 52.297s] +[ OK ] (10/12) StreamMultiSysTest @daint:mc+cray [compile: 20.508s run: 30.812s total: 65.951s] +[ OK ] (11/12) StreamMultiSysTest @daint:mc+pgi [compile: 21.186s run: 34.898s total: 66.325s] +[ OK ] (12/12) StreamMultiSysTest @daint:mc+intel [compile: 21.890s run: 62.451s total: 90.626s] [----------] all spawned checks have finished [ PASSED ] Ran 12/12 test case(s) from 1 check(s) (0 failure(s), 0 skipped) -[==========] Finished on Wed Jan 19 18:45:47 2022 +[==========] Finished on Sat Jan 22 22:48:59 2022 ============================================================================== PERFORMANCE REPORT ------------------------------------------------------------------------------ @@ -50,78 +47,78 @@ StreamMultiSysTest - daint:login - gnu * num_tasks: 1 - * Copy: 101679.8 MB/s - * Scale: 45191.5 MB/s - * Add: 54368.5 MB/s - * Triad: 60150.7 MB/s + * Copy: 108525.7 MB/s + * Scale: 76882.1 MB/s + * Add: 81155.7 MB/s + * Triad: 82433.2 MB/s - intel * num_tasks: 1 - * Copy: 91599.9 MB/s - * Scale: 84242.4 MB/s - * Add: 106380.9 MB/s - * Triad: 108751.1 MB/s + * Copy: 82341.7 MB/s + * Scale: 81330.6 MB/s + * Add: 72076.0 MB/s + * Triad: 101808.5 MB/s - pgi * num_tasks: 1 - * Copy: 34313.7 MB/s - * Scale: 27147.5 MB/s - * Add: 40151.0 MB/s - * Triad: 40265.4 MB/s + * Copy: 94336.0 MB/s + * Scale: 69096.9 MB/s + * Add: 73484.2 MB/s + * Triad: 73243.6 MB/s - cray * num_tasks: 1 - * Copy: 44660.8 MB/s - * Scale: 24224.9 MB/s - * Add: 32372.7 MB/s - * Triad: 42503.4 MB/s + * Copy: 114374.2 MB/s + * Scale: 76205.6 MB/s + * Add: 82184.5 MB/s + * Triad: 76086.3 MB/s - daint:gpu - gnu * num_tasks: 1 - * Copy: 42720.5 MB/s - * Scale: 38430.6 MB/s - * Add: 43645.7 MB/s - * 
Triad: 43969.6 MB/s + * Copy: 42963.4 MB/s + * Scale: 38504.8 MB/s + * Add: 43650.2 MB/s + * Triad: 43876.5 MB/s - intel * num_tasks: 1 - * Copy: 52676.1 MB/s - * Scale: 54405.8 MB/s - * Add: 59010.5 MB/s - * Triad: 59135.5 MB/s + * Copy: 52505.4 MB/s + * Scale: 54131.1 MB/s + * Add: 58918.8 MB/s + * Triad: 59048.6 MB/s - pgi * num_tasks: 1 - * Copy: 50671.7 MB/s - * Scale: 39562.9 MB/s - * Add: 43926.7 MB/s - * Triad: 44044.7 MB/s + * Copy: 50472.9 MB/s + * Scale: 39545.5 MB/s + * Add: 43881.6 MB/s + * Triad: 43972.4 MB/s - cray * num_tasks: 1 - * Copy: 50864.0 MB/s - * Scale: 39099.2 MB/s - * Add: 43314.9 MB/s - * Triad: 43936.4 MB/s + * Copy: 50610.2 MB/s + * Scale: 38990.9 MB/s + * Add: 43158.9 MB/s + * Triad: 43792.9 MB/s - daint:mc - gnu * num_tasks: 1 - * Copy: 48660.2 MB/s - * Scale: 38660.2 MB/s - * Add: 43688.1 MB/s - * Triad: 44030.3 MB/s + * Copy: 48650.7 MB/s + * Scale: 38618.4 MB/s + * Add: 43504.1 MB/s + * Triad: 44044.1 MB/s - intel * num_tasks: 1 - * Copy: 52582.7 MB/s - * Scale: 48775.1 MB/s - * Add: 57207.5 MB/s - * Triad: 57349.3 MB/s + * Copy: 52500.5 MB/s + * Scale: 48545.9 MB/s + * Add: 57150.3 MB/s + * Triad: 57272.4 MB/s - pgi * num_tasks: 1 - * Copy: 46134.0 MB/s - * Scale: 40549.6 MB/s - * Add: 44189.3 MB/s - * Triad: 44531.3 MB/s + * Copy: 46123.6 MB/s + * Scale: 40552.5 MB/s + * Add: 44147.7 MB/s + * Triad: 44521.9 MB/s - cray * num_tasks: 1 - * Copy: 46567.1 MB/s - * Scale: 39779.5 MB/s - * Add: 43429.1 MB/s - * Triad: 43814.4 MB/s + * Copy: 47094.0 MB/s + * Scale: 40080.4 MB/s + * Add: 43659.8 MB/s + * Triad: 44078.0 MB/s ------------------------------------------------------------------------------ -Run report saved in '/users/user/.reframe/reports/run-report.json' -Log file(s) saved in '/tmp/rfm-fuzfkpeh.log' +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-sua0bogo.log' diff --git a/tools/gendoclistings.py b/tools/gendoclistings.py index fcb81ec696..f1493f8e8b 100755 --- a/tools/gendoclistings.py +++ b/tools/gendoclistings.py @@ -15,8 +15,7 @@ def print_usage(): ListingInfo = collections.namedtuple( 'ListingInfo', - ['command', 'filename', 'tags', 'filters', 'env', 'xfail'], - defaults=[None, None, False] + ['command', 'filename', 'tags', 'filters', 'env', 'xfail'] ) @@ -28,9 +27,8 @@ def remove_system_opt(s): return s.replace(' --system=catalina', '') -def replace_paths(s): - cwd = os.getcwd() - return s.replace(cwd, '/path/to/reframe').replace(os.getenv('HOME'), '/home/user') +def replace_home(s): + return s.replace(os.getenv('HOME'), '/home/user') def replace_user(s): @@ -44,53 +42,191 @@ def replace_hostname(s): DEFAULT_FILTERS = [remove_nocolor_opt, remove_system_opt, - replace_paths, replace_user, replace_hostname] + replace_home, replace_user, replace_hostname] LISTINGS = { 'hello1': ListingInfo( - './bin/reframe --nocolor -c tutorials/basics/hello/hello1.py -r', + './bin/reframe -c tutorials/basics/hello/hello1.py -r', 'docs/listings/hello1.txt', - {'local'}, - DEFAULT_FILTERS + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False ), 'run-report': ListingInfo( f'cat {os.getenv("HOME")}/.reframe/reports/run-report.json', 'docs/listings/run-report.json', - {'local'}, - DEFAULT_FILTERS + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env=None, + xfail=False ), 'hello2': ListingInfo( - './bin/reframe --nocolor -c tutorials/basics/hello/hello2.py -r', + './bin/reframe -c tutorials/basics/hello/hello2.py -r', 'docs/listings/hello2.txt', - {'local'}, + {'local', 
'tutorial-basics'}, DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, xfail=True ), 'hello2_catalina': ListingInfo( - './bin/reframe -C tutorials/config/settings.py --system=catalina --nocolor -c tutorials/basics/hello/hello2.py -r', + './bin/reframe -C tutorials/config/settings.py --system=catalina -c tutorials/basics/hello/hello2.py -r', 'docs/listings/hello2_catalina.txt', - {'local'}, - DEFAULT_FILTERS + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False ), 'hellomp1': ListingInfo( - './bin/reframe --system=catalina --nocolor -c tutorials/basics/hellomp/hellomp1.py -r', + './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp1.py -r', 'docs/listings/hellomp1.txt', - {'local'}, + {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env={ - 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py') - } + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False ), 'hellomp2': ListingInfo( - './bin/reframe --system=catalina --nocolor -c tutorials/basics/hellomp/hellomp2.py -r', + './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp2.py -r', 'docs/listings/hellomp2.txt', - {'local'}, + {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env={ - 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py') + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' }, xfail=True + ), + 'alltests_daint': ListingInfo( + './bin/reframe -c tutorials/basics/ -R -n "HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest" --performance-report -r', + 'docs/listings/alltests_daint.txt', + {'remote', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'stream4_daint': ListingInfo( + './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report', + 'docs/listings/stream4_daint.txt', + {'remote', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_deps': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -r', + 'docs/listings/osu_bench_deps.txt', + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_latency_list': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l', + 'docs/listings/osu_latency_list.txt', + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_latency_unresolved_deps': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest --system=daint:gpu -l', + 'docs/listings/osu_latency_unresolved_deps.txt', + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_list_concretized': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -lC', + 'docs/listings/osu_bench_list_concretized.txt', + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 
'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_list_concretized_gnu': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu', + 'docs/listings/osu_bench_list_concretized_gnu.txt', + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'param_deps_list': ListingInfo( + './bin/reframe -c tutorials/deps/parameterized.py -l', + 'docs/listings/param_deps_list.txt', + {'local', 'tutorial-deps'}, + DEFAULT_FILTERS, + env=None, + xfail=False + ), + 'osu_bench_fixtures_list': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -l', + 'docs/listings/osu_bench_fixtures_list.txt', + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py') + }, + xfail=False + ), + 'osu_bandwidth_concretized_daint': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC', + 'docs/listings/osu_bandwidth_concretized_daint.txt', + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bandwidth_concretized_daint_pgi': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi', + 'docs/listings/osu_bandwidth_concretized_daint_pgi.txt', + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_fixtures_run': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -r', + 'docs/listings/osu_bench_fixtures_run.txt', + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False ) } @@ -104,7 +240,7 @@ def replace_hostname(s): choice = 'all' for name, info in LISTINGS.items(): - if choice != 'all' and choice != name: + if (choice != 'all' and choice not in info.tags and choice != name): continue print(f'Generating listing {name}...') From 4fc5b12c033e96d0fcdb565215ca98c453bb8fd2 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sat, 22 Jan 2022 23:14:22 +0100 Subject: [PATCH 55/62] Update tutorial listings --- docs/listings/osu_bench_fixtures_run.txt | 52 ++++++++++++------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/listings/osu_bench_fixtures_run.txt b/docs/listings/osu_bench_fixtures_run.txt index 694cb29dd6..1f8851671c 100644 --- a/docs/listings/osu_bench_fixtures_run.txt +++ b/docs/listings/osu_bench_fixtures_run.txt @@ -1,5 +1,5 @@ [ReFrame Setup] - version: 3.10.0-dev.3+605af31a + version: 3.10.0-dev.3+76e02667 command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -r' launched by: user@host working directory: '/home/user/Devel/reframe' @@ -9,23 +9,23 @@ output directory: '/home/user/Devel/reframe/output' [==========] Running 10 check(s) -[==========] Started on Sat Jan 22 22:54:32 2022 +[==========] Started on Sat Jan 22 23:08:13 2022 [----------] start processing checks [ RUN ] fetch_osu_benchmarks ~daint @daint:gpu+gnu -[ OK ] ( 1/22) fetch_osu_benchmarks ~daint @daint:gpu+gnu [compile: 0.019s run: 2.664s total: 2.716s] +[ OK ] ( 1/22) fetch_osu_benchmarks ~daint @daint:gpu+gnu [compile: 0.016s 
run: 2.757s total: 2.807s] [ RUN ] build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu [ RUN ] build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel [ RUN ] build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi -[ OK ] ( 2/22) build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu [compile: 30.544s run: 2.874s total: 33.480s] +[ OK ] ( 2/22) build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu [compile: 25.384s run: 2.389s total: 27.839s] [ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu [ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu [ RUN ] osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu [ RUN ] osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu [ RUN ] osu_bandwidth_test @daint:gpu+gnu [ RUN ] osu_latency_test @daint:gpu+gnu -[ OK ] ( 3/22) build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel [compile: 55.178s run: 0.340s total: 56.194s] -[ OK ] ( 4/22) build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi [compile: 54.528s run: 0.299s total: 56.200s] +[ OK ] ( 3/22) build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel [compile: 47.774s run: 0.313s total: 48.758s] +[ OK ] ( 4/22) build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi [compile: 47.127s run: 0.297s total: 48.765s] [ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel [ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi [ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel @@ -38,27 +38,27 @@ [ RUN ] osu_bandwidth_test @daint:gpu+pgi [ RUN ] osu_latency_test @daint:gpu+intel [ RUN ] osu_latency_test @daint:gpu+pgi -[ OK ] ( 5/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu [compile: 0.025s run: 84.808s total: 85.279s] -[ OK ] ( 6/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel [compile: 0.021s run: 61.746s total: 62.582s] -[ OK ] ( 7/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi [compile: 0.020s run: 58.596s total: 62.578s] -[ OK ] ( 8/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu [compile: 0.024s run: 81.570s total: 85.360s] -[ OK ] ( 9/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu [compile: 0.027s run: 78.317s total: 85.373s] -[ OK ] (10/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu [compile: 0.025s run: 74.995s total: 85.378s] -[ OK ] (11/22) osu_latency_test @daint:gpu+gnu [compile: 0.023s run: 67.544s total: 85.354s] -[ OK ] (12/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel [compile: 0.020s run: 81.284s total: 88.299s] -[ OK ] (13/22) osu_bandwidth_test @daint:gpu+gnu [compile: 0.027s run: 103.550s total: 117.202s] -[ OK ] (14/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel [compile: 0.021s run: 74.634s total: 107.857s] -[ OK ] (15/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel [compile: 0.020s run: 68.403s total: 108.389s] -[ OK ] (16/22) osu_latency_test @daint:gpu+pgi [compile: 0.023s run: 53.269s total: 109.726s] -[ OK ] (17/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi [compile: 0.020s run: 95.593s total: 110.404s] -[ OK ] (18/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi [compile: 0.022s run: 74.523s total: 110.862s] -[ OK ] (19/22) osu_latency_test @daint:gpu+intel [compile: 0.020s run: 74.264s total: 127.566s] -[ OK ] (20/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+pgi [compile: 0.021s run: 85.042s total: 128.693s] -[ OK ] (21/22) osu_bandwidth_test @daint:gpu+intel [compile: 0.020s run: 126.015s total: 172.912s] -[ OK ] (22/22) osu_bandwidth_test @daint:gpu+pgi [compile: 0.023s run: 135.990s total: 185.967s] +[ OK ] ( 5/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu [compile: 0.022s run: 63.846s total: 64.319s] +[ OK ] ( 6/22) 
osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu [compile: 0.024s run: 56.997s total: 64.302s] +[ OK ] ( 7/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu [compile: 0.024s run: 56.187s total: 66.616s] +[ OK ] ( 8/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu [compile: 0.026s run: 82.220s total: 86.255s] +[ OK ] ( 9/22) osu_bandwidth_test @daint:gpu+gnu [compile: 0.023s run: 128.535s total: 142.154s] +[ OK ] (10/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi [compile: 0.023s run: 168.876s total: 185.476s] +[ OK ] (11/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel [compile: 0.020s run: 165.312s total: 185.461s] +[ OK ] (12/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel [compile: 0.019s run: 172.593s total: 186.044s] +[ OK ] (13/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+pgi [compile: 0.019s run: 162.499s total: 185.942s] +[ OK ] (14/22) osu_latency_test @daint:gpu+intel [compile: 0.020s run: 152.867s total: 185.853s] +[ OK ] (15/22) osu_latency_test @daint:gpu+pgi [compile: 0.020s run: 149.662s total: 185.853s] +[ OK ] (16/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel [compile: 0.020s run: 207.009s total: 207.831s] +[ OK ] (17/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi [compile: 0.019s run: 203.753s total: 207.829s] +[ OK ] (18/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi [compile: 0.019s run: 197.421s total: 207.783s] +[ OK ] (19/22) osu_latency_test @daint:gpu+gnu [compile: 0.024s run: 218.130s total: 234.892s] +[ OK ] (20/22) osu_bandwidth_test @daint:gpu+intel [compile: 0.020s run: 218.457s total: 244.995s] +[ OK ] (21/22) osu_bandwidth_test @daint:gpu+pgi [compile: 0.020s run: 215.273s total: 244.992s] +[ OK ] (22/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel [compile: 0.020s run: 267.367s total: 274.584s] [----------] all spawned checks have finished [ PASSED ] Ran 22/22 test case(s) from 10 check(s) (0 failure(s), 0 skipped) -[==========] Finished on Sat Jan 22 22:58:37 2022 +[==========] Finished on Sat Jan 22 23:13:40 2022 Run report saved in '/home/user/.reframe/reports/run-report.json' -Log file(s) saved in '/tmp/rfm-ii5sv1zr.log' +Log file(s) saved in '/tmp/rfm-6gbw7qzs.log' From 149af549ef3fc9bcb1c6bd2633e67f41938356b8 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sat, 22 Jan 2022 23:25:42 +0100 Subject: [PATCH 56/62] Update advanced tutorial listings --- docs/listings/maketest_mixin.txt | 14 +++++------ docs/listings/stream_params.txt | 14 +++++------ tools/gendoclistings.py | 43 +++++++++++++++++--------------- 3 files changed, 37 insertions(+), 34 deletions(-) diff --git a/docs/listings/maketest_mixin.txt b/docs/listings/maketest_mixin.txt index 588400368c..6af25537cd 100644 --- a/docs/listings/maketest_mixin.txt +++ b/docs/listings/maketest_mixin.txt @@ -1,12 +1,12 @@ [ReFrame Setup] - version: 3.10.0-dev.2+4cd9d2e9 + version: 3.10.0-dev.3+4fc5b12c command: './bin/reframe -c tutorials/advanced/makefiles/maketest_mixin.py -l' launched by: user@host - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' + working directory: '/home/user/Repositories/reframe' + settings file: '/home/user/Repositories/reframe/tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py' + 
stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' [List of matched checks] - MakeOnlyTestAlt %elem_type=double @@ -15,4 +15,4 @@ - MakefileTestAlt %elem_type=float Found 4 check(s) -Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-xv130jbu.log' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-4w95t2wt.log' diff --git a/docs/listings/stream_params.txt b/docs/listings/stream_params.txt index b31ffd3899..d5321b6f4e 100644 --- a/docs/listings/stream_params.txt +++ b/docs/listings/stream_params.txt @@ -1,12 +1,12 @@ [ReFrame Setup] - version: 3.10.0-dev.2+4cd9d2e9 + version: 3.10.0-dev.3+4fc5b12c command: './bin/reframe -c tutorials/advanced/parameterized/stream.py -l' launched by: user@host - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' + working directory: '/home/user/Repositories/reframe' + settings file: '/home/user/Repositories/reframe/tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' [List of matched checks] - StreamMultiSysTest %num_bytes=536870912 @@ -22,4 +22,4 @@ - StreamMultiSysTest %num_bytes=524288 Found 11 check(s) -Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-93hsoek9.log' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-ka9llk6d.log' diff --git a/tools/gendoclistings.py b/tools/gendoclistings.py index f1493f8e8b..f921eb1df2 100755 --- a/tools/gendoclistings.py +++ b/tools/gendoclistings.py @@ -15,7 +15,7 @@ def print_usage(): ListingInfo = collections.namedtuple( 'ListingInfo', - ['command', 'filename', 'tags', 'filters', 'env', 'xfail'] + ['command', 'tags', 'filters', 'env', 'xfail'] ) @@ -48,7 +48,6 @@ def replace_hostname(s): LISTINGS = { 'hello1': ListingInfo( './bin/reframe -c tutorials/basics/hello/hello1.py -r', - 'docs/listings/hello1.txt', {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env={'RFM_COLORIZE': 'n'}, @@ -56,7 +55,6 @@ def replace_hostname(s): ), 'run-report': ListingInfo( f'cat {os.getenv("HOME")}/.reframe/reports/run-report.json', - 'docs/listings/run-report.json', {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env=None, @@ -64,7 +62,6 @@ def replace_hostname(s): ), 'hello2': ListingInfo( './bin/reframe -c tutorials/basics/hello/hello2.py -r', - 'docs/listings/hello2.txt', {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env={'RFM_COLORIZE': 'n'}, @@ -72,7 +69,6 @@ def replace_hostname(s): ), 'hello2_catalina': ListingInfo( './bin/reframe -C tutorials/config/settings.py --system=catalina -c tutorials/basics/hello/hello2.py -r', - 'docs/listings/hello2_catalina.txt', {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env={'RFM_COLORIZE': 'n'}, @@ -80,7 +76,6 @@ def replace_hostname(s): ), 'hellomp1': ListingInfo( './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp1.py -r', - 'docs/listings/hellomp1.txt', {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env={ @@ -91,7 +86,6 @@ def replace_hostname(s): ), 'hellomp2': ListingInfo( './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp2.py -r', - 
'docs/listings/hellomp2.txt', {'local', 'tutorial-basics'}, DEFAULT_FILTERS, env={ @@ -102,7 +96,6 @@ def replace_hostname(s): ), 'alltests_daint': ListingInfo( './bin/reframe -c tutorials/basics/ -R -n "HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest" --performance-report -r', - 'docs/listings/alltests_daint.txt', {'remote', 'tutorial-basics'}, DEFAULT_FILTERS, env={ @@ -113,7 +106,6 @@ def replace_hostname(s): ), 'stream4_daint': ListingInfo( './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report', - 'docs/listings/stream4_daint.txt', {'remote', 'tutorial-basics'}, DEFAULT_FILTERS, env={ @@ -124,7 +116,6 @@ def replace_hostname(s): ), 'osu_bench_deps': ListingInfo( './bin/reframe -c tutorials/deps/osu_benchmarks.py -r', - 'docs/listings/osu_bench_deps.txt', {'remote', 'tutorial-deps'}, DEFAULT_FILTERS, env={ @@ -135,7 +126,6 @@ def replace_hostname(s): ), 'osu_latency_list': ListingInfo( './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l', - 'docs/listings/osu_latency_list.txt', {'remote', 'tutorial-deps'}, DEFAULT_FILTERS, env={ @@ -146,7 +136,6 @@ def replace_hostname(s): ), 'osu_latency_unresolved_deps': ListingInfo( './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest --system=daint:gpu -l', - 'docs/listings/osu_latency_unresolved_deps.txt', {'remote', 'tutorial-deps'}, DEFAULT_FILTERS, env={ @@ -157,7 +146,6 @@ def replace_hostname(s): ), 'osu_bench_list_concretized': ListingInfo( './bin/reframe -c tutorials/deps/osu_benchmarks.py -lC', - 'docs/listings/osu_bench_list_concretized.txt', {'remote', 'tutorial-deps'}, DEFAULT_FILTERS, env={ @@ -168,7 +156,6 @@ def replace_hostname(s): ), 'osu_bench_list_concretized_gnu': ListingInfo( './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu', - 'docs/listings/osu_bench_list_concretized_gnu.txt', {'remote', 'tutorial-deps'}, DEFAULT_FILTERS, env={ @@ -179,7 +166,6 @@ def replace_hostname(s): ), 'param_deps_list': ListingInfo( './bin/reframe -c tutorials/deps/parameterized.py -l', - 'docs/listings/param_deps_list.txt', {'local', 'tutorial-deps'}, DEFAULT_FILTERS, env=None, @@ -187,7 +173,6 @@ def replace_hostname(s): ), 'osu_bench_fixtures_list': ListingInfo( './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -l', - 'docs/listings/osu_bench_fixtures_list.txt', {'remote', 'tutorial-fixtures'}, DEFAULT_FILTERS, env={ @@ -197,7 +182,6 @@ def replace_hostname(s): ), 'osu_bandwidth_concretized_daint': ListingInfo( './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC', - 'docs/listings/osu_bandwidth_concretized_daint.txt', {'remote', 'tutorial-fixtures'}, DEFAULT_FILTERS, env={ @@ -208,7 +192,6 @@ def replace_hostname(s): ), 'osu_bandwidth_concretized_daint_pgi': ListingInfo( './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi', - 'docs/listings/osu_bandwidth_concretized_daint_pgi.txt', {'remote', 'tutorial-fixtures'}, DEFAULT_FILTERS, env={ @@ -219,7 +202,6 @@ def replace_hostname(s): ), 'osu_bench_fixtures_run': ListingInfo( './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -r', - 'docs/listings/osu_bench_fixtures_run.txt', {'remote', 'tutorial-fixtures'}, DEFAULT_FILTERS, env={ @@ -227,6 +209,26 @@ def replace_hostname(s): 'RFM_COLORIZE': 'n' }, xfail=False + ), + 'stream_params': ListingInfo( + './bin/reframe --system=catalina -c tutorials/advanced/parameterized/stream.py -l', + {'local', 'tutorial-advanced'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': 
os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'maketest_mixin': ListingInfo( + './bin/reframe --system=catalina -c tutorials/advanced/makefiles/maketest_mixin.py -l', + {'local', 'tutorial-advanced'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False ) } @@ -261,5 +263,6 @@ def replace_hostname(s): output = f(output) # Write the listing - with open(info.filename, 'w') as fp: + filename = os.path.join('docs/listings', f'{name}.txt') + with open(filename, 'w') as fp: fp.write(output) From c558d3995b918b9ad944d4c9423cb4e5d5e4ecd4 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sun, 23 Jan 2022 00:13:44 +0100 Subject: [PATCH 57/62] Update tips & tricks tutorial listings --- docs/listings/deps_complex_run.txt | 117 ++++++++++++++++++ docs/listings/deps_rerun_t6.txt | 22 ++++ docs/listings/deps_run_t6.txt | 30 +++++ docs/listings/hello2_list_verbose.txt | 82 +++++++++++++ docs/listings/hello2_print_stdout.txt | 32 +++++ docs/listings/hello2_typo.txt | 16 +-- docs/listings/hello2_typo_stacktrace.txt | 24 ++-- docs/tutorial_tips_tricks.rst | 147 ++--------------------- tools/gendoclistings.py | 58 ++++++++- 9 files changed, 372 insertions(+), 156 deletions(-) create mode 100644 docs/listings/deps_complex_run.txt create mode 100644 docs/listings/deps_rerun_t6.txt create mode 100644 docs/listings/deps_run_t6.txt create mode 100644 docs/listings/hello2_list_verbose.txt create mode 100644 docs/listings/hello2_print_stdout.txt diff --git a/docs/listings/deps_complex_run.txt b/docs/listings/deps_complex_run.txt new file mode 100644 index 0000000000..d552a280d2 --- /dev/null +++ b/docs/listings/deps_complex_run.txt @@ -0,0 +1,117 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: '/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 10 check(s) +[==========] Started on Sat Jan 22 23:44:18 2022 + +[----------] start processing checks +[ RUN ] T0 @generic:default+builtin +[ OK ] ( 1/10) T0 @generic:default+builtin [compile: 0.018s run: 0.292s total: 0.336s] +[ RUN ] T4 @generic:default+builtin +[ OK ] ( 2/10) T4 @generic:default+builtin [compile: 0.016s run: 0.336s total: 0.380s] +[ RUN ] T5 @generic:default+builtin +[ OK ] ( 3/10) T5 @generic:default+builtin [compile: 0.016s run: 0.389s total: 0.446s] +[ RUN ] T1 @generic:default+builtin +[ OK ] ( 4/10) T1 @generic:default+builtin [compile: 0.016s run: 0.459s total: 0.501s] +[ RUN ] T8 @generic:default+builtin +[ FAIL ] ( 5/10) T8 @generic:default+builtin [compile: n/a run: n/a total: 0.006s] +==> test failed during 'setup': test staged in '/home/user/Repositories/reframe/stage/generic/default/builtin/T8' +[ FAIL ] ( 6/10) T9 @generic:default+builtin [compile: n/a run: n/a total: n/a] +==> test failed during 'startup': test staged in None +[ RUN ] T6 @generic:default+builtin +[ OK ] ( 7/10) T6 @generic:default+builtin [compile: 0.016s run: 0.530s total: 0.584s] +[ RUN ] T2 @generic:default+builtin +[ RUN ] T3 @generic:default+builtin +[ FAIL ] ( 8/10) T2 @generic:default+builtin [compile: 0.019s run: 0.324s total: 
0.424s] +==> test failed during 'sanity': test staged in '/home/user/Repositories/reframe/stage/generic/default/builtin/T2' +[ FAIL ] ( 9/10) T7 @generic:default+builtin [compile: n/a run: n/a total: n/a] +==> test failed during 'startup': test staged in None +[ OK ] (10/10) T3 @generic:default+builtin [compile: 0.017s run: 0.328s total: 0.403s] +[----------] all spawned checks have finished + +[ FAILED ] Ran 10/10 test case(s) from 10 check(s) (4 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 23:44:21 2022 + +============================================================================== +SUMMARY OF FAILURES +------------------------------------------------------------------------------ +FAILURE INFO for T8 + * Expanded name: T8 + * Description: T8 + * System partition: generic:default + * Environment: builtin + * Stage directory: /home/user/Repositories/reframe/stage/generic/default/builtin/T8 + * Node list: + * Job type: local (id=None) + * Dependencies (conceptual): ['T1'] + * Dependencies (actual): [('T1', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: setup + * Rerun with '-n T8 -p builtin --system generic:default -r' + * Reason: exception +Traceback (most recent call last): + File "/home/user/Repositories/reframe/reframe/frontend/executors/__init__.py", line 291, in _safe_call + return fn(*args, **kwargs) + File "/home/user/Repositories/reframe/reframe/core/hooks.py", line 82, in _fn + getattr(obj, h.__name__)() + File "/home/user/Repositories/reframe/reframe/core/hooks.py", line 32, in _fn + func(*args, **kwargs) + File "/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py", line 180, in fail + raise Exception +Exception + +------------------------------------------------------------------------------ +FAILURE INFO for T9 + * Expanded name: T9 + * Description: T9 + * System partition: generic:default + * Environment: builtin + * Stage directory: None + * Node list: + * Job type: local (id=None) + * Dependencies (conceptual): ['T8'] + * Dependencies (actual): [('T8', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: startup + * Rerun with '-n T9 -p builtin --system generic:default -r' + * Reason: task dependency error: dependencies failed +------------------------------------------------------------------------------ +FAILURE INFO for T2 + * Expanded name: T2 + * Description: T2 + * System partition: generic:default + * Environment: builtin + * Stage directory: /home/user/Repositories/reframe/stage/generic/default/builtin/T2 + * Node list: tresa.localNone + * Job type: local (id=49427) + * Dependencies (conceptual): ['T6'] + * Dependencies (actual): [('T6', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: sanity + * Rerun with '-n T2 -p builtin --system generic:default -r' + * Reason: sanity error: 31 != 30 +------------------------------------------------------------------------------ +FAILURE INFO for T7 + * Expanded name: T7 + * Description: T7 + * System partition: generic:default + * Environment: builtin + * Stage directory: None + * Node list: + * Job type: local (id=None) + * Dependencies (conceptual): ['T2'] + * Dependencies (actual): [('T2', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: startup + * Rerun with '-n T7 -p builtin --system generic:default -r' + * Reason: task dependency error: dependencies failed +------------------------------------------------------------------------------ +Run report saved in 
'/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-92y3fr5s.log' diff --git a/docs/listings/deps_rerun_t6.txt b/docs/listings/deps_rerun_t6.txt new file mode 100644 index 0000000000..eb40028b16 --- /dev/null +++ b/docs/listings/deps_rerun_t6.txt @@ -0,0 +1,22 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe --restore-session --keep-stage-files -n T6 -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: '/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Sat Jan 22 23:44:25 2022 + +[----------] start processing checks +[ RUN ] T6 @generic:default+builtin +[ OK ] (1/1) T6 @generic:default+builtin [compile: 0.017s run: 0.286s total: 0.330s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 1/1 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 23:44:25 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-mug0a4cb.log' diff --git a/docs/listings/deps_run_t6.txt b/docs/listings/deps_run_t6.txt new file mode 100644 index 0000000000..3bdab8c1b0 --- /dev/null +++ b/docs/listings/deps_run_t6.txt @@ -0,0 +1,30 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: '/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 5 check(s) +[==========] Started on Sat Jan 22 23:44:25 2022 + +[----------] start processing checks +[ RUN ] T0 @generic:default+builtin +[ OK ] (1/5) T0 @generic:default+builtin [compile: 0.017s run: 0.289s total: 0.331s] +[ RUN ] T4 @generic:default+builtin +[ OK ] (2/5) T4 @generic:default+builtin [compile: 0.018s run: 0.330s total: 0.374s] +[ RUN ] T5 @generic:default+builtin +[ OK ] (3/5) T5 @generic:default+builtin [compile: 0.018s run: 0.384s total: 0.442s] +[ RUN ] T1 @generic:default+builtin +[ OK ] (4/5) T1 @generic:default+builtin [compile: 0.018s run: 0.452s total: 0.494s] +[ RUN ] T6 @generic:default+builtin +[ OK ] (5/5) T6 @generic:default+builtin [compile: 0.018s run: 0.525s total: 0.582s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 5/5 test case(s) from 5 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 23:44:28 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-ktylyaqk.log' diff --git a/docs/listings/hello2_list_verbose.txt b/docs/listings/hello2_list_verbose.txt new file mode 100644 index 0000000000..b334a9d0be --- /dev/null +++ b/docs/listings/hello2_list_verbose.txt @@ -0,0 +1,82 @@ +Loading user configuration +Loading configuration file: 'tutorials/config/settings.py' +Detecting system +Looking for a matching configuration entry for system 'host' +Configuration found: picking system 'generic' +Selecting subconfig for 
'generic' +Initializing runtime +Selecting subconfig for 'generic:default' +Initializing system partition 'default' +Selecting subconfig for 'generic' +Initializing system 'generic' +Initializing modules system 'nomod' +detecting topology info for generic:default +> found topology file '/home/user/.reframe/topology/generic-default/processor.json'; loading... +> device auto-detection is not supported +[ReFrame Environment] + RFM_CHECK_SEARCH_PATH= + RFM_CHECK_SEARCH_RECURSIVE= + RFM_CLEAN_STAGEDIR= + RFM_COLORIZE=n + RFM_COMPACT_TEST_NAMES=n + RFM_CONFIG_FILE= + RFM_DUMP_PIPELINE_PROGRESS= + RFM_GIT_TIMEOUT= + RFM_GRAYLOG_ADDRESS= + RFM_HTTPJSON_URL= + RFM_IGNORE_CHECK_CONFLICTS= + RFM_IGNORE_REQNODENOTAVAIL= + RFM_INSTALL_PREFIX=/home/user/Repositories/reframe + RFM_KEEP_STAGE_FILES= + RFM_MODULE_MAPPINGS= + RFM_MODULE_MAP_FILE= + RFM_NON_DEFAULT_CRAYPE= + RFM_OUTPUT_DIR= + RFM_PERFLOG_DIR= + RFM_PIPELINE_TIMEOUT= + RFM_PREFIX= + RFM_PURGE_ENVIRONMENT= + RFM_REMOTE_DETECT= + RFM_REMOTE_WORKDIR= + RFM_REPORT_FILE= + RFM_REPORT_JUNIT= + RFM_RESOLVE_MODULE_CONFLICTS= + RFM_SAVE_LOG_FILES= + RFM_STAGE_DIR= + RFM_SYSLOG_ADDRESS= + RFM_SYSTEM= + RFM_TIMESTAMP_DIRS= + RFM_TRAP_JOB_ERRORS= + RFM_UNLOAD_MODULES= + RFM_USER_MODULES= + RFM_USE_LOGIN_SHELL= + RFM_VERBOSE= +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +Looking for tests in '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py' +Validating '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py': OK + > Loaded 2 test(s) +Loaded 2 test(s) +Generated 2 test case(s) +Filtering test cases(s) by name: 2 remaining +Filtering test cases(s) by tags: 2 remaining +Filtering test cases(s) by other attributes: 2 remaining +Building and validating the full test DAG +Full test DAG: + ('HelloMultiLangTest_cpp', 'generic:default', 'builtin') -> [] + ('HelloMultiLangTest_c', 'generic:default', 'builtin') -> [] +Final number of test cases: 2 +[List of matched checks] +- HelloMultiLangTest %lang=cpp +- HelloMultiLangTest %lang=c +Found 2 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-fs1arce0.log' diff --git a/docs/listings/hello2_print_stdout.txt b/docs/listings/hello2_print_stdout.txt new file mode 100644 index 0000000000..75e1426db0 --- /dev/null +++ b/docs/listings/hello2_print_stdout.txt @@ -0,0 +1,32 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 2 check(s) +[==========] Started on Sun Jan 23 00:11:07 2022 + +[----------] start processing checks +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+clang +[ RUN ] HelloMultiLangTest 
%lang=c @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=c @catalina:default+clang +rfm_HelloMultiLangTest_cpp_job.out +[ OK ] (1/4) HelloMultiLangTest %lang=cpp @catalina:default+gnu [compile: 0.737s run: 0.748s total: 1.765s] +rfm_HelloMultiLangTest_cpp_job.out +[ OK ] (2/4) HelloMultiLangTest %lang=cpp @catalina:default+clang [compile: 0.735s run: 0.909s total: 1.928s] +rfm_HelloMultiLangTest_c_job.out +[ OK ] (3/4) HelloMultiLangTest %lang=c @catalina:default+gnu [compile: 0.719s run: 1.072s total: 2.090s] +rfm_HelloMultiLangTest_c_job.out +[ OK ] (4/4) HelloMultiLangTest %lang=c @catalina:default+clang [compile: 0.714s run: 1.074s total: 2.094s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 4/4 test case(s) from 2 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sun Jan 23 00:11:10 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-jumlrg66.log' diff --git a/docs/listings/hello2_typo.txt b/docs/listings/hello2_typo.txt index a8f21aa2a9..282bba2d50 100644 --- a/docs/listings/hello2_typo.txt +++ b/docs/listings/hello2_typo.txt @@ -1,18 +1,18 @@ [ReFrame Setup] - version: 3.10.0-dev.2+cb5edd8b + version: 3.10.0-dev.3+149af549 command: './bin/reframe -c tutorials/basics/hello -R -l' launched by: user@host - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: (R) '/Users/user/Repositories/reframe/tutorials/basics/hello' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: (R) '/home/user/Repositories/reframe/tutorials/basics/hello' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' -./bin/reframe: skipping test file '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined +./bin/reframe: skipping test file '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined lang = paramter(['c', 'cpp']) (rerun with '-v' for more information) [List of matched checks] - HelloTest Found 1 check(s) -Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-rt_ds_vp.log' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-bzqy3nc7.log' diff --git a/docs/listings/hello2_typo_stacktrace.txt b/docs/listings/hello2_typo_stacktrace.txt index 5114639732..b530626c20 100644 --- a/docs/listings/hello2_typo_stacktrace.txt +++ b/docs/listings/hello2_typo_stacktrace.txt @@ -1,20 +1,20 @@ [ReFrame Setup] - version: 3.10.0-dev.2+cb5edd8b + version: 3.10.0-dev.3+149af549 command: './bin/reframe -c tutorials/basics/hello -R -l -v' launched by: user@host - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: (R) '/Users/user/Repositories/reframe/tutorials/basics/hello' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: (R) '/home/user/Repositories/reframe/tutorials/basics/hello' + stage directory: '/home/user/Repositories/reframe/stage' + 
output directory: '/home/user/Repositories/reframe/output'
 
-./bin/reframe: skipping test file '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined
+./bin/reframe: skipping test file '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined
     lang = paramter(['c', 'cpp'])
 (rerun with '-v' for more information)
 Traceback (most recent call last):
-  File "/Users/user/Repositories/reframe/reframe/frontend/loader.py", line 237, in load_from_file
+  File "/home/user/Repositories/reframe/reframe/frontend/loader.py", line 237, in load_from_file
     util.import_module_from_file(filename, force)
-  File "/Users/user/Repositories/reframe/reframe/utility/__init__.py", line 103, in import_module_from_file
+  File "/home/user/Repositories/reframe/reframe/utility/__init__.py", line 109, in import_module_from_file
     return importlib.import_module(module_name)
   File "/usr/local/Cellar/python@3.9/3.9.1_6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module
     return _bootstrap._gcd_import(name[level:], package, level)
@@ -24,9 +24,9 @@ Traceback (most recent call last):
   File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
   File "<frozen importlib._bootstrap_external>", line 790, in exec_module
   File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
-  File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 12, in <module>
+  File "/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 12, in <module>
     class HelloMultiLangTest(rfm.RegressionTest):
-  File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 13, in HelloMultiLangTest
+  File "/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 13, in HelloMultiLangTest
     lang = paramter(['c', 'cpp'])
 NameError: name 'paramter' is not defined
 
@@ -40,4 +40,4 @@ Final number of test cases: 1
 - HelloTest
 Found 1 check(s)
 
-Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-do69ki6t.log'
+Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-l21cjjas.log'
diff --git a/docs/tutorial_tips_tricks.rst b/docs/tutorial_tips_tricks.rst
index a4acc7b740..01e74bc751 100644
--- a/docs/tutorial_tips_tricks.rst
+++ b/docs/tutorial_tips_tricks.rst
@@ -40,7 +40,7 @@ As suggested by the warning message, passing :option:`-v` will give you the stac
 
 .. code:: bash
 
-    ./bin/reframe -c tutorials/basics/hello -R -lv
+    ./bin/reframe -c tutorials/basics/hello -R -l -v
 
 .. literalinclude:: listings/hello2_typo_stacktrace.txt
    :language: console
@@ -89,20 +89,8 @@ If we run the test, we can see that the correct standard output filename will be
 
     ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r
 
-.. 
code-block:: none - - rfm_HelloMultiLangTest_cpp_job.out - [ OK ] (1/4) HelloMultiLangTest_cpp on catalina:default using gnu [compile: 0.677s run: 0.700s total: 1.394s] - rfm_HelloMultiLangTest_c_job.out - [ OK ] (2/4) HelloMultiLangTest_c on catalina:default using gnu [compile: 0.451s run: 1.788s total: 2.258s] - rfm_HelloMultiLangTest_c_job.out - [ OK ] (3/4) HelloMultiLangTest_c on catalina:default using clang [compile: 0.329s run: 1.585s total: 1.934s] - rfm_HelloMultiLangTest_cpp_job.out - [ OK ] (4/4) HelloMultiLangTest_cpp on catalina:default using clang [compile: 0.609s run: 0.373s total: 1.004s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 4 test case(s) from 2 check(s) (0 failure(s)) - [==========] Finished on Wed Jan 20 17:19:01 2021 +.. literalinclude:: listings/hello2_print_stdout.txt + :language: console Debugging sanity and performance patterns @@ -157,79 +145,10 @@ Let's try loading the ``tutorials/basics/hello/hello2.py`` file: .. code:: bash - ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -lvv - + ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv -.. code-block:: none - - Loading user configuration - Loading configuration file: 'tutorials/config/settings.py' - Detecting system - Looking for a matching configuration entry for system 'dhcp-133-191.cscs.ch' - Configuration found: picking system 'generic' - Selecting subconfig for 'generic' - Initializing runtime - Selecting subconfig for 'generic:default' - Initializing system partition 'default' - Selecting subconfig for 'generic' - Initializing system 'generic' - Initializing modules system 'nomod' - [ReFrame Environment] - RFM_CHECK_SEARCH_PATH= - RFM_CHECK_SEARCH_RECURSIVE= - RFM_CLEAN_STAGEDIR= - RFM_COLORIZE= - RFM_CONFIG_FILE=/Users/user/Repositories/reframe/tutorials/config/settings.py - RFM_GRAYLOG_ADDRESS= - RFM_IGNORE_CHECK_CONFLICTS= - RFM_IGNORE_REQNODENOTAVAIL= - RFM_INSTALL_PREFIX=/Users/user/Repositories/reframe - RFM_KEEP_STAGE_FILES= - RFM_MODULE_MAPPINGS= - RFM_MODULE_MAP_FILE= - RFM_NON_DEFAULT_CRAYPE= - RFM_OUTPUT_DIR= - RFM_PERFLOG_DIR= - RFM_PREFIX= - RFM_PURGE_ENVIRONMENT= - RFM_REPORT_FILE= - RFM_SAVE_LOG_FILES= - RFM_STAGE_DIR= - RFM_SYSLOG_ADDRESS= - RFM_SYSTEM= - RFM_TIMESTAMP_DIRS= - RFM_UNLOAD_MODULES= - RFM_USER_MODULES= - RFM_USE_LOGIN_SHELL= - RFM_VERBOSE= - [ReFrame Setup] - version: 3.4-dev2 (rev: 33a97c81) - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -lvv' - launched by: user@dhcp-133-191.cscs.ch - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - Looking for tests in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - Validating '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': OK - > Loaded 2 test(s) - Loaded 2 test(s) - Generated 2 test case(s) - Filtering test cases(s) by name: 2 remaining - Filtering test cases(s) by tags: 2 remaining - Filtering test cases(s) by other attributes: 2 remaining - Building and validating the full test DAG - Full test DAG: - ('HelloMultiLangTest_c', 'generic:default', 'builtin') -> [] - ('HelloMultiLangTest_cpp', 'generic:default', 'builtin') -> [] - Final number of test cases: 2 - [List of matched checks] - - 
HelloMultiLangTest_c (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py') - - HelloMultiLangTest_cpp (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py') - Found 2 check(s) - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-3956_dlu.log' +.. literalinclude:: listings/hello2_list_verbose.txt + :language: console You can see all the different phases ReFrame's frontend goes through when loading a test. The first "strange" thing to notice in this log is that ReFrame picked the generic system configuration. @@ -374,30 +293,8 @@ Let's run the whole test DAG: ./bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r -.. code-block:: none - - - - [ OK ] ( 1/10) T0 on generic:default using builtin [compile: 0.014s run: 0.297s total: 0.337s] - [ OK ] ( 2/10) T4 on generic:default using builtin [compile: 0.010s run: 0.171s total: 0.207s] - [ OK ] ( 3/10) T5 on generic:default using builtin [compile: 0.010s run: 0.192s total: 0.225s] - [ OK ] ( 4/10) T1 on generic:default using builtin [compile: 0.008s run: 0.198s total: 0.226s] - [ FAIL ] ( 5/10) T8 on generic:default using builtin [compile: n/a run: n/a total: 0.003s] - ==> test failed during 'setup': test staged in '/Users/user/Repositories/reframe/stage/generic/default/builtin/T8' - [ FAIL ] ( 6/10) T9 [compile: n/a run: n/a total: n/a] - ==> test failed during 'startup': test staged in '' - [ OK ] ( 7/10) T6 on generic:default using builtin [compile: 0.007s run: 0.224s total: 0.262s] - [ OK ] ( 8/10) T3 on generic:default using builtin [compile: 0.007s run: 0.211s total: 0.235s] - [ FAIL ] ( 9/10) T2 on generic:default using builtin [compile: 0.011s run: 0.318s total: 0.389s] - ==> test failed during 'sanity': test staged in '/Users/user/Repositories/reframe/stage/generic/default/builtin/T2' - [ FAIL ] (10/10) T7 [compile: n/a run: n/a total: n/a] - ==> test failed during 'startup': test staged in '' - [----------] all spawned checks have finished - - [ FAILED ] Ran 10 test case(s) from 10 check(s) (4 failure(s)) - [==========] Finished on Thu Jan 21 13:58:43 2021 - - +.. literalinclude:: listings/deps_complex_run.txt + :language: console You can restore the run session and run only the failed test cases as follows: @@ -433,18 +330,8 @@ Let's try to rerun the :class:`T6` test from the previous test dependency chain: Notice how only the :class:`T6` test was rerun and none of its dependencies, since they were simply restored: -.. code-block:: none - - [==========] Running 1 check(s) - [==========] Started on Thu Jan 21 14:27:18 2021 - - [----------] start processing checks - [ RUN ] T6 on generic:default using builtin - [ OK ] (1/1) T6 on generic:default using builtin [compile: 0.012s run: 0.428s total: 0.464s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 1 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Thu Jan 21 14:27:19 2021 +.. literalinclude:: listings/deps_rerun_t6.txt + :language: console If we tried to run :class:`T6` without restoring the session, we would have to rerun also the whole dependency chain, i.e., also :class:`T5`, :class:`T1`, :class:`T4` and :class:`T0`. @@ -453,18 +340,8 @@ If we tried to run :class:`T6` without restoring the session, we would have to r ./bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r -.. 
code-block:: none - - [ OK ] (1/5) T0 on generic:default using builtin [compile: 0.012s run: 0.424s total: 0.464s] - [ OK ] (2/5) T4 on generic:default using builtin [compile: 0.011s run: 0.348s total: 0.381s] - [ OK ] (3/5) T5 on generic:default using builtin [compile: 0.007s run: 0.225s total: 0.248s] - [ OK ] (4/5) T1 on generic:default using builtin [compile: 0.009s run: 0.235s total: 0.267s] - [ OK ] (5/5) T6 on generic:default using builtin [compile: 0.010s run: 0.265s total: 0.297s] - [----------] all spawned checks have finished - - - [ PASSED ] Ran 5 test case(s) from 5 check(s) (0 failure(s)) - [==========] Finished on Thu Jan 21 14:32:09 2021 +.. literalinclude:: listings/deps_run_t6.txt + :language: console .. _generate-ci-pipeline: diff --git a/tools/gendoclistings.py b/tools/gendoclistings.py index f921eb1df2..165e84cabb 100755 --- a/tools/gendoclistings.py +++ b/tools/gendoclistings.py @@ -229,11 +229,67 @@ def replace_hostname(s): 'RFM_COLORIZE': 'n' }, xfail=False + ), + 'hello2_typo': ListingInfo( + 'sed -ie "s/parameter/paramter/g" tutorials/basics/hello/hello2.py && ' + './bin/reframe -c tutorials/basics/hello -R -l && ' + 'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'hello2_typo_stacktrace': ListingInfo( + 'sed -ie "s/parameter/paramter/g" tutorials/basics/hello/hello2.py && ' + './bin/reframe -c tutorials/basics/hello -R -l -v && ' + 'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'hello2_print_stdout': ListingInfo( + 'sed -ie "s/self\.stdout/sn.print(self.stdout)/g" tutorials/basics/hello/hello2.py && ' + './bin/reframe --system=catalina -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r && ' + 'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'hello2_list_verbose': ListingInfo( + './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'deps_complex_run': ListingInfo( + './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=True + ), + 'deps_rerun_t6': ListingInfo( + './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py --keep-stage-files -r > /dev/null || ' + './bin/reframe --restore-session --keep-stage-files -n T6 -r', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'deps_run_t6': ListingInfo( + './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False ) } -runcmd = functools.partial(osext.run_command, log=False) +runcmd = functools.partial(osext.run_command, log=False, shell=True) if __name__ == '__main__': try: From bd99dbaa600d90df4d0b2c9ed8f22e6a15e320d5 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sun, 23 Jan 2022 00:17:48 +0100 Subject: [PATCH 58/62] Remove references to mysettings.py --- docs/tutorial_advanced.rst | 2 +- docs/tutorial_basics.rst | 10 +++++++--- 
docs/tutorial_build_automation.rst | 6 ------
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst
index a24dbc73b2..0316c813ec 100644
--- a/docs/tutorial_advanced.rst
+++ b/docs/tutorial_advanced.rst
@@ -9,7 +9,7 @@ Finally, to avoid specifying the tutorial configuration file each time, make sur
 
 .. code:: bash
 
-   export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py
+   export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py
 
 
diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst
index c793746420..a606124123 100644
--- a/docs/tutorial_basics.rst
+++ b/docs/tutorial_basics.rst
@@ -218,6 +218,10 @@ Note that you should *not* edit this configuration file in place.
 
    cp reframe/core/settings.py tutorials/config/mysettings.py
 
+.. note::
+   You may also use the supplied ``tutorials/config/settings.py`` file directly, which is the actual configuration file against which the various tutorials have been evaluated.
+
+
 Here is how the new configuration file looks like with the needed additions highlighted:
 
 .. literalinclude:: ../tutorials/config/settings.py
@@ -249,7 +253,7 @@ Let's now rerun our "Hello, World!" tests:
 
 .. code-block:: console
 
-   ./bin/reframe -C tutorials/config/mysettings.py -c tutorials/basics/hello/hello2.py -r
+   ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r
 
 
 .. literalinclude:: listings/hello2_catalina.txt
@@ -262,7 +266,7 @@ Since we don't want to type it throughout the tutorial, we will now set it in th
 
 .. code-block:: console
 
-   export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py
+   export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py
 
 
 A Multithreaded "Hello, World!"
@@ -646,7 +650,7 @@ We will only do so with the final versions of the tests from the previous sectio
 
 .. code-block:: console
 
-   export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py
+   export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py
    ./bin/reframe -c tutorials/basics/ -R -n 'HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest' --performance-report -r
 
 .. literalinclude:: listings/alltests_daint.txt
diff --git a/docs/tutorial_build_automation.rst b/docs/tutorial_build_automation.rst
index ed91b60967..c0ba98f031 100644
--- a/docs/tutorial_build_automation.rst
+++ b/docs/tutorial_build_automation.rst
@@ -5,12 +5,6 @@ Tutorial 5: Using Build Automation Tools As a Build System
 
 In this tutorial we will present how to use `EasyBuild <https://easybuild.io/>`__ and `Spack <https://spack.io/>`__ as a build system for a ReFrame test.
 The example uses the configuration file presented in :doc:`tutorial_basics`, which you can find in ``tutorials/config/settings.py``.
 We also assume that the reader is already familiar with the concepts presented in the basic tutorial and has a working knowledge of EasyBuild and Spack.
-Finally, to avoid specifying the tutorial configuration file each time you run the test, make sure to export it here:
-
-.. code:: bash
-
-   export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py
-
 
 
 Using EasyBuild to Build the Test Code
 
From a74beac7b22b3e7c32218f54d7a22e96aa3810f9 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sun, 23 Jan 2022 00:22:46 +0100
Subject: [PATCH 59/62] Remove unused imports
---
 unittests/utility.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/unittests/utility.py b/unittests/utility.py
index b68375e282..de4ab22f89 100644
--- a/unittests/utility.py
+++ b/unittests/utility.py
@@ -12,7 +12,6 @@
 import os
 import sys
 
-import reframe as rfm
 import reframe.core.config as config
 import reframe.core.modules as modules
 import reframe.core.runtime as rt
 
From 32182924e46e843167f6c137f96d9109d9d15644 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sun, 23 Jan 2022 10:46:23 +0100
Subject: [PATCH 60/62] Fix PEP8 issues
---
 tools/gendoclistings.py | 93 ++++++++++++++++++++++++-----------------
 1 file changed, 54 insertions(+), 39 deletions(-)

diff --git a/tools/gendoclistings.py b/tools/gendoclistings.py
index 165e84cabb..1ecf3af034 100755
--- a/tools/gendoclistings.py
+++ b/tools/gendoclistings.py
@@ -3,14 +3,13 @@
 import collections
 import functools
 import os
-import re
 import socket
 import sys
 
 import reframe.utility.osext as osext
 
 
 def print_usage():
-    print(f'Usage: {sys.argv[0]} [local|remote|all|<listing>]')
+    print(f'Usage: {sys.argv[0]} [all|<tag>|<listing>]')
 
 
 ListingInfo = collections.namedtuple(
@@ -68,48 +67,52 @@ def replace_hostname(s):
         xfail=True
     ),
     'hello2_catalina': ListingInfo(
-        './bin/reframe -C tutorials/config/settings.py --system=catalina -c tutorials/basics/hello/hello2.py -r',
+        './bin/reframe -C tutorials/config/settings.py --system=catalina -c tutorials/basics/hello/hello2.py -r',  # noqa: E501
         {'local', 'tutorial-basics'},
         DEFAULT_FILTERS,
         env={'RFM_COLORIZE': 'n'},
         xfail=False
     ),
     'hellomp1': ListingInfo(
-        './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp1.py -r',
+        './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp1.py -r',  # noqa: E501
         {'local', 'tutorial-basics'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'hellomp2': ListingInfo(
-        './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp2.py -r',
+        './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp2.py -r',  # noqa: E501
         {'local', 'tutorial-basics'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=True
     ),
     'alltests_daint': ListingInfo(
-        './bin/reframe -c tutorials/basics/ -R -n "HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest" --performance-report -r',
+        './bin/reframe -c tutorials/basics/ -R -n "HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest" --performance-report -r',  # noqa: E501
         {'remote', 'tutorial-basics'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'stream4_daint': ListingInfo(
-        './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report',
+        './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report',  # noqa: E501
         {'remote', 'tutorial-basics'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
@@ -119,27 +122,30 @@ def replace_hostname(s):
         {'remote', 'tutorial-deps'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'osu_latency_list': ListingInfo(
-        './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l',
+        './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l',  # noqa: E501
         {'remote', 'tutorial-deps'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'osu_latency_unresolved_deps': ListingInfo(
-        './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest --system=daint:gpu -l',
+        './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest --system=daint:gpu -l',  # noqa: E501
         {'remote', 'tutorial-deps'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
@@ -149,17 +155,19 @@ def replace_hostname(s):
         {'remote', 'tutorial-deps'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'osu_bench_list_concretized_gnu': ListingInfo(
-        './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu',
+        './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu',  # noqa: E501
         {'remote', 'tutorial-deps'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
@@ -176,26 +184,29 @@ def replace_hostname(s):
         {'remote', 'tutorial-fixtures'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py')
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py')
         },
         xfail=False
     ),
     'osu_bandwidth_concretized_daint': ListingInfo(
-        './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC',
+        './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC',  # noqa: E501
         {'remote', 'tutorial-fixtures'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'osu_bandwidth_concretized_daint_pgi': ListingInfo(
-        './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi',
+        './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi',  # noqa: E501
         {'remote', 'tutorial-fixtures'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
@@ -205,27 +216,30 @@ def replace_hostname(s):
         {'remote', 'tutorial-fixtures'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'stream_params': ListingInfo(
-        './bin/reframe --system=catalina -c tutorials/advanced/parameterized/stream.py -l',
+        './bin/reframe --system=catalina -c tutorials/advanced/parameterized/stream.py -l',  # noqa: E501
         {'local', 'tutorial-advanced'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
     ),
     'maketest_mixin': ListingInfo(
-        './bin/reframe --system=catalina -c tutorials/advanced/makefiles/maketest_mixin.py -l',
+        './bin/reframe --system=catalina -c tutorials/advanced/makefiles/maketest_mixin.py -l',  # noqa: E501
         {'local', 'tutorial-advanced'},
         DEFAULT_FILTERS,
         env={
-            'RFM_CONFIG_FILE': os.path.join(os.getcwd(), 'tutorials/config/settings.py'),
+            'RFM_CONFIG_FILE': os.path.join(os.getcwd(),
+                                            'tutorials/config/settings.py'),
             'RFM_COLORIZE': 'n'
         },
         xfail=False
@@ -233,7 +247,7 @@ def replace_hostname(s):
     'hello2_typo': ListingInfo(
         'sed -ie "s/parameter/paramter/g" tutorials/basics/hello/hello2.py && '
         './bin/reframe -c tutorials/basics/hello -R -l && '
-        'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py',
+        'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py',  # noqa: E501
         {'local', 'tutorial-tips-n-tricks'},
         DEFAULT_FILTERS,
         env={'RFM_COLORIZE': 'n'},
@@ -242,37 +256,37 @@ def replace_hostname(s):
     'hello2_typo_stacktrace': ListingInfo(
         'sed -ie "s/parameter/paramter/g" tutorials/basics/hello/hello2.py && '
         './bin/reframe -c tutorials/basics/hello -R -l -v && '
-        'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py',
+        'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py',  # noqa: E501
         {'local', 'tutorial-tips-n-tricks'},
         DEFAULT_FILTERS,
         env={'RFM_COLORIZE': 'n'},
         xfail=False
     ),
     'hello2_print_stdout': ListingInfo(
-        r'sed -ie "s/self\.stdout/sn.print(self.stdout)/g" tutorials/basics/hello/hello2.py && '
-        './bin/reframe --system=catalina -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r && '
-        'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py',
+        r'sed -ie "s/self\.stdout/sn.print(self.stdout)/g" tutorials/basics/hello/hello2.py && '  # noqa: E501
+        './bin/reframe --system=catalina -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r && '  # noqa: E501
+        'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py',  # noqa: E501
         {'local', 'tutorial-tips-n-tricks'},
         DEFAULT_FILTERS,
         env={'RFM_COLORIZE': 'n'},
         xfail=False
     ),
     'hello2_list_verbose': ListingInfo(
-        './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv',
+        './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv',  # noqa: E501
         {'local', 'tutorial-tips-n-tricks'},
         DEFAULT_FILTERS,
         env={'RFM_COLORIZE': 'n'},
         xfail=False
     ),
     'deps_complex_run': ListingInfo(
-        './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r',
+        './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r',  # noqa: E501
         {'local', 'tutorial-tips-n-tricks'},
         DEFAULT_FILTERS,
         env={'RFM_COLORIZE': 'n'},
         xfail=True
     ),
     'deps_rerun_t6': ListingInfo(
-        './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py --keep-stage-files -r > /dev/null || '
+        './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py --keep-stage-files -r > /dev/null || '  # noqa: E501
         './bin/reframe --restore-session --keep-stage-files -n T6 -r',
         {'local', 'tutorial-tips-n-tricks'},
         DEFAULT_FILTERS,
@@ -280,7 +294,7 @@ def replace_hostname(s):
         xfail=False
     ),
     'deps_run_t6': ListingInfo(
-        './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r',
+        './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r',  # noqa: E501
         {'local', 'tutorial-tips-n-tricks'},
         DEFAULT_FILTERS,
         env={'RFM_COLORIZE': 'n'},
@@ -310,7 +324,8 @@ def replace_hostname(s):
 
         completed = runcmd(info.command, check=not info.xfail)
         if info.xfail and completed.returncode == 0:
-            print(f'{info.command} should have failed, but it did not; skipping...')
+            print(f'{info.command} should have failed, but it did not; '
+                  f'skipping...')
             continue
 
         # Apply filters
 
From caddaba81f45aef9836abd79977adf6e0fc3256d Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Sun, 23 Jan 2022 11:23:26 +0100
Subject: [PATCH 61/62] Address PR comments
---
 docs/tutorial_deps.rst | 2 +-
 reframe/core/meta.py | 3 ---
 reframe/schemas/runreport.json | 11 +++--------
 tutorials/basics/stream/stream3.py | 2 +-
 unittests/test_parameters.py | 1 -
 5 files changed, 5 insertions(+), 14 deletions(-)

diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst
index 0703df7995..afef9dfdea 100644
--- a/docs/tutorial_deps.rst
+++ b/docs/tutorial_deps.rst
@@ -150,7 +150,7 @@ As a result, its immediate dependency :class:`OSUBuildTest` will be skipped, whi
 
 Listing Dependencies
 --------------------
 
-As shown in the listing of :class:`OSULatencyTest` before, the full dependency chain of the test is listed along with test.
+As shown in the listing of :class:`OSULatencyTest` before, the full dependency chain of the test is listed along with the test.
 Each target dependency is printed in a new line prefixed by the ``^`` character and indented proportionally to its level.
 If a target dependency appears in multiple paths, it will only be listed once.
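Taken together, the ``gendoclistings.py`` patches above all revolve around one pattern: each documentation listing is described by a ``ListingInfo`` record that couples a shell command with selection tags, output filters, extra environment variables and an ``xfail`` flag, and a driver runs the command (with ``shell=True``, so that ``&&``/``||`` chains work), skips expected failures that unexpectedly succeed, filters the captured output and saves it for ``literalinclude``. The following is a minimal, self-contained sketch of that pattern; the names ``strip_home`` and ``LISTINGS`` and the ``echo`` placeholder command are illustrative only and not part of the actual tool:

.. code-block:: python

   import collections
   import os
   import subprocess

   # Simplified stand-in for the tool's ListingInfo record.
   ListingInfo = collections.namedtuple(
       'ListingInfo', ['command', 'tags', 'filters', 'env', 'xfail']
   )


   def strip_home(text):
       # Hypothetical filter: hide the user's home directory in the output.
       return text.replace(os.path.expanduser('~'), '$HOME')


   LISTINGS = {
       'demo_listing': ListingInfo(
           command='echo "Found 2 check(s)"',  # placeholder for a reframe run
           tags={'local', 'demo'},
           filters=[strip_home],
           env={'RFM_COLORIZE': 'n'},
           xfail=False
       )
   }

   for name, info in LISTINGS.items():
       # shell=True allows compound commands chained with && and ||, which
       # is why the patch adds it to the runcmd partial.
       completed = subprocess.run(info.command, shell=True, text=True,
                                  capture_output=True,
                                  env={**os.environ, **info.env})
       if info.xfail and completed.returncode == 0:
           print(f'{info.command} should have failed, but it did not; '
                 f'skipping...')
           continue

       output = completed.stdout
       for apply_filter in info.filters:
           output = apply_filter(output)

       with open(f'{name}.txt', 'w') as fp:
           fp.write(output)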
diff --git a/reframe/core/meta.py b/reframe/core/meta.py index da43baf443..642c4dc4e6 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -482,9 +482,6 @@ def __call__(cls, *args, **kwargs): if not isinstance(fixt_vars, collections.abc.Mapping): raise TypeError("'fixt_vars' argument must be a mapping") - # Intercept is_fixture argument to flag an instance as a fixture - # is_fixture = kwargs.pop('is_fixture', False) - obj = cls.__new__(cls, *args, **kwargs) # Insert the var and param spaces diff --git a/reframe/schemas/runreport.json b/reframe/schemas/runreport.json index 6575b65609..caabfdad9e 100644 --- a/reframe/schemas/runreport.json +++ b/reframe/schemas/runreport.json @@ -98,10 +98,7 @@ "time_total": {"type": ["number", "null"]}, "unique_name": {"type": "string"} }, - "required": [ - "environment", "fail_phase", "fail_reason", "filename", - "result", "stagedir", "system", "time_total", "unique_name" - ] + "required": ["environment", "stagedir", "system", "unique_name"] } }, "type": "object", @@ -125,7 +122,7 @@ "version": {"type": "string"}, "workdir": {"type": "string"} }, - "required": ["data_version", "hostname", "time_elapsed", "time_start"] + "required": ["data_version"] }, "restored_cases": { "type": "array", @@ -145,9 +142,7 @@ "items": {"$ref": "#/defs/testcase_type"} } }, - "required": [ - "num_cases", "num_failures", "num_aborted", "runid", "testcases" - ] + "required": ["testcases"] } } }, diff --git a/tutorials/basics/stream/stream3.py b/tutorials/basics/stream/stream3.py index f16a5edf71..249a0a8094 100644 --- a/tutorials/basics/stream/stream3.py +++ b/tutorials/basics/stream/stream3.py @@ -23,7 +23,7 @@ class StreamWithRefTest(rfm.RegressionTest): } reference = { 'catalina': { - 'Copy': (55200, -0.05, 0.05, 'MB/s'), + 'Copy': (25200, -0.05, 0.05, 'MB/s'), 'Scale': (16800, -0.05, 0.05, 'MB/s'), 'Add': (18500, -0.05, 0.05, 'MB/s'), 'Triad': (18800, -0.05, 0.05, 'MB/s') diff --git a/unittests/test_parameters.py b/unittests/test_parameters.py index c29ad5c419..5cec18cda0 100644 --- a/unittests/test_parameters.py +++ b/unittests/test_parameters.py @@ -50,7 +50,6 @@ def test_abstract_param(): class MyTest(Abstract): pass - print(MyTest.param_space) assert MyTest.param_space['P0'] == () assert MyTest.param_space['P1'] == ('b',) From 5a6e35bd0e7112cc636de89e53aa50895fe78b76 Mon Sep 17 00:00:00 2001 From: Vasileios Karakasis Date: Sun, 23 Jan 2022 19:23:19 +0100 Subject: [PATCH 62/62] Fix unit tests --- unittests/test_policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unittests/test_policies.py b/unittests/test_policies.py index 0b43d8f419..66dc4924c2 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -279,7 +279,7 @@ def test_runall(make_runner, make_cases, common_exec_ctx, tmp_path): runreport.load_report(tmp_path / 'invalid.json') # Generate a report that does not comply to the schema - del report['session_info']['hostname'] + del report['session_info']['data_version'] with open(tmp_path / 'invalid-version.json', 'w') as fp: jsonext.dump(report, fp)
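The schema relaxation in PATCH 61 is what makes the test fix in PATCH 62 necessary: once ``hostname`` is no longer in the ``required`` list of ``session_info``, deleting it can no longer produce an invalid report, so the test deletes ``data_version`` instead. A toy sketch of the required-fields semantics using the ``jsonschema`` package follows; the schema fragment is trimmed down from the one above and the field values are placeholders:

.. code-block:: python

   import jsonschema

   # Trimmed-down session_info constraints after the change: only
   # 'data_version' remains required.
   schema = {
       'type': 'object',
       'properties': {
           'data_version': {'type': 'string'},
           'hostname': {'type': 'string'},
           'time_elapsed': {'type': ['number', 'null']}
       },
       'required': ['data_version']
   }

   session_info = {'data_version': '1.0', 'hostname': 'myhost'}

   # Deleting 'hostname' no longer invalidates the report...
   del session_info['hostname']
   jsonschema.validate(session_info, schema)   # passes

   # ...whereas deleting 'data_version' still does.
   del session_info['data_version']
   try:
       jsonschema.validate(session_info, schema)
   except jsonschema.ValidationError as err:
       print(f'invalid report: {err.message}')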