diff --git a/cscs-checks/system/io/ior_check.py b/cscs-checks/system/io/ior_check.py index da8eff96a4..7255741b88 100644 --- a/cscs-checks/system/io/ior_check.py +++ b/cscs-checks/system/io/ior_check.py @@ -5,7 +5,6 @@ import getpass import os -import re import reframe as rfm import reframe.utility.sanity as sn @@ -173,5 +172,6 @@ def set_perf_patterns(self): } @run_after('init') - def set_dependency(self): - self.depends_on(re.sub(r'IorReadCheck', 'IorWriteCheck', self.name)) + def set_deps(self): + variant = IorWriteCheck.get_variant_nums(base_dir=self.base_dir)[0] + self.depends_on(IorWriteCheck.variant_name(variant)) diff --git a/cscs-checks/tools/profiling_and_debugging/notool.py b/cscs-checks/tools/profiling_and_debugging/notool.py index f2a21333d0..52acd68260 100644 --- a/cscs-checks/tools/profiling_and_debugging/notool.py +++ b/cscs-checks/tools/profiling_and_debugging/notool.py @@ -32,7 +32,6 @@ class JacobiNoToolHybrid(rfm.RegressionTest): @run_after('init') def set_descr_name(self): self.descr = f'Jacobi (without tool) {self.lang} check' - self.name = f'{type(self).__name__}_{self.lang.replace("+", "p")}' @run_after('init') def remove_buggy_prgenv(self): diff --git a/docs/listings/alltests_daint.txt b/docs/listings/alltests_daint.txt new file mode 100644 index 0000000000..48192eadaa --- /dev/null +++ b/docs/listings/alltests_daint.txt @@ -0,0 +1,130 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/basics/ -R -n HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest --performance-report -r' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: (R) '/home/user/Devel/reframe/tutorials/basics' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[==========] Running 4 check(s) +[==========] Started on Sat Jan 22 22:43:38 2022 + +[----------] start processing checks +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+builtin +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+intel +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+pgi +[ RUN ] HelloMultiLangTest %lang=cpp @daint:login+cray +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+intel +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+pgi +[ RUN ] HelloMultiLangTest %lang=cpp @daint:gpu+cray +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+intel +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+pgi +[ RUN ] HelloMultiLangTest %lang=cpp @daint:mc+cray +[ RUN ] HelloMultiLangTest %lang=c @daint:login+builtin +[ RUN ] HelloMultiLangTest %lang=c @daint:login+gnu +[ RUN ] HelloMultiLangTest %lang=c @daint:login+intel +[ RUN ] HelloMultiLangTest %lang=c @daint:login+pgi +[ RUN ] HelloMultiLangTest %lang=c @daint:login+cray +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+gnu +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+intel +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+pgi +[ RUN ] HelloMultiLangTest %lang=c @daint:gpu+cray +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+gnu +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+intel +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+pgi +[ RUN ] HelloMultiLangTest %lang=c @daint:mc+cray +[ RUN ] HelloThreadedExtended2Test @daint:login+builtin +[ RUN ] HelloThreadedExtended2Test @daint:login+gnu +[ RUN ] 
HelloThreadedExtended2Test @daint:login+intel +[ RUN ] HelloThreadedExtended2Test @daint:login+pgi +[ RUN ] HelloThreadedExtended2Test @daint:login+cray +[ RUN ] HelloThreadedExtended2Test @daint:gpu+gnu +[ RUN ] HelloThreadedExtended2Test @daint:gpu+intel +[ RUN ] HelloThreadedExtended2Test @daint:gpu+pgi +[ RUN ] HelloThreadedExtended2Test @daint:gpu+cray +[ RUN ] HelloThreadedExtended2Test @daint:mc+gnu +[ RUN ] HelloThreadedExtended2Test @daint:mc+intel +[ RUN ] HelloThreadedExtended2Test @daint:mc+pgi +[ RUN ] HelloThreadedExtended2Test @daint:mc+cray +[ RUN ] StreamWithRefTest @daint:login+gnu +[ RUN ] StreamWithRefTest @daint:gpu+gnu +[ RUN ] StreamWithRefTest @daint:mc+gnu +[ OK ] ( 1/42) HelloMultiLangTest %lang=cpp @daint:login+builtin [compile: 4.053s run: 36.016s total: 43.208s] +[ OK ] ( 2/42) HelloMultiLangTest %lang=cpp @daint:login+gnu [compile: 4.047s run: 36.009s total: 43.203s] +[ OK ] ( 3/42) HelloMultiLangTest %lang=cpp @daint:login+intel [compile: 3.431s run: 35.376s total: 43.206s] +[ OK ] ( 4/42) HelloMultiLangTest %lang=cpp @daint:login+pgi [compile: 2.758s run: 34.675s total: 43.208s] +[ OK ] ( 5/42) HelloMultiLangTest %lang=cpp @daint:login+cray [compile: 2.149s run: 34.052s total: 43.211s] +[ OK ] ( 6/42) HelloMultiLangTest %lang=cpp @daint:gpu+gnu [compile: 2.139s run: 60.830s total: 69.995s] +[ OK ] ( 7/42) HelloMultiLangTest %lang=cpp @daint:gpu+intel [compile: 8.863s run: 55.184s total: 70.004s] +[ OK ] ( 8/42) HelloMultiLangTest %lang=c @daint:login+builtin [compile: 32.460s run: 18.053s total: 69.949s] +[ OK ] ( 9/42) HelloMultiLangTest %lang=c @daint:login+gnu [compile: 27.081s run: 18.051s total: 69.954s] +[ OK ] (10/42) HelloMultiLangTest %lang=c @daint:login+intel [compile: 39.615s run: 32.065s total: 87.922s] +[ OK ] (11/42) HelloMultiLangTest %lang=c @daint:login+pgi [compile: 38.873s run: 31.356s total: 87.926s] +[ OK ] (12/42) HelloMultiLangTest %lang=c @daint:login+cray [compile: 38.265s run: 30.731s total: 87.931s] +[ OK ] (13/42) HelloThreadedExtended2Test @daint:login+builtin [compile: 12.837s run: 7.254s total: 92.404s] +[ OK ] (14/42) HelloThreadedExtended2Test @daint:login+gnu [compile: 31.377s run: 31.894s total: 119.747s] +[ OK ] (15/42) HelloThreadedExtended2Test @daint:login+intel [compile: 30.708s run: 31.252s total: 119.749s] +[ OK ] (16/42) HelloThreadedExtended2Test @daint:login+pgi [compile: 18.581s run: 30.571s total: 119.753s] +[ OK ] (17/42) HelloThreadedExtended2Test @daint:login+cray [compile: 17.981s run: 29.963s total: 119.756s] +[ OK ] (18/42) HelloMultiLangTest %lang=cpp @daint:mc+intel [compile: 33.792s run: 87.427s total: 130.572s] +[ OK ] (19/42) HelloMultiLangTest %lang=cpp @daint:mc+pgi [compile: 33.120s run: 84.192s total: 130.591s] +[ OK ] (20/42) HelloMultiLangTest %lang=cpp @daint:mc+cray [compile: 32.474s run: 81.119s total: 130.609s] +[ OK ] (21/42) HelloMultiLangTest %lang=c @daint:mc+pgi [compile: 13.468s run: 51.389s total: 130.540s] +[ OK ] (22/42) HelloMultiLangTest %lang=c @daint:mc+cray [compile: 12.847s run: 48.146s total: 130.559s] +[ OK ] (23/42) HelloMultiLangTest %lang=cpp @daint:gpu+pgi [compile: 8.167s run: 120.870s total: 138.874s] +[ OK ] (24/42) HelloMultiLangTest %lang=cpp @daint:gpu+cray [compile: 7.412s run: 109.470s total: 138.883s] +[ OK ] (25/42) HelloMultiLangTest %lang=c @daint:gpu+gnu [compile: 13.293s run: 81.519s total: 138.729s] +[ OK ] (26/42) HelloMultiLangTest %lang=c @daint:gpu+cray [compile: 11.378s run: 74.651s total: 138.736s] +[ OK ] (27/42) HelloMultiLangTest %lang=c 
@daint:mc+gnu [compile: 25.399s run: 65.789s total: 138.749s] +[ OK ] (28/42) HelloMultiLangTest %lang=c @daint:gpu+intel [compile: 12.677s run: 79.097s total: 139.421s] +[ OK ] (29/42) HelloMultiLangTest %lang=c @daint:gpu+pgi [compile: 23.579s run: 69.505s total: 139.432s] +[ OK ] (30/42) HelloThreadedExtended2Test @daint:gpu+gnu [compile: 22.616s run: 46.878s total: 139.268s] +[ OK ] (31/42) HelloThreadedExtended2Test @daint:gpu+pgi [compile: 21.265s run: 40.181s total: 139.267s] +[ OK ] (32/42) HelloThreadedExtended2Test @daint:gpu+cray [compile: 20.642s run: 37.158s total: 139.275s] +[ OK ] (33/42) HelloThreadedExtended2Test @daint:mc+gnu [compile: 4.691s run: 30.273s total: 139.280s] +[ OK ] (34/42) HelloThreadedExtended2Test @daint:mc+intel [compile: 28.304s run: 19.597s total: 139.281s] +[ OK ] (35/42) StreamWithRefTest @daint:login+gnu [compile: 24.257s run: 10.594s total: 139.286s] +[ OK ] (36/42) HelloMultiLangTest %lang=c @daint:mc+intel [compile: 14.135s run: 70.976s total: 146.961s] +[ OK ] (37/42) HelloMultiLangTest %lang=cpp @daint:mc+gnu [compile: 7.397s run: 194.065s total: 229.737s] +[ OK ] (38/42) HelloThreadedExtended2Test @daint:gpu+intel [compile: 21.956s run: 133.885s total: 229.342s] +[ OK ] (39/42) HelloThreadedExtended2Test @daint:mc+pgi [compile: 27.596s run: 106.403s total: 229.264s] +[ OK ] (40/42) HelloThreadedExtended2Test @daint:mc+cray [compile: 26.958s run: 103.318s total: 229.274s] +[ OK ] (41/42) StreamWithRefTest @daint:gpu+gnu [compile: 38.940s run: 98.873s total: 229.279s] +[ OK ] (42/42) StreamWithRefTest @daint:mc+gnu [compile: 38.304s run: 94.811s total: 229.299s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 42/42 test case(s) from 4 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 22:47:28 2022 +============================================================================== +PERFORMANCE REPORT +------------------------------------------------------------------------------ +StreamWithRefTest +- daint:login + - gnu + * num_tasks: 1 + * Copy: 67915.3 MB/s + * Scale: 37485.6 MB/s + * Add: 39545.5 MB/s + * Triad: 39906.2 MB/s +- daint:gpu + - gnu + * num_tasks: 1 + * Copy: 50553.4 MB/s + * Scale: 34780.1 MB/s + * Add: 38043.6 MB/s + * Triad: 38522.2 MB/s +- daint:mc + - gnu + * num_tasks: 1 + * Copy: 48200.9 MB/s + * Scale: 31370.4 MB/s + * Add: 33000.2 MB/s + * Triad: 33205.5 MB/s +------------------------------------------------------------------------------ +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-n3d18lq9.log' diff --git a/docs/listings/deps_complex_run.txt b/docs/listings/deps_complex_run.txt new file mode 100644 index 0000000000..d552a280d2 --- /dev/null +++ b/docs/listings/deps_complex_run.txt @@ -0,0 +1,117 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: '/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 10 check(s) +[==========] Started on Sat Jan 22 23:44:18 2022 + +[----------] start processing checks +[ RUN ] T0 @generic:default+builtin +[ OK ] ( 1/10) T0 @generic:default+builtin [compile: 0.018s run: 0.292s total: 0.336s] +[ RUN ] T4 @generic:default+builtin 
+[ OK ] ( 2/10) T4 @generic:default+builtin [compile: 0.016s run: 0.336s total: 0.380s] +[ RUN ] T5 @generic:default+builtin +[ OK ] ( 3/10) T5 @generic:default+builtin [compile: 0.016s run: 0.389s total: 0.446s] +[ RUN ] T1 @generic:default+builtin +[ OK ] ( 4/10) T1 @generic:default+builtin [compile: 0.016s run: 0.459s total: 0.501s] +[ RUN ] T8 @generic:default+builtin +[ FAIL ] ( 5/10) T8 @generic:default+builtin [compile: n/a run: n/a total: 0.006s] +==> test failed during 'setup': test staged in '/home/user/Repositories/reframe/stage/generic/default/builtin/T8' +[ FAIL ] ( 6/10) T9 @generic:default+builtin [compile: n/a run: n/a total: n/a] +==> test failed during 'startup': test staged in None +[ RUN ] T6 @generic:default+builtin +[ OK ] ( 7/10) T6 @generic:default+builtin [compile: 0.016s run: 0.530s total: 0.584s] +[ RUN ] T2 @generic:default+builtin +[ RUN ] T3 @generic:default+builtin +[ FAIL ] ( 8/10) T2 @generic:default+builtin [compile: 0.019s run: 0.324s total: 0.424s] +==> test failed during 'sanity': test staged in '/home/user/Repositories/reframe/stage/generic/default/builtin/T2' +[ FAIL ] ( 9/10) T7 @generic:default+builtin [compile: n/a run: n/a total: n/a] +==> test failed during 'startup': test staged in None +[ OK ] (10/10) T3 @generic:default+builtin [compile: 0.017s run: 0.328s total: 0.403s] +[----------] all spawned checks have finished + +[ FAILED ] Ran 10/10 test case(s) from 10 check(s) (4 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 23:44:21 2022 + +============================================================================== +SUMMARY OF FAILURES +------------------------------------------------------------------------------ +FAILURE INFO for T8 + * Expanded name: T8 + * Description: T8 + * System partition: generic:default + * Environment: builtin + * Stage directory: /home/user/Repositories/reframe/stage/generic/default/builtin/T8 + * Node list: + * Job type: local (id=None) + * Dependencies (conceptual): ['T1'] + * Dependencies (actual): [('T1', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: setup + * Rerun with '-n T8 -p builtin --system generic:default -r' + * Reason: exception +Traceback (most recent call last): + File "/home/user/Repositories/reframe/reframe/frontend/executors/__init__.py", line 291, in _safe_call + return fn(*args, **kwargs) + File "/home/user/Repositories/reframe/reframe/core/hooks.py", line 82, in _fn + getattr(obj, h.__name__)() + File "/home/user/Repositories/reframe/reframe/core/hooks.py", line 32, in _fn + func(*args, **kwargs) + File "/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py", line 180, in fail + raise Exception +Exception + +------------------------------------------------------------------------------ +FAILURE INFO for T9 + * Expanded name: T9 + * Description: T9 + * System partition: generic:default + * Environment: builtin + * Stage directory: None + * Node list: + * Job type: local (id=None) + * Dependencies (conceptual): ['T8'] + * Dependencies (actual): [('T8', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: startup + * Rerun with '-n T9 -p builtin --system generic:default -r' + * Reason: task dependency error: dependencies failed +------------------------------------------------------------------------------ +FAILURE INFO for T2 + * Expanded name: T2 + * Description: T2 + * System partition: generic:default + * Environment: builtin + * Stage directory: /home/user/Repositories/reframe/stage/generic/default/builtin/T2 + 
* Node list: tresa.localNone + * Job type: local (id=49427) + * Dependencies (conceptual): ['T6'] + * Dependencies (actual): [('T6', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: sanity + * Rerun with '-n T2 -p builtin --system generic:default -r' + * Reason: sanity error: 31 != 30 +------------------------------------------------------------------------------ +FAILURE INFO for T7 + * Expanded name: T7 + * Description: T7 + * System partition: generic:default + * Environment: builtin + * Stage directory: None + * Node list: + * Job type: local (id=None) + * Dependencies (conceptual): ['T2'] + * Dependencies (actual): [('T2', 'generic:default', 'builtin')] + * Maintainers: [] + * Failing phase: startup + * Rerun with '-n T7 -p builtin --system generic:default -r' + * Reason: task dependency error: dependencies failed +------------------------------------------------------------------------------ +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-92y3fr5s.log' diff --git a/docs/listings/deps_rerun_t6.txt b/docs/listings/deps_rerun_t6.txt new file mode 100644 index 0000000000..eb40028b16 --- /dev/null +++ b/docs/listings/deps_rerun_t6.txt @@ -0,0 +1,22 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe --restore-session --keep-stage-files -n T6 -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: '/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Sat Jan 22 23:44:25 2022 + +[----------] start processing checks +[ RUN ] T6 @generic:default+builtin +[ OK ] (1/1) T6 @generic:default+builtin [compile: 0.017s run: 0.286s total: 0.330s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 1/1 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 23:44:25 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-mug0a4cb.log' diff --git a/docs/listings/deps_run_t6.txt b/docs/listings/deps_run_t6.txt new file mode 100644 index 0000000000..3bdab8c1b0 --- /dev/null +++ b/docs/listings/deps_run_t6.txt @@ -0,0 +1,30 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: '/home/user/Repositories/reframe/unittests/resources/checks_unlisted/deps_complex.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 5 check(s) +[==========] Started on Sat Jan 22 23:44:25 2022 + +[----------] start processing checks +[ RUN ] T0 @generic:default+builtin +[ OK ] (1/5) T0 @generic:default+builtin [compile: 0.017s run: 0.289s total: 0.331s] +[ RUN ] T4 @generic:default+builtin +[ OK ] (2/5) T4 @generic:default+builtin [compile: 0.018s run: 0.330s total: 0.374s] +[ RUN ] T5 @generic:default+builtin +[ OK ] (3/5) T5 @generic:default+builtin [compile: 0.018s run: 0.384s total: 0.442s] +[ RUN ] T1 @generic:default+builtin +[ OK ] (4/5) T1 @generic:default+builtin 
[compile: 0.018s run: 0.452s total: 0.494s] +[ RUN ] T6 @generic:default+builtin +[ OK ] (5/5) T6 @generic:default+builtin [compile: 0.018s run: 0.525s total: 0.582s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 5/5 test case(s) from 5 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 23:44:28 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-ktylyaqk.log' diff --git a/docs/listings/hello1.txt b/docs/listings/hello1.txt new file mode 100644 index 0000000000..ebc7eabcf5 --- /dev/null +++ b/docs/listings/hello1.txt @@ -0,0 +1,22 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+c22440c1 + command: './bin/reframe -c tutorials/basics/hello/hello1.py -r' + launched by: user@host + working directory: '/path/to/reframe' + settings file: '' + check search path: '/path/to/reframe/tutorials/basics/hello/hello1.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Sat Jan 22 13:21:50 2022 + +[----------] start processing checks +[ RUN ] HelloTest @generic:default+builtin +[ OK ] (1/1) HelloTest @generic:default+builtin [compile: 0.272s run: 0.359s total: 0.784s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 1/1 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 13:21:51 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-8c6ybdvg.log' diff --git a/docs/listings/hello2.txt b/docs/listings/hello2.txt new file mode 100644 index 0000000000..8056ae9797 --- /dev/null +++ b/docs/listings/hello2.txt @@ -0,0 +1,44 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+c22440c1 + command: './bin/reframe -c tutorials/basics/hello/hello2.py -r' + launched by: user@host + working directory: '/path/to/reframe' + settings file: '' + check search path: '/path/to/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' + +[==========] Running 2 check(s) +[==========] Started on Sat Jan 22 13:21:51 2022 + +[----------] start processing checks +[ RUN ] HelloMultiLangTest %lang=cpp @generic:default+builtin +[ RUN ] HelloMultiLangTest %lang=c @generic:default+builtin +[ FAIL ] (1/2) HelloMultiLangTest %lang=cpp @generic:default+builtin [compile: 0.006s run: n/a total: 0.043s] +==> test failed during 'compile': test staged in '/path/to/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp' +[ OK ] (2/2) HelloMultiLangTest %lang=c @generic:default+builtin [compile: 0.268s run: 0.368s total: 0.813s] +[----------] all spawned checks have finished + +[ FAILED ] Ran 2/2 test case(s) from 2 check(s) (1 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 13:21:52 2022 + +============================================================================== +SUMMARY OF FAILURES +------------------------------------------------------------------------------ +FAILURE INFO for HelloMultiLangTest_cpp + * Expanded name: HelloMultiLangTest %lang=cpp + * Description: HelloMultiLangTest %lang=cpp + * System partition: generic:default + * Environment: builtin + * Stage directory: /path/to/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp + * Node list: + * Job type: local (id=None) + * Dependencies (conceptual): [] + * Dependencies (actual): [] + * Maintainers: [] + * Failing 
phase: compile + * Rerun with '-n HelloMultiLangTest_cpp -p builtin --system generic:default -r' + * Reason: build system error: I do not know how to compile a C++ program +------------------------------------------------------------------------------ +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-tse_opq0.log' diff --git a/docs/listings/hello2_catalina.txt b/docs/listings/hello2_catalina.txt new file mode 100644 index 0000000000..36ca1a3dd3 --- /dev/null +++ b/docs/listings/hello2_catalina.txt @@ -0,0 +1,28 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+c22440c1 + command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r' + launched by: user@host + working directory: '/path/to/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/path/to/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' + +[==========] Running 2 check(s) +[==========] Started on Sat Jan 22 13:21:53 2022 + +[----------] start processing checks +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+clang +[ RUN ] HelloMultiLangTest %lang=c @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=c @catalina:default+clang +[ OK ] (1/4) HelloMultiLangTest %lang=c @catalina:default+gnu [compile: 0.360s run: 0.511s total: 1.135s] +[ OK ] (2/4) HelloMultiLangTest %lang=c @catalina:default+clang [compile: 0.359s run: 0.514s total: 1.139s] +[ OK ] (3/4) HelloMultiLangTest %lang=cpp @catalina:default+gnu [compile: 0.563s run: 0.549s total: 1.343s] +[ OK ] (4/4) HelloMultiLangTest %lang=cpp @catalina:default+clang [compile: 0.564s run: 0.551s total: 1.346s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 4/4 test case(s) from 2 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 13:21:54 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-iehz9eub.log' diff --git a/docs/listings/hello2_list_verbose.txt b/docs/listings/hello2_list_verbose.txt new file mode 100644 index 0000000000..b334a9d0be --- /dev/null +++ b/docs/listings/hello2_list_verbose.txt @@ -0,0 +1,82 @@ +Loading user configuration +Loading configuration file: 'tutorials/config/settings.py' +Detecting system +Looking for a matching configuration entry for system 'host' +Configuration found: picking system 'generic' +Selecting subconfig for 'generic' +Initializing runtime +Selecting subconfig for 'generic:default' +Initializing system partition 'default' +Selecting subconfig for 'generic' +Initializing system 'generic' +Initializing modules system 'nomod' +detecting topology info for generic:default +> found topology file '/home/user/.reframe/topology/generic-default/processor.json'; loading... 
+> device auto-detection is not supported +[ReFrame Environment] + RFM_CHECK_SEARCH_PATH= + RFM_CHECK_SEARCH_RECURSIVE= + RFM_CLEAN_STAGEDIR= + RFM_COLORIZE=n + RFM_COMPACT_TEST_NAMES=n + RFM_CONFIG_FILE= + RFM_DUMP_PIPELINE_PROGRESS= + RFM_GIT_TIMEOUT= + RFM_GRAYLOG_ADDRESS= + RFM_HTTPJSON_URL= + RFM_IGNORE_CHECK_CONFLICTS= + RFM_IGNORE_REQNODENOTAVAIL= + RFM_INSTALL_PREFIX=/home/user/Repositories/reframe + RFM_KEEP_STAGE_FILES= + RFM_MODULE_MAPPINGS= + RFM_MODULE_MAP_FILE= + RFM_NON_DEFAULT_CRAYPE= + RFM_OUTPUT_DIR= + RFM_PERFLOG_DIR= + RFM_PIPELINE_TIMEOUT= + RFM_PREFIX= + RFM_PURGE_ENVIRONMENT= + RFM_REMOTE_DETECT= + RFM_REMOTE_WORKDIR= + RFM_REPORT_FILE= + RFM_REPORT_JUNIT= + RFM_RESOLVE_MODULE_CONFLICTS= + RFM_SAVE_LOG_FILES= + RFM_STAGE_DIR= + RFM_SYSLOG_ADDRESS= + RFM_SYSTEM= + RFM_TIMESTAMP_DIRS= + RFM_TRAP_JOB_ERRORS= + RFM_UNLOAD_MODULES= + RFM_USER_MODULES= + RFM_USE_LOGIN_SHELL= + RFM_VERBOSE= +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +Looking for tests in '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py' +Validating '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py': OK + > Loaded 2 test(s) +Loaded 2 test(s) +Generated 2 test case(s) +Filtering test cases(s) by name: 2 remaining +Filtering test cases(s) by tags: 2 remaining +Filtering test cases(s) by other attributes: 2 remaining +Building and validating the full test DAG +Full test DAG: + ('HelloMultiLangTest_cpp', 'generic:default', 'builtin') -> [] + ('HelloMultiLangTest_c', 'generic:default', 'builtin') -> [] +Final number of test cases: 2 +[List of matched checks] +- HelloMultiLangTest %lang=cpp +- HelloMultiLangTest %lang=c +Found 2 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-fs1arce0.log' diff --git a/docs/listings/hello2_print_stdout.txt b/docs/listings/hello2_print_stdout.txt new file mode 100644 index 0000000000..75e1426db0 --- /dev/null +++ b/docs/listings/hello2_print_stdout.txt @@ -0,0 +1,32 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[==========] Running 2 check(s) +[==========] Started on Sun Jan 23 00:11:07 2022 + +[----------] start processing checks +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=cpp @catalina:default+clang +[ RUN ] HelloMultiLangTest %lang=c @catalina:default+gnu +[ RUN ] HelloMultiLangTest %lang=c @catalina:default+clang +rfm_HelloMultiLangTest_cpp_job.out +[ OK ] (1/4) HelloMultiLangTest %lang=cpp @catalina:default+gnu [compile: 0.737s run: 0.748s total: 1.765s] +rfm_HelloMultiLangTest_cpp_job.out +[ OK ] (2/4) HelloMultiLangTest %lang=cpp @catalina:default+clang [compile: 0.735s run: 0.909s 
total: 1.928s] +rfm_HelloMultiLangTest_c_job.out +[ OK ] (3/4) HelloMultiLangTest %lang=c @catalina:default+gnu [compile: 0.719s run: 1.072s total: 2.090s] +rfm_HelloMultiLangTest_c_job.out +[ OK ] (4/4) HelloMultiLangTest %lang=c @catalina:default+clang [compile: 0.714s run: 1.074s total: 2.094s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 4/4 test case(s) from 2 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sun Jan 23 00:11:10 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-jumlrg66.log' diff --git a/docs/listings/hello2_typo.txt b/docs/listings/hello2_typo.txt new file mode 100644 index 0000000000..282bba2d50 --- /dev/null +++ b/docs/listings/hello2_typo.txt @@ -0,0 +1,18 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -c tutorials/basics/hello -R -l' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: (R) '/home/user/Repositories/reframe/tutorials/basics/hello' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +./bin/reframe: skipping test file '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined + lang = paramter(['c', 'cpp']) + (rerun with '-v' for more information) +[List of matched checks] +- HelloTest +Found 1 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-bzqy3nc7.log' diff --git a/docs/listings/hello2_typo_stacktrace.txt b/docs/listings/hello2_typo_stacktrace.txt new file mode 100644 index 0000000000..b530626c20 --- /dev/null +++ b/docs/listings/hello2_typo_stacktrace.txt @@ -0,0 +1,43 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+149af549 + command: './bin/reframe -c tutorials/basics/hello -R -l -v' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '' + check search path: (R) '/home/user/Repositories/reframe/tutorials/basics/hello' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +./bin/reframe: skipping test file '/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:13: name 'paramter' is not defined + lang = paramter(['c', 'cpp']) + (rerun with '-v' for more information) +Traceback (most recent call last): + File "/home/user/Repositories/reframe/reframe/frontend/loader.py", line 237, in load_from_file + util.import_module_from_file(filename, force) + File "/home/user/Repositories/reframe/reframe/utility/__init__.py", line 109, in import_module_from_file + return importlib.import_module(module_name) + File "/usr/local/Cellar/python@3.9/3.9.1_6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module + return _bootstrap._gcd_import(name[level:], package, level) + File "<frozen importlib._bootstrap>", line 1030, in _gcd_import + File "<frozen importlib._bootstrap>", line 1007, in _find_and_load + File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked + File "<frozen importlib._bootstrap>", line 680, in _load_unlocked + File "<frozen importlib._bootstrap_external>", line 790, in exec_module + File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed + File "/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 12, in <module> + class HelloMultiLangTest(rfm.RegressionTest): + File "/home/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 13,
in HelloMultiLangTest + lang = paramter(['c', 'cpp']) +NameError: name 'paramter' is not defined + +Loaded 1 test(s) +Generated 1 test case(s) +Filtering test cases(s) by name: 1 remaining +Filtering test cases(s) by tags: 1 remaining +Filtering test cases(s) by other attributes: 1 remaining +Final number of test cases: 1 +[List of matched checks] +- HelloTest +Found 1 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-l21cjjas.log' diff --git a/docs/listings/hello2_verbose_load.txt b/docs/listings/hello2_verbose_load.txt new file mode 100644 index 0000000000..b97ee20a93 --- /dev/null +++ b/docs/listings/hello2_verbose_load.txt @@ -0,0 +1,80 @@ +Loading user configuration +Loading configuration file: 'tutorials/config/settings.py' +Detecting system +Looking for a matching configuration entry for system '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa' +Configuration found: picking system 'generic' +Selecting subconfig for 'generic' +Initializing runtime +Selecting subconfig for 'generic:default' +Initializing system partition 'default' +Selecting subconfig for 'generic' +Initializing system 'generic' +Initializing modules system 'nomod' +detecting topology info for generic:default +> found topology file '/Users/user/.reframe/topology/generic-default/processor.json'; loading... +> device auto-detection is not supported +[ReFrame Environment] + RFM_CHECK_SEARCH_PATH= + RFM_CHECK_SEARCH_RECURSIVE= + RFM_CLEAN_STAGEDIR= + RFM_COLORIZE= + RFM_COMPACT_TEST_NAMES= + RFM_CONFIG_FILE=tutorials/config/settings.py + RFM_GIT_TIMEOUT= + RFM_GRAYLOG_ADDRESS= + RFM_HTTPJSON_URL= + RFM_IGNORE_CHECK_CONFLICTS= + RFM_IGNORE_REQNODENOTAVAIL= + RFM_INSTALL_PREFIX=/Users/user/Repositories/reframe + RFM_KEEP_STAGE_FILES= + RFM_MODULE_MAPPINGS= + RFM_MODULE_MAP_FILE= + RFM_NON_DEFAULT_CRAYPE= + RFM_OUTPUT_DIR= + RFM_PERFLOG_DIR= + RFM_PREFIX= + RFM_PURGE_ENVIRONMENT= + RFM_REMOTE_DETECT= + RFM_REMOTE_WORKDIR= + RFM_REPORT_FILE= + RFM_REPORT_JUNIT= + RFM_RESOLVE_MODULE_CONFLICTS= + RFM_SAVE_LOG_FILES= + RFM_STAGE_DIR= + RFM_SYSLOG_ADDRESS= + RFM_SYSTEM= + RFM_TIMESTAMP_DIRS= + RFM_TRAP_JOB_ERRORS= + RFM_UNLOAD_MODULES= + RFM_USER_MODULES= + RFM_USE_LOGIN_SHELL= + RFM_VERBOSE= +[ReFrame Setup] + version: 3.10.0-dev.2+cb5edd8b + command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/settings.py' + check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +Looking for tests in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' +Validating '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': OK + > Loaded 2 test(s) +Loaded 2 test(s) +Generated 2 test case(s) +Filtering test cases(s) by name: 2 remaining +Filtering test cases(s) by tags: 2 remaining +Filtering test cases(s) by other attributes: 2 remaining +Building and validating the full test DAG +Full test DAG: + ('HelloMultiLangTest_cpp', 'generic:default', 'builtin') -> [] + ('HelloMultiLangTest_c', 'generic:default', 'builtin') -> [] +Final number of test cases: 2 +[List of matched checks] +- HelloMultiLangTest %lang=cpp +- HelloMultiLangTest %lang=c +Found 2 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-fpjj5gru.log' 
diff --git a/docs/listings/hellomp1.txt b/docs/listings/hellomp1.txt new file mode 100644 index 0000000000..b032b8decf --- /dev/null +++ b/docs/listings/hellomp1.txt @@ -0,0 +1,24 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+c22440c1 + command: './bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r' + launched by: user@host + working directory: '/path/to/reframe' + settings file: '/path/to/reframe/tutorials/config/settings.py' + check search path: '/path/to/reframe/tutorials/basics/hellomp/hellomp1.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Sat Jan 22 13:21:54 2022 + +[----------] start processing checks +[ RUN ] HelloThreadedTest @catalina:default+gnu +[ RUN ] HelloThreadedTest @catalina:default+clang +[ OK ] (1/2) HelloThreadedTest @catalina:default+gnu [compile: 0.963s run: 0.296s total: 1.418s] +[ OK ] (2/2) HelloThreadedTest @catalina:default+clang [compile: 0.760s run: 0.434s total: 1.421s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 2/2 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 13:21:56 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-chq08zds.log' diff --git a/docs/listings/hellomp2.txt b/docs/listings/hellomp2.txt new file mode 100644 index 0000000000..c8a4b3918e --- /dev/null +++ b/docs/listings/hellomp2.txt @@ -0,0 +1,60 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+c22440c1 + command: './bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r' + launched by: user@host + working directory: '/path/to/reframe' + settings file: '/path/to/reframe/tutorials/config/settings.py' + check search path: '/path/to/reframe/tutorials/basics/hellomp/hellomp2.py' + stage directory: '/path/to/reframe/stage' + output directory: '/path/to/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Sat Jan 22 13:21:56 2022 + +[----------] start processing checks +[ RUN ] HelloThreadedExtendedTest @catalina:default+gnu +[ RUN ] HelloThreadedExtendedTest @catalina:default+clang +[ FAIL ] (1/2) HelloThreadedExtendedTest @catalina:default+clang [compile: 0.761s run: 0.413s total: 1.401s] +==> test failed during 'sanity': test staged in '/path/to/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest' +[ FAIL ] (2/2) HelloThreadedExtendedTest @catalina:default+gnu [compile: 0.962s run: 0.412s total: 1.538s] +==> test failed during 'sanity': test staged in '/path/to/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest' +[----------] all spawned checks have finished + +[ FAILED ] Ran 2/2 test case(s) from 1 check(s) (2 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 13:21:58 2022 + +============================================================================== +SUMMARY OF FAILURES +------------------------------------------------------------------------------ +FAILURE INFO for HelloThreadedExtendedTest + * Expanded name: HelloThreadedExtendedTest + * Description: HelloThreadedExtendedTest + * System partition: catalina:default + * Environment: gnu + * Stage directory: /path/to/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest + * Node list: tresa.localNone + * Job type: local (id=43387) + * Dependencies (conceptual): [] + * Dependencies (actual): [] + * Maintainers: [] + * Failing phase: sanity + * Rerun with '-n HelloThreadedExtendedTest -p gnu --system catalina:default -r' + 
* Reason: sanity error: 7 != 16 +------------------------------------------------------------------------------ +FAILURE INFO for HelloThreadedExtendedTest + * Expanded name: HelloThreadedExtendedTest + * Description: HelloThreadedExtendedTest + * System partition: catalina:default + * Environment: clang + * Stage directory: /path/to/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest + * Node list: tresa.localNone + * Job type: local (id=43384) + * Dependencies (conceptual): [] + * Dependencies (actual): [] + * Maintainers: [] + * Failing phase: sanity + * Rerun with '-n HelloThreadedExtendedTest -p clang --system catalina:default -r' + * Reason: sanity error: 11 != 16 +------------------------------------------------------------------------------ +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-31lkxfie.log' diff --git a/docs/listings/maketest_mixin.txt b/docs/listings/maketest_mixin.txt new file mode 100644 index 0000000000..6af25537cd --- /dev/null +++ b/docs/listings/maketest_mixin.txt @@ -0,0 +1,18 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+4fc5b12c + command: './bin/reframe -c tutorials/advanced/makefiles/maketest_mixin.py -l' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '/home/user/Repositories/reframe/tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[List of matched checks] +- MakeOnlyTestAlt %elem_type=double +- MakeOnlyTestAlt %elem_type=float +- MakefileTestAlt %elem_type=double +- MakefileTestAlt %elem_type=float +Found 4 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-4w95t2wt.log' diff --git a/docs/listings/osu_bandwidth_concretized_daint.txt b/docs/listings/osu_bandwidth_concretized_daint.txt new file mode 100644 index 0000000000..de00dc2e9f --- /dev/null +++ b/docs/listings/osu_bandwidth_concretized_daint.txt @@ -0,0 +1,23 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[List of matched checks] +- osu_bandwidth_test @daint:gpu+gnu + ^build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu + ^fetch_osu_benchmarks ~daint @daint:gpu+gnu +- osu_bandwidth_test @daint:gpu+intel + ^build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel + ^fetch_osu_benchmarks ~daint @daint:gpu+gnu +- osu_bandwidth_test @daint:gpu+pgi + ^build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi + ^fetch_osu_benchmarks ~daint @daint:gpu+gnu +Concretized 7 test case(s) + +Log file(s) saved in '/tmp/rfm-uza91jj1.log' diff --git a/docs/listings/osu_bandwidth_concretized_daint_pgi.txt b/docs/listings/osu_bandwidth_concretized_daint_pgi.txt new file mode 100644 index 0000000000..ab6c51f915 --- /dev/null +++ b/docs/listings/osu_bandwidth_concretized_daint_pgi.txt @@ -0,0 +1,17 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c 
tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[List of matched checks] +- osu_bandwidth_test @daint:gpu+pgi + ^build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi + ^fetch_osu_benchmarks ~daint @daint:gpu+pgi +Concretized 3 test case(s) + +Log file(s) saved in '/tmp/rfm-dnfdagj8.log' diff --git a/docs/listings/osu_bench_deps.txt b/docs/listings/osu_bench_deps.txt new file mode 100644 index 0000000000..4f54bba5a8 --- /dev/null +++ b/docs/listings/osu_bench_deps.txt @@ -0,0 +1,64 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -r' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[==========] Running 8 check(s) +[==========] Started on Sat Jan 22 22:49:00 2022 + +[----------] start processing checks +[ RUN ] OSUDownloadTest @daint:login+builtin +[ OK ] ( 1/22) OSUDownloadTest @daint:login+builtin [compile: 0.017s run: 1.547s total: 1.594s] +[ RUN ] OSUBuildTest @daint:gpu+gnu +[ RUN ] OSUBuildTest @daint:gpu+intel +[ RUN ] OSUBuildTest @daint:gpu+pgi +[ OK ] ( 2/22) OSUBuildTest @daint:gpu+gnu [compile: 28.351s run: 2.614s total: 31.045s] +[ RUN ] OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu +[ RUN ] OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu +[ RUN ] OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu +[ RUN ] OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu +[ RUN ] OSUBandwidthTest @daint:gpu+gnu +[ RUN ] OSULatencyTest @daint:gpu+gnu +[ OK ] ( 3/22) OSUBuildTest @daint:gpu+intel [compile: 56.259s run: 0.294s total: 57.548s] +[ OK ] ( 4/22) OSUBuildTest @daint:gpu+pgi [compile: 55.287s run: 0.274s total: 57.549s] +[ RUN ] OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi +[ RUN ] OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi +[ RUN ] OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi +[ RUN ] OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel +[ RUN ] OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi +[ RUN ] OSUBandwidthTest @daint:gpu+intel +[ RUN ] OSUBandwidthTest @daint:gpu+pgi +[ RUN ] OSULatencyTest @daint:gpu+intel +[ RUN ] OSULatencyTest @daint:gpu+pgi +[ OK ] ( 5/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu [compile: 0.019s run: 62.714s total: 66.672s] +[ OK ] ( 6/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu [compile: 0.021s run: 66.653s total: 67.092s] +[ OK ] ( 7/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu [compile: 0.019s run: 59.875s total: 67.058s] +[ OK ] ( 8/22) OSULatencyTest @daint:gpu+gnu [compile: 0.022s run: 81.297s total: 102.720s] +[ OK ] ( 9/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu [compile: 0.023s run: 97.213s total: 107.661s] +[ OK ] (10/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel [compile: 0.017s run: 80.743s total: 81.586s] +[ OK ] (11/22) OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi 
[compile: 0.017s run: 141.746s total: 145.957s] +[ OK ] (12/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel [compile: 0.016s run: 138.667s total: 145.944s] +[ OK ] (13/22) OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi [compile: 0.017s run: 135.257s total: 145.938s] +[ OK ] (14/22) OSUBandwidthTest @daint:gpu+gnu [compile: 0.034s run: 156.112s total: 172.474s] +[ OK ] (15/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel [compile: 0.017s run: 173.876s total: 187.629s] +[ OK ] (16/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi [compile: 0.016s run: 171.544s total: 194.752s] +[ OK ] (17/22) OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel [compile: 0.017s run: 175.095s total: 195.082s] +[ OK ] (18/22) OSULatencyTest @daint:gpu+pgi [compile: 0.017s run: 159.422s total: 195.672s] +[ OK ] (19/22) OSULatencyTest @daint:gpu+intel [compile: 0.017s run: 163.070s total: 196.207s] +[ OK ] (20/22) OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi [compile: 0.016s run: 180.370s total: 197.379s] +[ OK ] (21/22) OSUBandwidthTest @daint:gpu+intel [compile: 0.017s run: 240.385s total: 266.772s] +[ OK ] (22/22) OSUBandwidthTest @daint:gpu+pgi [compile: 0.018s run: 236.944s total: 266.766s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 22/22 test case(s) from 8 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 22:54:26 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-15ghvao1.log' diff --git a/docs/listings/osu_bench_fixtures_list.txt b/docs/listings/osu_bench_fixtures_list.txt new file mode 100644 index 0000000000..c863cdcc63 --- /dev/null +++ b/docs/listings/osu_bench_fixtures_list.txt @@ -0,0 +1,56 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -l' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[List of matched checks] +- osu_allreduce_test %mpi_tasks=16 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_allreduce_test %mpi_tasks=8 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_allreduce_test %mpi_tasks=4 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_allreduce_test %mpi_tasks=2 + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_bandwidth_test + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +- osu_latency_test + ^build_osu_benchmarks ~daint:gpu+gnu + ^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+intel + 
^fetch_osu_benchmarks ~daint + ^build_osu_benchmarks ~daint:gpu+pgi + ^fetch_osu_benchmarks ~daint +Found 6 check(s) + +Log file(s) saved in '/tmp/rfm-eopdze64.log' diff --git a/docs/listings/osu_bench_fixtures_run.txt b/docs/listings/osu_bench_fixtures_run.txt new file mode 100644 index 0000000000..1f8851671c --- /dev/null +++ b/docs/listings/osu_bench_fixtures_run.txt @@ -0,0 +1,64 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+76e02667 + command: './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -r' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[==========] Running 10 check(s) +[==========] Started on Sat Jan 22 23:08:13 2022 + +[----------] start processing checks +[ RUN ] fetch_osu_benchmarks ~daint @daint:gpu+gnu +[ OK ] ( 1/22) fetch_osu_benchmarks ~daint @daint:gpu+gnu [compile: 0.016s run: 2.757s total: 2.807s] +[ RUN ] build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu +[ RUN ] build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel +[ RUN ] build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi +[ OK ] ( 2/22) build_osu_benchmarks ~daint:gpu+gnu @daint:gpu+gnu [compile: 25.384s run: 2.389s total: 27.839s] +[ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu +[ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu +[ RUN ] osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu +[ RUN ] osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu +[ RUN ] osu_bandwidth_test @daint:gpu+gnu +[ RUN ] osu_latency_test @daint:gpu+gnu +[ OK ] ( 3/22) build_osu_benchmarks ~daint:gpu+intel @daint:gpu+intel [compile: 47.774s run: 0.313s total: 48.758s] +[ OK ] ( 4/22) build_osu_benchmarks ~daint:gpu+pgi @daint:gpu+pgi [compile: 47.127s run: 0.297s total: 48.765s] +[ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi +[ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi +[ RUN ] osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi +[ RUN ] osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel +[ RUN ] osu_allreduce_test %mpi_tasks=2 @daint:gpu+pgi +[ RUN ] osu_bandwidth_test @daint:gpu+intel +[ RUN ] osu_bandwidth_test @daint:gpu+pgi +[ RUN ] osu_latency_test @daint:gpu+intel +[ RUN ] osu_latency_test @daint:gpu+pgi +[ OK ] ( 5/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+gnu [compile: 0.022s run: 63.846s total: 64.319s] +[ OK ] ( 6/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+gnu [compile: 0.024s run: 56.997s total: 64.302s] +[ OK ] ( 7/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+gnu [compile: 0.024s run: 56.187s total: 66.616s] +[ OK ] ( 8/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+gnu [compile: 0.026s run: 82.220s total: 86.255s] +[ OK ] ( 9/22) osu_bandwidth_test @daint:gpu+gnu [compile: 0.023s run: 128.535s total: 142.154s] +[ OK ] (10/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+pgi [compile: 0.023s run: 168.876s total: 185.476s] +[ OK ] (11/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+intel [compile: 0.020s run: 165.312s total: 185.461s] +[ OK ] (12/22) osu_allreduce_test %mpi_tasks=4 @daint:gpu+intel [compile: 0.019s run: 172.593s total: 186.044s] +[ OK ] (13/22) osu_allreduce_test %mpi_tasks=2 @daint:gpu+pgi [compile: 0.019s run: 
162.499s total: 185.942s] +[ OK ] (14/22) osu_latency_test @daint:gpu+intel [compile: 0.020s run: 152.867s total: 185.853s] +[ OK ] (15/22) osu_latency_test @daint:gpu+pgi [compile: 0.020s run: 149.662s total: 185.853s] +[ OK ] (16/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+intel [compile: 0.020s run: 207.009s total: 207.831s] +[ OK ] (17/22) osu_allreduce_test %mpi_tasks=16 @daint:gpu+pgi [compile: 0.019s run: 203.753s total: 207.829s] +[ OK ] (18/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+pgi [compile: 0.019s run: 197.421s total: 207.783s] +[ OK ] (19/22) osu_latency_test @daint:gpu+gnu [compile: 0.024s run: 218.130s total: 234.892s] +[ OK ] (20/22) osu_bandwidth_test @daint:gpu+intel [compile: 0.020s run: 218.457s total: 244.995s] +[ OK ] (21/22) osu_bandwidth_test @daint:gpu+pgi [compile: 0.020s run: 215.273s total: 244.992s] +[ OK ] (22/22) osu_allreduce_test %mpi_tasks=8 @daint:gpu+intel [compile: 0.020s run: 267.367s total: 274.584s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 22/22 test case(s) from 10 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 23:13:40 2022 +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-6gbw7qzs.log' diff --git a/docs/listings/osu_bench_list_concretized.txt b/docs/listings/osu_bench_list_concretized.txt new file mode 100644 index 0000000000..cd4482f761 --- /dev/null +++ b/docs/listings/osu_bench_list_concretized.txt @@ -0,0 +1,68 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -lC' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[List of matched checks] +- OSUAllreduceTest %mpi_tasks=16 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=16 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=16 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=8 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=8 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=8 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=4 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=4 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=4 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=2 @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=2 @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUAllreduceTest %mpi_tasks=2 @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSUBandwidthTest @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSUBandwidthTest 
@daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSUBandwidthTest @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+gnu + ^OSUBuildTest @daint:gpu+gnu + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+intel + ^OSUBuildTest @daint:gpu+intel + ^OSUDownloadTest @daint:login+builtin +- OSULatencyTest @daint:gpu+pgi + ^OSUBuildTest @daint:gpu+pgi + ^OSUDownloadTest @daint:login+builtin +Concretized 22 test case(s) + +Log file(s) saved in '/tmp/rfm-l3eamaiy.log' diff --git a/docs/listings/osu_bench_list_concretized_gnu.txt b/docs/listings/osu_bench_list_concretized_gnu.txt new file mode 100644 index 0000000000..230add0428 --- /dev/null +++ b/docs/listings/osu_bench_list_concretized_gnu.txt @@ -0,0 +1,17 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[List of matched checks] +- OSULatencyTest [id: OSULatencyTest, file: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py'] + ^OSUBuildTest [id: OSUBuildTest, file: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py'] + ^OSUDownloadTest [id: OSUDownloadTest, file: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py'] +Found 3 check(s) + +Log file(s) saved in '/tmp/rfm-klltwsex.log' diff --git a/docs/listings/osu_latency_list.txt b/docs/listings/osu_latency_list.txt new file mode 100644 index 0000000000..c6e133c3ea --- /dev/null +++ b/docs/listings/osu_latency_list.txt @@ -0,0 +1,17 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[List of matched checks] +- OSULatencyTest + ^OSUBuildTest + ^OSUDownloadTest +Found 3 check(s) + +Log file(s) saved in '/tmp/rfm-zc483csf.log' diff --git a/docs/listings/osu_latency_unresolved_deps.txt b/docs/listings/osu_latency_unresolved_deps.txt new file mode 100644 index 0000000000..75718b9060 --- /dev/null +++ b/docs/listings/osu_latency_unresolved_deps.txt @@ -0,0 +1,40 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest --system=daint:gpu -l' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'gnu') -> 'OSUDownloadTest' +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'intel') -> 'OSUDownloadTest' +./bin/reframe: could not resolve dependency: ('OSUBuildTest', 
'daint:gpu', 'pgi') -> 'OSUDownloadTest' +./bin/reframe: skipping all dependent test cases + - ('OSUBuildTest', 'daint:gpu', 'pgi') + - ('OSUBuildTest', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_8', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'pgi') + - ('OSUBuildTest', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_4', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') + - ('OSULatencyTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_8', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_2', 'daint:gpu', 'pgi') + - ('OSUBandwidthTest', 'daint:gpu', 'pgi') + - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') + - ('OSUBandwidthTest', 'daint:gpu', 'intel') + - ('OSULatencyTest', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') + - ('OSUAllreduceTest_4', 'daint:gpu', 'gnu') + - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') + - ('OSUBandwidthTest', 'daint:gpu', 'gnu') + - ('OSULatencyTest', 'daint:gpu', 'gnu') + +[List of matched checks] +Found 0 check(s) + +Log file(s) saved in '/tmp/rfm-k1w20m9z.log' diff --git a/docs/listings/param_deps_list.txt b/docs/listings/param_deps_list.txt new file mode 100644 index 0000000000..eb11b3968b --- /dev/null +++ b/docs/listings/param_deps_list.txt @@ -0,0 +1,25 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/deps/parameterized.py -l' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/deps/parameterized.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[List of matched checks] +- TestB + ^TestA %z=9 + ^TestA %z=8 + ^TestA %z=7 + ^TestA %z=6 +- TestA %z=5 +- TestA %z=4 +- TestA %z=3 +- TestA %z=2 +- TestA %z=1 +- TestA %z=0 +Found 11 check(s) + +Log file(s) saved in '/tmp/rfm-iey58chw.log' diff --git a/docs/listings/perflogs.txt b/docs/listings/perflogs.txt new file mode 100644 index 0000000000..6906425949 --- /dev/null +++ b/docs/listings/perflogs.txt @@ -0,0 +1,8 @@ +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Copy=24672.4|ref=25200 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Scale=16834.0|ref=16800 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Add=18376.3|ref=18500 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:17:15|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34545|Triad=19071.7|ref=18800 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Copy=24584.3|ref=55200 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Scale=16767.3|ref=16800 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Add=18409.5|ref=18500 (l=-0.05, u=0.05)|MB/s +2022-01-19T17:18:52|reframe 3.10.0-dev.2+bf404ae1|StreamWithRefTest @catalina:default+gnu|jobid=34622|Triad=18959.5|ref=18800 (l=-0.05, u=0.05)|MB/s diff --git a/docs/listings/run-report.json b/docs/listings/run-report.json new file mode 100644 index 0000000000..a31169ecd1 --- 
/dev/null +++ b/docs/listings/run-report.json @@ -0,0 +1,65 @@ +{ + "session_info": { + "cmdline": "./bin/reframe -c tutorials/basics/hello/hello1.py -r", + "config_file": "", + "data_version": "2.0", + "hostname": "host", + "prefix_output": "/path/to/reframe/output", + "prefix_stage": "/path/to/reframe/stage", + "user": "user", + "version": "3.10.0-dev.3+c22440c1", + "workdir": "/path/to/reframe", + "time_start": "2022-01-22T13:21:50+0100", + "time_end": "2022-01-22T13:21:51+0100", + "time_elapsed": 0.8124568462371826, + "num_cases": 1, + "num_failures": 0 + }, + "runs": [ + { + "num_cases": 1, + "num_failures": 0, + "num_aborted": 0, + "num_skipped": 0, + "runid": 0, + "testcases": [ + { + "build_stderr": "rfm_HelloTest_build.err", + "build_stdout": "rfm_HelloTest_build.out", + "dependencies_actual": [], + "dependencies_conceptual": [], + "description": "HelloTest", + "display_name": "HelloTest", + "filename": "/path/to/reframe/tutorials/basics/hello/hello1.py", + "environment": "builtin", + "fail_phase": null, + "fail_reason": null, + "jobid": "43152", + "job_stderr": "rfm_HelloTest_job.err", + "job_stdout": "rfm_HelloTest_job.out", + "maintainers": [], + "name": "HelloTest", + "nodelist": [ + "tresa.local" + ], + "outputdir": "/path/to/reframe/output/generic/default/builtin/HelloTest", + "perfvars": null, + "prefix": "/path/to/reframe/tutorials/basics/hello", + "result": "success", + "stagedir": "/path/to/reframe/stage/generic/default/builtin/HelloTest", + "scheduler": "local", + "system": "generic:default", + "tags": [], + "time_compile": 0.27164483070373535, + "time_performance": 0.00010180473327636719, + "time_run": 0.3764667510986328, + "time_sanity": 0.0006909370422363281, + "time_setup": 0.007919073104858398, + "time_total": 0.8006880283355713, + "unique_name": "HelloTest" + } + ] + } + ], + "restored_cases": [] +} diff --git a/docs/listings/stream1.txt b/docs/listings/stream1.txt new file mode 100644 index 0000000000..85b7c47808 --- /dev/null +++ b/docs/listings/stream1.txt @@ -0,0 +1,37 @@ +[ReFrame Setup] + version: 3.10.0-dev.2+bf404ae1 + command: './bin/reframe -c tutorials/basics/stream/stream1.py -r --performance-report' + launched by: user@host + working directory: '/Users/user/Repositories/reframe' + settings file: 'tutorials/config/mysettings.py' + check search path: '/Users/user/Repositories/reframe/tutorials/basics/stream/stream1.py' + stage directory: '/Users/user/Repositories/reframe/stage' + output directory: '/Users/user/Repositories/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Wed Jan 19 17:13:35 2022 + +[----------] started processing StreamTest (StreamTest) +[ RUN ] StreamTest on catalina:default using gnu +[----------] finished processing StreamTest (StreamTest) + +[----------] waiting for spawned checks to finish +[ OK ] (1/1) StreamTest @catalina:default+gnu [compile: 1.260s run: 2.844s total: 4.136s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 1/1 test case(s) from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Wed Jan 19 17:13:39 2022 +============================================================================== +PERFORMANCE REPORT +------------------------------------------------------------------------------ +StreamTest +- catalina:default + - gnu + * num_tasks: 1 + * Copy: 23864.2 MB/s + * Scale: 16472.6 MB/s + * Add: 18265.5 MB/s + * Triad: 18632.3 MB/s +------------------------------------------------------------------------------ +Run report saved in 
'/Users/user/.reframe/reports/run-report.json' +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-o1wls55_.log' diff --git a/docs/listings/stream3_failure_only.txt b/docs/listings/stream3_failure_only.txt new file mode 100644 index 0000000000..2611791793 --- /dev/null +++ b/docs/listings/stream3_failure_only.txt @@ -0,0 +1,14 @@ +FAILURE INFO for StreamWithRefTest + * Expanded name: StreamWithRefTest + * Description: StreamWithRefTest + * System partition: catalina:default + * Environment: gnu + * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/gnu/StreamWithRefTest + * Node list: vpn-39 + * Job type: local (id=34622) + * Dependencies (conceptual): [] + * Dependencies (actual): [] + * Maintainers: [] + * Failing phase: performance + * Rerun with '-n StreamWithRefTest -p gnu --system catalina:default -r' + * Reason: performance error: failed to meet reference: Copy=24584.3, expected 55200 (l=52440.0, u=57960.0) diff --git a/docs/listings/stream4_daint.txt b/docs/listings/stream4_daint.txt new file mode 100644 index 0000000000..1b800a70bb --- /dev/null +++ b/docs/listings/stream4_daint.txt @@ -0,0 +1,124 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+605af31a + command: './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report' + launched by: user@host + working directory: '/home/user/Devel/reframe' + settings file: '/home/user/Devel/reframe/tutorials/config/settings.py' + check search path: '/home/user/Devel/reframe/tutorials/basics/stream/stream4.py' + stage directory: '/home/user/Devel/reframe/stage' + output directory: '/home/user/Devel/reframe/output' + +[==========] Running 1 check(s) +[==========] Started on Sat Jan 22 22:47:28 2022 + +[----------] start processing checks +[ RUN ] StreamMultiSysTest @daint:login+gnu +[ RUN ] StreamMultiSysTest @daint:login+intel +[ RUN ] StreamMultiSysTest @daint:login+pgi +[ RUN ] StreamMultiSysTest @daint:login+cray +[ RUN ] StreamMultiSysTest @daint:gpu+gnu +[ RUN ] StreamMultiSysTest @daint:gpu+intel +[ RUN ] StreamMultiSysTest @daint:gpu+pgi +[ RUN ] StreamMultiSysTest @daint:gpu+cray +[ RUN ] StreamMultiSysTest @daint:mc+gnu +[ RUN ] StreamMultiSysTest @daint:mc+intel +[ RUN ] StreamMultiSysTest @daint:mc+pgi +[ RUN ] StreamMultiSysTest @daint:mc+cray +[ OK ] ( 1/12) StreamMultiSysTest @daint:login+gnu [compile: 4.024s run: 21.615s total: 28.185s] +[ OK ] ( 2/12) StreamMultiSysTest @daint:login+intel [compile: 3.410s run: 20.976s total: 28.208s] +[ OK ] ( 3/12) StreamMultiSysTest @daint:login+pgi [compile: 2.734s run: 20.235s total: 28.226s] +[ OK ] ( 4/12) StreamMultiSysTest @daint:login+cray [compile: 2.104s run: 19.571s total: 28.242s] +[ OK ] ( 5/12) StreamMultiSysTest @daint:gpu+gnu [compile: 2.102s run: 30.129s total: 38.813s] +[ OK ] ( 6/12) StreamMultiSysTest @daint:gpu+pgi [compile: 8.695s run: 22.117s total: 38.826s] +[ OK ] ( 7/12) StreamMultiSysTest @daint:gpu+cray [compile: 8.083s run: 19.050s total: 38.852s] +[ OK ] ( 8/12) StreamMultiSysTest @daint:gpu+intel [compile: 9.369s run: 37.641s total: 50.212s] +[ OK ] ( 9/12) StreamMultiSysTest @daint:mc+gnu [compile: 7.970s run: 28.955s total: 52.297s] +[ OK ] (10/12) StreamMultiSysTest @daint:mc+cray [compile: 20.508s run: 30.812s total: 65.951s] +[ OK ] (11/12) StreamMultiSysTest @daint:mc+pgi [compile: 21.186s run: 34.898s total: 66.325s] +[ OK ] (12/12) StreamMultiSysTest @daint:mc+intel [compile: 21.890s run: 62.451s total: 90.626s] +[----------] all spawned checks have finished + +[ PASSED ] Ran 12/12 test case(s) 
from 1 check(s) (0 failure(s), 0 skipped) +[==========] Finished on Sat Jan 22 22:48:59 2022 +============================================================================== +PERFORMANCE REPORT +------------------------------------------------------------------------------ +StreamMultiSysTest +- daint:login + - gnu + * num_tasks: 1 + * Copy: 108525.7 MB/s + * Scale: 76882.1 MB/s + * Add: 81155.7 MB/s + * Triad: 82433.2 MB/s + - intel + * num_tasks: 1 + * Copy: 82341.7 MB/s + * Scale: 81330.6 MB/s + * Add: 72076.0 MB/s + * Triad: 101808.5 MB/s + - pgi + * num_tasks: 1 + * Copy: 94336.0 MB/s + * Scale: 69096.9 MB/s + * Add: 73484.2 MB/s + * Triad: 73243.6 MB/s + - cray + * num_tasks: 1 + * Copy: 114374.2 MB/s + * Scale: 76205.6 MB/s + * Add: 82184.5 MB/s + * Triad: 76086.3 MB/s +- daint:gpu + - gnu + * num_tasks: 1 + * Copy: 42963.4 MB/s + * Scale: 38504.8 MB/s + * Add: 43650.2 MB/s + * Triad: 43876.5 MB/s + - intel + * num_tasks: 1 + * Copy: 52505.4 MB/s + * Scale: 54131.1 MB/s + * Add: 58918.8 MB/s + * Triad: 59048.6 MB/s + - pgi + * num_tasks: 1 + * Copy: 50472.9 MB/s + * Scale: 39545.5 MB/s + * Add: 43881.6 MB/s + * Triad: 43972.4 MB/s + - cray + * num_tasks: 1 + * Copy: 50610.2 MB/s + * Scale: 38990.9 MB/s + * Add: 43158.9 MB/s + * Triad: 43792.9 MB/s +- daint:mc + - gnu + * num_tasks: 1 + * Copy: 48650.7 MB/s + * Scale: 38618.4 MB/s + * Add: 43504.1 MB/s + * Triad: 44044.1 MB/s + - intel + * num_tasks: 1 + * Copy: 52500.5 MB/s + * Scale: 48545.9 MB/s + * Add: 57150.3 MB/s + * Triad: 57272.4 MB/s + - pgi + * num_tasks: 1 + * Copy: 46123.6 MB/s + * Scale: 40552.5 MB/s + * Add: 44147.7 MB/s + * Triad: 44521.9 MB/s + - cray + * num_tasks: 1 + * Copy: 47094.0 MB/s + * Scale: 40080.4 MB/s + * Add: 43659.8 MB/s + * Triad: 44078.0 MB/s +------------------------------------------------------------------------------ +Run report saved in '/home/user/.reframe/reports/run-report.json' +Log file(s) saved in '/tmp/rfm-sua0bogo.log' diff --git a/docs/listings/stream_params.txt b/docs/listings/stream_params.txt new file mode 100644 index 0000000000..d5321b6f4e --- /dev/null +++ b/docs/listings/stream_params.txt @@ -0,0 +1,25 @@ +[ReFrame Setup] + version: 3.10.0-dev.3+4fc5b12c + command: './bin/reframe -c tutorials/advanced/parameterized/stream.py -l' + launched by: user@host + working directory: '/home/user/Repositories/reframe' + settings file: '/home/user/Repositories/reframe/tutorials/config/settings.py' + check search path: '/home/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py' + stage directory: '/home/user/Repositories/reframe/stage' + output directory: '/home/user/Repositories/reframe/output' + +[List of matched checks] +- StreamMultiSysTest %num_bytes=536870912 +- StreamMultiSysTest %num_bytes=268435456 +- StreamMultiSysTest %num_bytes=134217728 +- StreamMultiSysTest %num_bytes=67108864 +- StreamMultiSysTest %num_bytes=33554432 +- StreamMultiSysTest %num_bytes=16777216 +- StreamMultiSysTest %num_bytes=8388608 +- StreamMultiSysTest %num_bytes=4194304 +- StreamMultiSysTest %num_bytes=2097152 +- StreamMultiSysTest %num_bytes=1048576 +- StreamMultiSysTest %num_bytes=524288 +Found 11 check(s) + +Log file(s) saved in '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-ka9llk6d.log' diff --git a/docs/manpage.rst b/docs/manpage.rst index baec1f350e..114587641e 100644 --- a/docs/manpage.rst +++ b/docs/manpage.rst @@ -114,11 +114,25 @@ This happens recursively so that if test ``T1`` depends on ``T2`` and ``T2`` dep Filter tests by name. 
``NAME`` is interpreted as a `Python Regular Expression `__; - any test whose name matches ``NAME`` will be selected. + any test whose *display name* matches ``NAME`` will be selected. + The display name of a test also encodes any parameterization information. + See :ref:`test_naming_scheme` for more details on how the tests are automatically named by the framework. + + Before matching, any whitespace will be removed from the display name of the test. This option may be specified multiple times, in which case tests with *any* of the specified names will be selected: ``-n NAME1 -n NAME2`` is therefore equivalent to ``-n 'NAME1|NAME2'``. + If the special notation ``<test_name>@<variant_num>`` is passed as the ``NAME`` argument, then an exact match will be performed, selecting the variant ``<variant_num>`` of the test ``<test_name>``. + + .. note:: + + Fixtures cannot be selected. + + .. versionchanged:: 3.10.0 + + The option's behaviour was adapted and extended in order to work with the updated test naming scheme. + .. option:: -p, --prgenv=NAME Filter tests by programming environment. @@ -189,15 +203,42 @@ An action must always be specified. .. versionadded:: 3.4.1 -.. option:: -L, --list-detailed + +.. option:: --describe + + Print a detailed description of the `selected tests <#test-filtering>`__ in JSON format and exit. + + .. note:: + The generated test description corresponds to its state after it has been initialized. + If any of its attributes are changed or set during its execution, their updated values will not be shown by this listing. + + .. versionadded:: 3.10.0 + + +.. option:: -L, --list-detailed[=T|C] + + List selected tests, providing more details for each test. - List selected tests providing detailed information per test. + The unique name of each test (see also :attr:`~reframe.core.pipeline.RegressionTest.unique_name`) as well as the file where each test is defined are printed. -.. option:: -l, --list + This option optionally accepts a single argument denoting the type of listing requested. + Please refer to :option:`-l` for an explanation of this argument. - List selected tests. + .. versionadded:: 3.10.0 + Support for different types of listing is added. - A single line per test is printed. +.. option:: -l, --list[=T|C] + + List selected tests and their dependencies. + + This option optionally accepts a single argument denoting the type of listing requested. + There are two types of possible listings: + + - *Regular test listing* (``T``, the default): This type of listing shows the tests and their dependencies or fixtures using their :attr:`~reframe.core.pipeline.RegressionTest.display_name`. A test that is listed as a dependency of another test will not be listed separately. + - *Concretized test case listing* (``C``): This type of listing shows the exact test cases and their dependencies as they have been concretized for the current system and environment combinations. + In practice, this listing shows the exact test DAG that will be executed. + + .. versionadded:: 3.10.0 + Support for different types of listing is added. .. option:: --list-tags @@ -211,7 +252,11 @@ An action must always be specified. Execute the selected tests. -If more than one action options are specified, :option:`-l` precedes :option:`-L`, which in turn precedes :option:`-r`. +If more than one action option is specified, the precedence order is the following: + + .. 
code-block:: console + + --describe > --list-detailed > --list > --list-tags > --ci-generate ---------------------------------- @@ -766,6 +811,121 @@ Miscellaneous options This option can also be set using the :envvar:`RFM_VERBOSE` environment variable or the :js:attr:`verbose` general configuration parameter. +.. _test_naming_scheme: + +Test Naming Scheme +------------------ + +.. versionadded:: 3.10.0 + +This section describes the new test naming scheme, which will replace the current one in ReFrame 4.0. +It can be enabled by setting the :envvar:`RFM_COMPACT_TEST_NAMES` environment variable. + +Each ReFrame test is assigned a unique name, which will be used internally by the framework to reference the test. +Any test-specific path component will use that name, too. +It is formed as follows for the various types of tests: + +- *Regular tests*: The unique name is simply the test class name. + This implies that you cannot load two tests with the same class name within the same run session, even if these tests reside in separate directories. +- *Parameterized tests*: The unique name is formed by the test class name followed by an ``_`` and the variant number of the test. + Each point in the parameter space of the test is assigned a unique variant number. +- *Fixtures*: The unique name is formed by the test class name followed by an ``_`` and a hash. + The hash is constructed by combining the information of the fixture variant (if the fixture is parameterized), the fixture's scope and any fixture variables that were explicitly set. + +Since unique names can be cryptic, they are not listed by the :option:`-l` option, but they are listed when a detailed listing is requested with the :option:`-L` option. + +A human-readable version of the test name, which is called the *display name*, is also constructed for each test. +This name encodes all the parameterization information as well as the fixture-specific information (scopes, variables). +The format of the display name is the following in BNF notation: + +.. code-block:: bnf + + <display_name> ::= <test_class_name> (<params>)* (<scope>)? + <params> ::= "%" <parametrization> "=" <param_value> + <parametrization> ::= (<fixture_name> ".")* <param_name> + <scope> ::= "~" <scope_descr> + <scope_descr> ::= <first> ("+" <second>)* + + <test_class_name> ::= (* as in Python *) + <param_name> ::= (* string *) + <fixture_name> ::= (* string *) + <param_value> ::= (* string *) + <first> ::= (* string *) + <second> ::= (* string *) + +The following is an example of a fictitious complex test that is itself parameterized and depends on parameterized fixtures as well. + +.. code-block:: python + + import reframe as rfm + + + class MyFixture(rfm.RunOnlyRegressionTest): + p = parameter([1, 2]) + + + class X(rfm.RunOnlyRegressionTest): + foo = variable(int, value=1) + + + @rfm.simple_test + class TestA(rfm.RunOnlyRegressionTest): + f = fixture(MyFixture, scope='test', action='join') + x = parameter([3, 4]) + t = fixture(MyFixture, scope='test') + l = fixture(X, scope='environment', variables={'foo': 10}) + valid_systems = ['*'] + valid_prog_environs = ['*'] + + +Here is how this test is listed; note the various components of the display name: + +.. 
code-block:: console + + - TestA %x=4 %l.foo=10 %t.p=2 + ^MyFixture %p=1 ~TestA_4_1 + ^MyFixture %p=2 ~TestA_4_1 + ^X %foo=10 ~generic:default+builtin + - TestA %x=3 %l.foo=10 %t.p=2 + ^MyFixture %p=1 ~TestA_3_1 + ^MyFixture %p=2 ~TestA_3_1 + ^X %foo=10 ~generic:default+builtin + - TestA %x=4 %l.foo=10 %t.p=1 + ^MyFixture %p=2 ~TestA_4_0 + ^MyFixture %p=1 ~TestA_4_0 + ^X %foo=10 ~generic:default+builtin + - TestA %x=3 %l.foo=10 %t.p=1 + ^MyFixture %p=2 ~TestA_3_0 + ^MyFixture %p=1 ~TestA_3_0 + ^X %foo=10 ~generic:default+builtin + Found 4 check(s) + +Display names may not always be unique. +In the following example: + +.. code-block:: python + + class MyTest(RegressionTest): + p = parameter([1, 1, 1]) + +This generates three different tests with different unique names, but their display name is the same for all: ``MyTest %p=1``. +Notice that this example would lead to a name conflict under the old naming scheme, since all three tests would be named ``MyTest_1``. + + +-------------------------------------- +Differences from the old naming scheme +-------------------------------------- + +Prior to version 3.10, ReFrame used to encode the parameter values of an instance of a parameterized test in its name. +It did so by taking the string representation of the value and replacing any non-alphanumeric character with an underscore. +This could lead to very long and hard-to-read names when a test defined multiple parameters or the parameter type was more complex. +Very long test names also meant very long path names, which could in turn lead to problems and random failures. +Fixtures followed a similar naming pattern, making them hard to debug. + +The old naming scheme is still the default for parameterized tests (but not for fixtures) and will remain so until ReFrame 4.0, in order to ensure backward compatibility. +However, users are advised to enable the new naming scheme by setting the :envvar:`RFM_COMPACT_TEST_NAMES` environment variable. + + Environment ----------- @@ -837,7 +997,7 @@ Here is an alphabetical list of the environment variables recognized by ReFrame: .. envvar:: RFM_COMPACT_TEST_NAMES - Enable the compact test naming scheme. + Enable the new test naming scheme. .. table:: :align: left diff --git a/docs/regression_test_api.rst b/docs/regression_test_api.rst index d0f686e771..4e62e14c3a 100644 --- a/docs/regression_test_api.rst +++ b/docs/regression_test_api.rst @@ -51,7 +51,7 @@ In essence, these builtins exert control over the test creation, and they allow p1 = [parameter([1, 2])] # Undefined behavior -.. py:function:: RegressionMixin.parameter(values=None, inherit_params=False, filter_params=None) +.. py:function:: RegressionMixin.parameter(values=None, inherit_params=False, filter_params=None, fmt=None) Inserts or modifies a regression test parameter. At the class level, these parameters are stored in a separate namespace referred to as the *parameter space*. @@ -125,6 +125,12 @@ In essence, these builtins exert control over the test creation, and they allow This function must accept a single iterable argument and return an iterable. It will be called with the inherited parameter values and it must return the filtered set of parameter values. This function will only have an effect if used with ``inherit_params=True``. + :param fmt: A formatting function that will be used to format the values of this parameter in the test's :attr:`~reframe.core.pipeline.RegressionTest.display_name`. + This function should take the parameter value as its argument and return a string representation of the value. 
+ If the returned value is not a string, it will be converted using the :py:func:`str` function. + + .. versionadded:: 3.10.0 + The ``fmt`` argument is added. .. py:function:: RegressionMixin.variable(*types, value=None, field=None, **kwargs) @@ -657,86 +663,12 @@ Therefore, classes that derive from the base :class:`~reframe.core.pipeline.Regr .. py:attribute:: RegressionMixin.num_variants - Total number of unique test variants in a class. - - -.. py:function:: RegressionMixin.get_variant_info(cls, variant_num, *, recurse=False, max_depth=None) - - Get the raw variant data for a given variant index. - This function returns a dictionary with the variant data such as the parameter values and the fixture variants. - The parameter space information is presented in a sub-dictionary under the ``'params'`` key, gathering all the parameter values associated with the given variant number. - Similarly, the information on the test's fixtures is gathered in another sub-dictionary under the ``'fixtures'`` key. - By default, this sub-dictionary shows a tuple for each fixture, containing the respective fixture variants associated with the given ``variant_num``. - These tuples may only contain more than one fixture variant index if the fixture was declared with a `join` action (see the :func:`~RegressionMixin.fixture` documentation for more information). - However, when ``recurse`` is set to ``True``, each fixture entry with a single-element tuple will be expanded to show the full fixture variant information. - By default, the recursion will traverse the full fixture tree, but this recursion depth can be limited with the ``max_depth`` argument. - See the example below. - - .. code:: python - - class Foo(rfm.RegressionTest): - p0 = parameter(range(2)) - ... - - class Bar(rfm.RegressionTest): - p0 = parameter(range(3)) - ... - - class MyTest(rfm.RegressionTest): - p1 = parameter(['a', 'b']) - f0 = fixture(Foo, action='fork') - f1 = fixture(Bar, action='join') - ... - - # Get the raw info for variant 0 - without recursion - MyTest.get_variant_info(0, recursive=False) - # { - # 'params': {'p1': 'a'}, - # 'fixtures': { - # 'f0': (0,), - # 'f1': (0, 1, 2,) - # } - # } - - # Get the raw info for variant 0 - show the full tree - MyTest.get_variant_info(0, recursive=True) - # { - # 'params': {'p1': 'a'}, - # 'fixtures': { - # 'f0': { - # 'params': {'p0': 0}, - # 'fixtures': {} - # }, - # 'f1': (0, 1, 2,) - # } - # } - - :param variant_num: An integer in the range of [0, cls.num_variants). - :param recurse: Flag to control the recursion through the fixture space. - :param max_depth: Set the recursion limit. When the ``recurse`` argument is set to ``False``, this option has no effect. - - -.. py:function:: RegressionMixin.get_variant_nums(cls, **conditions) - - Get the variant numbers that meet the specified conditions. - The given conditions enable filtering the parameter space of the test. - These can be specified by passing key-value pairs with the parameter name to filter and an associated callable that returns ``True`` when the filtering condition is met. Multiple conditions are supported. - However, filtering the fixture space is not allowed. - - .. code-block:: python - - # Get the variant numbers where my_param is lower than 4 - cls.get_variant_nums(my_param=lambda x: x < 4) - - :param conditions: keyword arguments where the key is the test parameter name and the value is a unary function that evaluates a bool condition on the parameter value. - + Total number of variants of the test. -.. 
py:function:: RegressionMixin.fullname(cls, variant_num=None) +.. automethod:: reframe.core.pipeline.RegressionMixin.get_variant_nums - Return the full unique name of a test for a given test variant number. - If no ``variant_num`` is provided, this function returns the qualified class name. +.. automethod:: reframe.core.pipeline.RegressionMixin.variant_name - :param variant_num: An integer in the range of [0, cls.num_variants). ------------------------ diff --git a/docs/tutorial_advanced.rst b/docs/tutorial_advanced.rst index ef6b9f7108..0316c813ec 100644 --- a/docs/tutorial_advanced.rst +++ b/docs/tutorial_advanced.rst @@ -9,7 +9,7 @@ Finally, to avoid specifying the tutorial configuration file each time, make sur .. code:: bash - export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py + export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py @@ -44,36 +44,13 @@ Let's try listing the generated tests: ./bin/reframe -c tutorials/advanced/parameterized/stream.py -l -.. code-block:: none +.. literalinclude:: listings/stream_params.txt + :language: console - [ReFrame Setup] - version: 3.6.0-dev.0+2f8e5b3b - command: './bin/reframe -c tutorials/advanced/parameterized/stream.py -l' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [List of matched checks] - - StreamMultiSysTest_2097152 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_67108864 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_1048576 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_536870912 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_4194304 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_33554432 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_8388608 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_268435456 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_16777216 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_524288 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - - StreamMultiSysTest_134217728 (found in '/Users/user/Repositories/reframe/tutorials/advanced/parameterized/stream.py') - Found 11 check(s) - - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-s_ty1l50.log' - - -ReFrame generates 11 tests from the single parameterized test that we have written and names them by appending a string representation of the parameter value. +ReFrame generates 11 tests from the single parameterized test. +When listing parameterized tests, ReFrame adds the list of parameters after the base test name using the notation ``%<param_name>=<param_value>``. +Each generated test also gets a unique name. +For more details on how the test names are generated for various types of tests, please refer to :ref:`test_naming_scheme`. 
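How a parameter value is rendered in the display name can be customized through the ``fmt`` argument of the ``parameter()`` builtin, documented earlier in this patch. The snippet below is a minimal sketch and is not part of the tutorial sources; the test name ``StreamFmtDemo`` and its body are hypothetical and only illustrate how the byte counts shown above could be displayed in MiB:

.. code-block:: python

   import reframe as rfm
   import reframe.utility.sanity as sn


   @rfm.simple_test
   class StreamFmtDemo(rfm.RunOnlyRegressionTest):
       # Hypothetical example: render the array size in MiB so that the test
       # is listed as e.g. 'StreamFmtDemo %num_bytes=512MiB' instead of
       # 'StreamFmtDemo %num_bytes=536870912'.
       num_bytes = parameter([1 << 29, 1 << 28],
                             fmt=lambda x: f'{x // (1 << 20)}MiB')
       valid_systems = ['*']
       valid_prog_environs = ['*']
       executable = 'echo'
       sanity_patterns = sn.assert_true(True)

With such a formatter, selecting a variant by display name, e.g. ``-n 'num_bytes=512MiB'``, is also easier to type than the raw byte count.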
Test parameterization in ReFrame is very powerful since you can parameterize your tests on anything and you can create complex parameterization spaces. A common pattern is to parameterize a test on the environment module that loads a software in order to test different versions of it. @@ -316,27 +293,8 @@ Notice how the parameters are expanded in each of the individual tests: ./bin/reframe -c tutorials/advanced/makefiles/maketest_mixin.py -l - -.. code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+2f8e5b3b - command: './bin/reframe -c tutorials/advanced/makefiles/maketest_mixin.py -l' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [List of matched checks] - - MakeOnlyTestAlt_double (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py') - - MakeOnlyTestAlt_float (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py') - - MakefileTestAlt_double (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py') - - MakefileTestAlt_float (found in '/Users/user/Repositories/reframe/tutorials/advanced/makefiles/maketest_mixin.py') - Found 4 check(s) - - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-e384bvkd.log' +.. literalinclude:: listings/maketest_mixin.txt + :language: console diff --git a/docs/tutorial_basics.rst b/docs/tutorial_basics.rst index 08f8f536f4..a606124123 100644 --- a/docs/tutorial_basics.rst +++ b/docs/tutorial_basics.rst @@ -98,29 +98,8 @@ Now it's time to run our first test: ./bin/reframe -c tutorials/basics/hello/hello1.py -r -.. code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: 5d246bff) - command: './bin/reframe -c tutorials/basics/hello/hello1.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 18:23:30 2020 - - [----------] start processing checks - [ RUN ] HelloTest on generic:default using builtin - [ OK ] (1/1) HelloTest on generic:default using builtin [compile: 0.389s run: 0.406s total: 0.811s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 1 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 18:23:31 2020 - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-00lf_tbi.log' +.. literalinclude:: listings/hello1.txt + :language: console Perfect! We have verified that we have a functioning C compiler in our system. @@ -152,64 +131,7 @@ Here are the contents of the report file for our first ReFrame run: cat ~/.reframe/reports/run-report.json -.. 
code-block:: javascript - - { - "session_info": { - "cmdline": "./bin/reframe -c tutorials/basics/hello/hello1.py -r", - "config_file": "", - "data_version": "1.0", - "hostname": "dhcp-133-44.cscs.ch", - "prefix_output": "/Users/user/Repositories/reframe/output", - "prefix_stage": "/Users/user/Repositories/reframe/stage", - "user": "user", - "version": "3.1-dev2 (rev: 272e1aae)", - "workdir": "/Users/user/Repositories/reframe", - "time_start": "2020-07-24T11:05:46+0200", - "time_end": "2020-07-24T11:05:47+0200", - "time_elapsed": 0.7293069362640381, - "num_cases": 1, - "num_failures": 0 - }, - "runs": [ - { - "num_cases": 1, - "num_failures": 0, - "runid": 0, - "testcases": [ - { - "build_stderr": "rfm_HelloTest_build.err", - "build_stdout": "rfm_HelloTest_build.out", - "description": "HelloTest", - "environment": "builtin", - "fail_reason": null, - "fail_phase": null, - "jobid": 85063, - "job_stderr": "rfm_HelloTest_job.err", - "job_stdout": "rfm_HelloTest_job.out", - "name": "HelloTest", - "maintainers": [], - "nodelist": [ - "dhcp-133-44.cscs.ch" - ], - "outputdir": "/Users/user/Repositories/reframe/output/generic/default/builtin/HelloTest", - "perfvars": null, - "result": "success", - "stagedir": null, - "scheduler": "local", - "system": "generic:default", - "tags": [], - "time_compile": 0.3776402473449707, - "time_performance": 4.506111145019531e-05, - "time_run": 0.2992382049560547, - "time_sanity": 0.0005609989166259766, - "time_setup": 0.0031709671020507812, - "time_total": 0.7213571071624756 - } - ] - } - ] - } +.. literalinclude:: listings/run-report.json More of "Hello, World!" @@ -265,51 +187,8 @@ Let's run the test now: ./bin/reframe -c tutorials/basics/hello/hello2.py -r -.. code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+a3d0b0cd - command: './bin/reframe -c tutorials/basics/hello/hello2.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 2 check(s) - [==========] Started on Tue Mar 9 23:25:22 2021 - - [----------] start processing checks - [ RUN ] HelloMultiLangTest_c on generic:default using builtin - [ RUN ] HelloMultiLangTest_cpp on generic:default using builtin - [ FAIL ] (1/2) HelloMultiLangTest_cpp on generic:default using builtin [compile: 0.006s run: n/a total: 0.023s] - ==> test failed during 'compile': test staged in '/Users/user/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp' - [ OK ] (2/2) HelloMultiLangTest_c on generic:default using builtin [compile: 0.981s run: 0.468s total: 1.475s] - [----------] all spawned checks have finished - - [ FAILED ] Ran 2/2 test case(s) from 2 check(s) (1 failure(s)) - [==========] Finished on Tue Mar 9 23:25:23 2021 - - ============================================================================== - SUMMARY OF FAILURES - ------------------------------------------------------------------------------ - FAILURE INFO for HelloMultiLangTest_cpp - * Test Description: HelloMultiLangTest_cpp - * System partition: generic:default - * Environment: builtin - * Stage directory: /Users/user/Repositories/reframe/stage/generic/default/builtin/HelloMultiLangTest_cpp - * Node list: None - * Job type: local (id=None) - * Dependencies (conceptual): [] - * Dependencies (actual): [] - * Maintainers: [] - * Failing 
phase: compile - * Rerun with '-n HelloMultiLangTest_cpp -p builtin --system generic:default -r' - * Reason: build system error: I do not know how to compile a C++ program - ------------------------------------------------------------------------------ - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-wemvsvs2.log' - +.. literalinclude:: listings/hello2.txt + :language: console Oops! The C++ test has failed. ReFrame complains that it does not know how to compile a C++ program. @@ -339,6 +218,10 @@ Note that you should *not* edit this configuration file in place. cp reframe/core/settings.py tutorials/config/mysettings.py +.. note:: + You may also use the supplied ``tutorials/config/settings.py`` file directly, which is the actual configuration file against which the various tutorials have been evaluated. + + Here is how the new configuration file looks like with the needed additions highlighted: .. literalinclude:: ../tutorials/config/settings.py @@ -370,39 +253,11 @@ Let's now rerun our "Hello, World!" tests: .. code-block:: console - ./bin/reframe -C tutorials/config/mysettings.py -c tutorials/basics/hello/hello2.py -r - + ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r -.. code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+a3d0b0cd - command: './bin/reframe -C tutorials/config/mysettings.py -c tutorials/basics/hello/hello2.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 2 check(s) - [==========] Started on Tue Mar 9 23:28:00 2021 - - [----------] start processing checks - [ RUN ] HelloMultiLangTest_c on catalina:default using gnu - [ RUN ] HelloMultiLangTest_c on catalina:default using clang - [ RUN ] HelloMultiLangTest_cpp on catalina:default using gnu - [ RUN ] HelloMultiLangTest_cpp on catalina:default using clang - [ OK ] (1/4) HelloMultiLangTest_cpp on catalina:default using gnu [compile: 0.768s run: 1.115s total: 1.909s] - [ OK ] (2/4) HelloMultiLangTest_c on catalina:default using gnu [compile: 0.600s run: 2.230s total: 2.857s] - [ OK ] (3/4) HelloMultiLangTest_c on catalina:default using clang [compile: 0.238s run: 2.129s total: 2.393s] - [ OK ] (4/4) HelloMultiLangTest_cpp on catalina:default using clang [compile: 1.006s run: 0.427s total: 1.456s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 4/4 test case(s) from 2 check(s) (0 failure(s)) - [==========] Finished on Tue Mar 9 23:28:03 2021 - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-dnubkvfi.log' +.. literalinclude:: listings/hello2_catalina.txt + :language: console Notice how the same tests are now tried with both the ``gnu`` and ``clang`` programming environments, without having to touch them at all! That's one of the powerful features of ReFrame and we shall see later on, how easily we can port our tests to an HPC cluster with minimal changes. @@ -411,7 +266,7 @@ Since we don't want to type it throughout the tutorial, we will now set it in th .. code-block:: console - export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py + export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py A Multithreaded "Hello, World!" 
@@ -469,31 +324,8 @@ Let's run the test now: ./bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r -.. code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: 5d246bff) - command: './bin/reframe -c tutorials/basics/hellomp/hellomp1.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '/Users/user/Repositories/reframe/tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp1.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 20:02:37 2020 - - [----------] start processing checks - [ RUN ] HelloThreadedTest on catalina:default using gnu - [ RUN ] HelloThreadedTest on catalina:default using clang - [ OK ] (1/2) HelloThreadedTest on catalina:default using gnu [compile: 1.591s run: 1.205s total: 2.816s] - [ OK ] (2/2) HelloThreadedTest on catalina:default using clang [compile: 1.141s run: 0.309s total: 1.465s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 2 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 20:02:40 2020 - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-h_itoc1k.log' +.. literalinclude:: listings/hellomp1.txt + :language: console Everything looks fine, but let's inspect the actual output of one of the tests: @@ -559,60 +391,8 @@ Let's run this version of the test now and see if it fails: ./bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r -.. code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: 5d246bff) - command: './bin/reframe -c tutorials/basics/hellomp/hellomp2.py -r' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '/Users/user/Repositories/reframe/tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hellomp/hellomp2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 20:04:59 2020 - - [----------] start processing checks - [ RUN ] HelloThreadedExtendedTest on catalina:default using gnu - [ RUN ] HelloThreadedExtendedTest on catalina:default using clang - [ FAIL ] (1/2) HelloThreadedExtendedTest on catalina:default using gnu [compile: 1.222s run: 0.891s total: 2.130s] - [ FAIL ] (2/2) HelloThreadedExtendedTest on catalina:default using clang [compile: 0.835s run: 0.167s total: 1.018s] - [----------] all spawned checks have finished - - [ FAILED ] Ran 2 test case(s) from 1 check(s) (2 failure(s)) - [==========] Finished on Mon Oct 12 20:05:02 2020 - - ============================================================================== - SUMMARY OF FAILURES - ------------------------------------------------------------------------------ - FAILURE INFO for HelloThreadedExtendedTest - * Test Description: HelloThreadedExtendedTest - * System partition: catalina:default - * Environment: gnu - * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/gnu/HelloThreadedExtendedTest - * Node list: tresa.local - * Job type: local (id=60355) - * Maintainers: [] - * Failing phase: sanity - * Rerun with '-n HelloThreadedExtendedTest -p gnu --system catalina:default' - * Reason: sanity error: 12 != 16 - 
------------------------------------------------------------------------------ - FAILURE INFO for HelloThreadedExtendedTest - * Test Description: HelloThreadedExtendedTest - * System partition: catalina:default - * Environment: clang - * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/clang/HelloThreadedExtendedTest - * Node list: tresa.local - * Job type: local (id=60366) - * Maintainers: [] - * Failing phase: sanity - * Rerun with '-n HelloThreadedExtendedTest -p clang --system catalina:default' - * Reason: sanity error: 6 != 16 - ------------------------------------------------------------------------------ - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-zz7x_5c8.log' - +.. literalinclude:: listings/hellomp2.txt + :language: console As expected, only some of lines are printed correctly which makes the test fail. To fix this test, we need to compile with ``-DSYNC_MESSAGES``, which will synchronize the printing of messages. @@ -682,41 +462,8 @@ Let's run the test now: The :option:`--performance-report` will generate a short report at the end for each performance test that has run. -.. code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: 5d246bff) - command: './bin/reframe -c tutorials/basics/stream/stream1.py -r --performance-report' - launched by: user@tresa.local - working directory: '/Users/user/Repositories/reframe' - settings file: '/Users/user/Repositories/reframe/tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/stream/stream1.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 20:06:09 2020 - - [----------] start processing checks - [ RUN ] StreamTest on catalina:default using gnu - [ OK ] (1/1) StreamTest on catalina:default using gnu [compile: 1.386s run: 2.377s total: 3.780s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 1 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 20:06:13 2020 - ============================================================================== - PERFORMANCE REPORT - ------------------------------------------------------------------------------ - StreamTest - - catalina:default - - gnu - * num_tasks: 1 - * Copy: 24326.7 MB/s - * Scale: 16664.2 MB/s - * Add: 18398.7 MB/s - * Triad: 18930.6 MB/s - ------------------------------------------------------------------------------ - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-gczplnic.log' +.. literalinclude:: listings/stream1.txt + :language: console --------------------------------------------------- @@ -794,20 +541,8 @@ If any obtained performance value is beyond its respective thresholds, the test ./bin/reframe -c tutorials/basics/stream/stream3.py -r --performance-report - -.. code-block:: none - - FAILURE INFO for StreamWithRefTest - * Test Description: StreamWithRefTest - * System partition: catalina:default - * Environment: gnu - * Stage directory: /Users/user/Repositories/reframe/stage/catalina/default/gnu/StreamWithRefTest - * Node list: tresa.local - * Job type: local (id=62114) - * Maintainers: [] - * Failing phase: performance - * Rerun with '-n StreamWithRefTest -p gnu --system catalina:default' - * Reason: performance error: failed to meet reference: Copy=24586.5, expected 55200 (l=52440.0, u=57960.0) +.. 
literalinclude:: listings/stream3_failure_only.txt + :language: console ------------------------------ Examining the performance logs @@ -833,17 +568,8 @@ Let's inspect the log file from our last test: tail perflogs/catalina/default/StreamWithRefTest.log - -.. code-block:: none - - 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Copy=24762.2|ref=25200 (l=-0.05, u=0.05)|MB/s - 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Scale=16784.6|ref=16800 (l=-0.05, u=0.05)|MB/s - 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Add=18553.8|ref=18500 (l=-0.05, u=0.05)|MB/s - 2020-06-24T00:27:06|reframe 3.1-dev0 (rev: 9d92d0ec)|StreamWithRefTest on catalina:default using gnu|jobid=58384|Triad=18679.0|ref=18800 (l=-0.05, u=0.05)|MB/s - 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Copy=24586.5|ref=55200 (l=-0.05, u=0.05)|MB/s - 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Scale=16880.6|ref=16800 (l=-0.05, u=0.05)|MB/s - 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Add=18570.4|ref=18500 (l=-0.05, u=0.05)|MB/s - 2020-06-24T12:42:07|reframe 3.1-dev0 (rev: 138cbd68)|StreamWithRefTest on catalina:default using gnu|jobid=62114|Triad=19048.3|ref=18800 (l=-0.05, u=0.05)|MB/s +.. literalinclude:: listings/perflogs.txt + :language: console Several information are printed for each run, such as the performance variables, their value, their references and thresholds etc. The default format is in a form suitable for easy parsing, but you may fully control not only the format, but also what is being logged from the configuration file. @@ -924,142 +650,11 @@ We will only do so with the final versions of the tests from the previous sectio .. code-block:: console - export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py + export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py ./bin/reframe -c tutorials/basics/ -R -n 'HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest' --performance-report -r - -.. 
code-block:: none - - [ReFrame Setup] - version: 3.4-dev2 (rev: f102d4bb) - command: './bin/reframe -c tutorials/basics/ -R -n HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest --performance-report -r' - launched by: user@dom101 - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: (R) '/users/user/Devel/reframe/tutorials/basics' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [==========] Running 4 check(s) - [==========] Started on Mon Jan 25 00:34:32 2021 - - [----------] start processing checks - [ RUN ] HelloMultiLangTest_c on daint:login using builtin - [ RUN ] HelloMultiLangTest_c on daint:login using gnu - [ RUN ] HelloMultiLangTest_c on daint:login using intel - [ RUN ] HelloMultiLangTest_c on daint:login using pgi - [ RUN ] HelloMultiLangTest_c on daint:login using cray - [ RUN ] HelloMultiLangTest_c on daint:gpu using gnu - [ RUN ] HelloMultiLangTest_c on daint:gpu using intel - [ RUN ] HelloMultiLangTest_c on daint:gpu using pgi - [ RUN ] HelloMultiLangTest_c on daint:gpu using cray - [ RUN ] HelloMultiLangTest_c on daint:mc using gnu - [ RUN ] HelloMultiLangTest_c on daint:mc using intel - [ RUN ] HelloMultiLangTest_c on daint:mc using pgi - [ RUN ] HelloMultiLangTest_c on daint:mc using cray - [ RUN ] HelloMultiLangTest_cpp on daint:login using builtin - [ RUN ] HelloMultiLangTest_cpp on daint:login using gnu - [ RUN ] HelloMultiLangTest_cpp on daint:login using intel - [ RUN ] HelloMultiLangTest_cpp on daint:login using pgi - [ RUN ] HelloMultiLangTest_cpp on daint:login using cray - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using gnu - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using intel - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using pgi - [ RUN ] HelloMultiLangTest_cpp on daint:gpu using cray - [ RUN ] HelloMultiLangTest_cpp on daint:mc using gnu - [ RUN ] HelloMultiLangTest_cpp on daint:mc using intel - [ RUN ] HelloMultiLangTest_cpp on daint:mc using pgi - [ RUN ] HelloMultiLangTest_cpp on daint:mc using cray - [ RUN ] HelloThreadedExtended2Test on daint:login using builtin - [ RUN ] HelloThreadedExtended2Test on daint:login using gnu - [ RUN ] HelloThreadedExtended2Test on daint:login using intel - [ RUN ] HelloThreadedExtended2Test on daint:login using pgi - [ RUN ] HelloThreadedExtended2Test on daint:login using cray - [ RUN ] HelloThreadedExtended2Test on daint:gpu using gnu - [ RUN ] HelloThreadedExtended2Test on daint:gpu using intel - [ RUN ] HelloThreadedExtended2Test on daint:gpu using pgi - [ RUN ] HelloThreadedExtended2Test on daint:gpu using cray - [ RUN ] HelloThreadedExtended2Test on daint:mc using gnu - [ RUN ] HelloThreadedExtended2Test on daint:mc using intel - [ RUN ] HelloThreadedExtended2Test on daint:mc using pgi - [ RUN ] HelloThreadedExtended2Test on daint:mc using cray - [ RUN ] StreamWithRefTest on daint:login using gnu - [ RUN ] StreamWithRefTest on daint:gpu using gnu - [ RUN ] StreamWithRefTest on daint:mc using gnu - [ OK ] ( 1/42) HelloThreadedExtended2Test on daint:login using cray [compile: 0.959s run: 56.203s total: 57.189s] - [ OK ] ( 2/42) HelloThreadedExtended2Test on daint:login using intel [compile: 2.096s run: 61.438s total: 64.062s] - [ OK ] ( 3/42) HelloMultiLangTest_cpp on daint:login using cray [compile: 0.479s run: 98.909s total: 99.406s] - [ OK ] ( 4/42) HelloMultiLangTest_c on daint:login using pgi [compile: 1.342s run: 137.250s total: 138.609s] - [ 
OK ] ( 5/42) HelloThreadedExtended2Test on daint:gpu using cray [compile: 0.792s run: 33.748s total: 34.558s] - [ OK ] ( 6/42) HelloThreadedExtended2Test on daint:gpu using intel [compile: 2.257s run: 48.545s total: 50.825s] - [ OK ] ( 7/42) HelloMultiLangTest_cpp on daint:gpu using cray [compile: 0.469s run: 85.383s total: 85.873s] - [ OK ] ( 8/42) HelloMultiLangTest_c on daint:gpu using cray [compile: 0.132s run: 124.678s total: 124.827s] - [ OK ] ( 9/42) HelloThreadedExtended2Test on daint:mc using cray [compile: 0.775s run: 15.569s total: 16.362s] - [ OK ] (10/42) HelloThreadedExtended2Test on daint:mc using intel [compile: 2.814s run: 24.600s total: 27.438s] - [ OK ] (11/42) HelloMultiLangTest_cpp on daint:mc using cray [compile: 0.474s run: 70.035s total: 70.528s] - [ OK ] (12/42) HelloMultiLangTest_c on daint:mc using cray [compile: 0.138s run: 110.807s total: 110.963s] - [ OK ] (13/42) HelloThreadedExtended2Test on daint:login using builtin [compile: 0.790s run: 67.313s total: 68.124s] - [ OK ] (14/42) HelloMultiLangTest_cpp on daint:login using pgi [compile: 1.799s run: 100.490s total: 102.683s] - [ OK ] (15/42) HelloMultiLangTest_cpp on daint:login using builtin [compile: 0.497s run: 108.380s total: 108.895s] - [ OK ] (16/42) HelloMultiLangTest_c on daint:login using gnu [compile: 1.337s run: 142.017s total: 143.373s] - [ OK ] (17/42) HelloMultiLangTest_cpp on daint:gpu using pgi [compile: 1.851s run: 88.935s total: 90.805s] - [ OK ] (18/42) HelloMultiLangTest_cpp on daint:gpu using gnu [compile: 1.640s run: 97.855s total: 99.513s] - [ OK ] (19/42) HelloMultiLangTest_c on daint:gpu using intel [compile: 1.578s run: 131.689s total: 133.287s] - [ OK ] (20/42) HelloMultiLangTest_cpp on daint:mc using pgi [compile: 1.917s run: 73.276s total: 75.213s] - [ OK ] (21/42) HelloMultiLangTest_cpp on daint:mc using gnu [compile: 1.727s run: 82.213s total: 83.960s] - [ OK ] (22/42) HelloMultiLangTest_c on daint:mc using intel [compile: 1.573s run: 117.806s total: 119.402s] - [ OK ] (23/42) HelloMultiLangTest_cpp on daint:login using gnu [compile: 1.644s run: 106.956s total: 108.618s] - [ OK ] (24/42) HelloMultiLangTest_c on daint:login using cray [compile: 0.146s run: 137.301s total: 137.466s] - [ OK ] (25/42) HelloMultiLangTest_c on daint:login using intel [compile: 1.613s run: 140.058s total: 141.689s] - [ OK ] (26/42) HelloMultiLangTest_c on daint:login using builtin [compile: 0.122s run: 143.692s total: 143.833s] - [ OK ] (27/42) HelloMultiLangTest_c on daint:gpu using pgi [compile: 1.361s run: 127.958s total: 129.341s] - [ OK ] (28/42) HelloMultiLangTest_c on daint:gpu using gnu [compile: 1.337s run: 136.031s total: 137.386s] - [ OK ] (29/42) HelloMultiLangTest_c on daint:mc using pgi [compile: 1.410s run: 113.998s total: 115.428s] - [ OK ] (30/42) HelloMultiLangTest_c on daint:mc using gnu [compile: 1.344s run: 122.086s total: 123.453s] - [ OK ] (31/42) HelloThreadedExtended2Test on daint:login using pgi [compile: 2.733s run: 60.105s total: 62.951s] - [ OK ] (32/42) HelloMultiLangTest_cpp on daint:login using intel [compile: 2.780s run: 104.916s total: 107.716s] - [ OK ] (33/42) HelloThreadedExtended2Test on daint:gpu using pgi [compile: 2.373s run: 39.144s total: 41.545s] - [ OK ] (34/42) HelloMultiLangTest_cpp on daint:gpu using intel [compile: 1.835s run: 95.042s total: 96.896s] - [ OK ] (35/42) HelloThreadedExtended2Test on daint:mc using pgi [compile: 2.686s run: 20.751s total: 23.457s] - [ OK ] (36/42) HelloMultiLangTest_cpp on daint:mc using intel [compile: 1.862s run: 79.275s 
total: 81.170s] - [ OK ] (37/42) HelloThreadedExtended2Test on daint:login using gnu [compile: 2.106s run: 67.284s total: 69.409s] - [ OK ] (38/42) HelloThreadedExtended2Test on daint:gpu using gnu [compile: 2.471s run: 56.360s total: 58.871s] - [ OK ] (39/42) HelloThreadedExtended2Test on daint:mc using gnu [compile: 2.007s run: 32.300s total: 34.330s] - [ OK ] (40/42) StreamWithRefTest on daint:login using gnu [compile: 1.941s run: 14.373s total: 16.337s] - [ OK ] (41/42) StreamWithRefTest on daint:gpu using gnu [compile: 1.954s run: 11.815s total: 13.791s] - [ OK ] (42/42) StreamWithRefTest on daint:mc using gnu [compile: 2.513s run: 10.672s total: 13.213s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 42 test case(s) from 4 check(s) (0 failure(s)) - [==========] Finished on Mon Jan 25 00:37:02 2021 - ============================================================================== - PERFORMANCE REPORT - ------------------------------------------------------------------------------ - StreamWithRefTest - - daint:login - - gnu - * num_tasks: 1 - * Copy: 72923.3 MB/s - * Scale: 45663.4 MB/s - * Add: 49417.7 MB/s - * Triad: 49426.4 MB/s - - daint:gpu - - gnu - * num_tasks: 1 - * Copy: 50638.7 MB/s - * Scale: 35186.0 MB/s - * Add: 38564.4 MB/s - * Triad: 38771.1 MB/s - - daint:mc - - gnu - * num_tasks: 1 - * Copy: 19072.5 MB/s - * Scale: 10395.6 MB/s - * Add: 11041.0 MB/s - * Triad: 11079.2 MB/s - ------------------------------------------------------------------------------ - Log file(s) saved in: '/tmp/rfm-r4yjva71.log' - +.. literalinclude:: listings/alltests_daint.txt + :language: console There it is! Without any change in our tests, we could simply run them in a HPC cluster with all of its intricacies. @@ -1154,132 +749,8 @@ Let's run our adapted test now: ./bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report -.. 
code-block:: none - - [ReFrame Setup] - version: 3.3-dev0 (rev: cb974c13) - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/stream/stream4.py -r --performance-report' - launched by: user@dom101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/basics/stream/stream4.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [==========] Running 1 check(s) - [==========] Started on Mon Oct 12 20:16:03 2020 - - [----------] start processing checks - [ RUN ] StreamMultiSysTest on daint:login using gnu - [ RUN ] StreamMultiSysTest on daint:login using intel - [ RUN ] StreamMultiSysTest on daint:login using pgi - [ RUN ] StreamMultiSysTest on daint:login using cray - [ RUN ] StreamMultiSysTest on daint:gpu using gnu - [ RUN ] StreamMultiSysTest on daint:gpu using intel - [ RUN ] StreamMultiSysTest on daint:gpu using pgi - [ RUN ] StreamMultiSysTest on daint:gpu using cray - [ RUN ] StreamMultiSysTest on daint:mc using gnu - [ RUN ] StreamMultiSysTest on daint:mc using intel - [ RUN ] StreamMultiSysTest on daint:mc using pgi - [ RUN ] StreamMultiSysTest on daint:mc using cray - [ OK ] ( 1/12) StreamMultiSysTest on daint:gpu using pgi [compile: 2.092s run: 11.201s total: 13.307s] - [ OK ] ( 2/12) StreamMultiSysTest on daint:gpu using gnu [compile: 2.349s run: 17.140s total: 19.509s] - [ OK ] ( 3/12) StreamMultiSysTest on daint:login using pgi [compile: 2.230s run: 20.946s total: 23.189s] - [ OK ] ( 4/12) StreamMultiSysTest on daint:login using gnu [compile: 2.161s run: 27.093s total: 29.266s] - [ OK ] ( 5/12) StreamMultiSysTest on daint:mc using gnu [compile: 1.954s run: 7.904s total: 9.870s] - [ OK ] ( 6/12) StreamMultiSysTest on daint:gpu using intel [compile: 2.286s run: 14.686s total: 16.984s] - [ OK ] ( 7/12) StreamMultiSysTest on daint:login using intel [compile: 2.520s run: 24.427s total: 26.960s] - [ OK ] ( 8/12) StreamMultiSysTest on daint:mc using intel [compile: 2.312s run: 5.350s total: 7.678s] - [ OK ] ( 9/12) StreamMultiSysTest on daint:gpu using cray [compile: 0.672s run: 10.791s total: 11.476s] - [ OK ] (10/12) StreamMultiSysTest on daint:login using cray [compile: 0.706s run: 20.505s total: 21.229s] - [ OK ] (11/12) StreamMultiSysTest on daint:mc using cray [compile: 0.674s run: 2.763s total: 3.453s] - [ OK ] (12/12) StreamMultiSysTest on daint:mc using pgi [compile: 2.088s run: 5.124s total: 7.224s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 12 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Mon Oct 12 20:16:36 2020 - ============================================================================== - PERFORMANCE REPORT - ------------------------------------------------------------------------------ - StreamMultiSysTest - - daint:login - - gnu - * num_tasks: 1 - * Copy: 95784.6 MB/s - * Scale: 73747.3 MB/s - * Add: 79138.3 MB/s - * Triad: 81253.3 MB/s - - intel - * num_tasks: 1 - * Copy: 103540.5 MB/s - * Scale: 109257.6 MB/s - * Add: 112189.8 MB/s - * Triad: 113440.8 MB/s - - pgi - * num_tasks: 1 - * Copy: 99071.7 MB/s - * Scale: 74721.3 MB/s - * Add: 81206.4 MB/s - * Triad: 78328.9 MB/s - - cray - * num_tasks: 1 - * Copy: 96664.5 MB/s - * Scale: 75637.4 MB/s - * Add: 74759.3 MB/s - * Triad: 73450.6 MB/s - - daint:gpu - - gnu - * num_tasks: 1 - * Copy: 42293.7 MB/s - * Scale: 38095.1 MB/s - * Add: 43080.7 MB/s - * Triad: 43719.2 MB/s - - intel - * 
num_tasks: 1 - * Copy: 52563.0 MB/s - * Scale: 54316.5 MB/s - * Add: 59044.5 MB/s - * Triad: 59165.5 MB/s - - pgi - * num_tasks: 1 - * Copy: 50710.5 MB/s - * Scale: 39639.5 MB/s - * Add: 44104.5 MB/s - * Triad: 44143.7 MB/s - - cray - * num_tasks: 1 - * Copy: 51159.8 MB/s - * Scale: 39176.0 MB/s - * Add: 43588.8 MB/s - * Triad: 43866.8 MB/s - - daint:mc - - gnu - * num_tasks: 1 - * Copy: 48744.5 MB/s - * Scale: 38774.7 MB/s - * Add: 43760.0 MB/s - * Triad: 44143.1 MB/s - - intel - * num_tasks: 1 - * Copy: 52707.0 MB/s - * Scale: 49011.8 MB/s - * Add: 57513.3 MB/s - * Triad: 57678.3 MB/s - - pgi - * num_tasks: 1 - * Copy: 46274.3 MB/s - * Scale: 40628.6 MB/s - * Add: 44352.4 MB/s - * Triad: 44630.2 MB/s - - cray - * num_tasks: 1 - * Copy: 46912.5 MB/s - * Scale: 40076.9 MB/s - * Add: 43639.0 MB/s - * Triad: 44068.3 MB/s - ------------------------------------------------------------------------------ - Log file(s) saved in: '/tmp/rfm-odx7qewe.log' - +.. literalinclude:: listings/stream4_daint.txt + :language: console Notice the improved performance of the benchmark in all partitions and the differences in performance between the different compilers. diff --git a/docs/tutorial_build_automation.rst b/docs/tutorial_build_automation.rst index ed91b60967..c0ba98f031 100644 --- a/docs/tutorial_build_automation.rst +++ b/docs/tutorial_build_automation.rst @@ -5,12 +5,6 @@ Tutorial 5: Using Build Automation Tools As a Build System In this tutorial we will present how to use `Easybuild `__ and `Spack `__ as a build system for a ReFrame test. The example uses the configuration file presented in :doc:`tutorial_basics`, which you can find in ``tutorials/config/settings.py``. We also assume that the reader is already familiar with the concepts presented in the basic tutorial and has a working knowledge of EasyBuild and Spack. -Finally, to avoid specifying the tutorial configuration file each time you run the test, make sure to export it here: - -.. code:: bash - - export RFM_CONFIG_FILE=$(pwd)/tutorials/config/mysettings.py - Using EasyBuild to Build the Test Code diff --git a/docs/tutorial_deps.rst b/docs/tutorial_deps.rst index e8bc2a80bd..afef9dfdea 100644 --- a/docs/tutorial_deps.rst +++ b/docs/tutorial_deps.rst @@ -111,72 +111,8 @@ Here is the output when running the OSU tests with the asynchronous execution po ./bin/reframe -c tutorials/deps/osu_benchmarks.py -r -.. 
code-block:: none - - [ReFrame Setup] - version: 3.10.0-dev.2 - command: './bin/reframe -c tutorials/deps/osu_benchmarks.py -r' - launched by: user@daint101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [==========] Running 8 check(s) - [==========] Started on Wed Mar 10 20:53:56 2021 - - [----------] start processing checks - [ RUN ] OSUDownloadTest on daint:login using builtin - [ OK ] ( 1/22) OSUDownloadTest on daint:login using builtin [compile: 0.035s run: 2.520s total: 2.716s] - [ RUN ] OSUBuildTest on daint:gpu using gnu - [ RUN ] OSUBuildTest on daint:gpu using intel - [ RUN ] OSUBuildTest on daint:gpu using pgi - [ OK ] ( 2/22) OSUBuildTest on daint:gpu using gnu [compile: 156.713s run: 10.222s total: 170.501s] - [ RUN ] OSULatencyTest on daint:gpu using gnu - [ RUN ] OSUBandwidthTest on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_2 on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_4 on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_16 on daint:gpu using gnu - [ RUN ] OSUAllreduceTest_8 on daint:gpu using gnu - [ OK ] ( 3/22) OSUBuildTest on daint:gpu using pgi [compile: 168.692s run: 0.751s total: 171.227s] - [ RUN ] OSUAllreduceTest_8 on daint:gpu using pgi - [ RUN ] OSULatencyTest on daint:gpu using pgi - [ RUN ] OSUBandwidthTest on daint:gpu using pgi - [ RUN ] OSUAllreduceTest_2 on daint:gpu using pgi - [ RUN ] OSUAllreduceTest_4 on daint:gpu using pgi - [ RUN ] OSUAllreduceTest_16 on daint:gpu using pgi - [ OK ] ( 4/22) OSULatencyTest on daint:gpu using gnu [compile: 0.031s run: 63.644s total: 64.558s] - [ OK ] ( 5/22) OSUAllreduceTest_2 on daint:gpu using gnu [compile: 0.016s run: 53.954s total: 64.619s] - [ OK ] ( 6/22) OSULatencyTest on daint:gpu using pgi [compile: 0.032s run: 28.134s total: 65.222s] - [ OK ] ( 7/22) OSUAllreduceTest_4 on daint:gpu using gnu [compile: 0.015s run: 49.682s total: 65.862s] - [ OK ] ( 8/22) OSUAllreduceTest_16 on daint:gpu using gnu [compile: 0.011s run: 44.188s total: 66.009s] - [ OK ] ( 9/22) OSUAllreduceTest_8 on daint:gpu using gnu [compile: 0.014s run: 38.366s total: 66.076s] - [ OK ] (10/22) OSUAllreduceTest_8 on daint:gpu using pgi [compile: 0.009s run: 34.306s total: 66.546s] - [ OK ] (11/22) OSUBuildTest on daint:gpu using intel [compile: 245.878s run: 0.555s total: 246.570s] - [ RUN ] OSUAllreduceTest_8 on daint:gpu using intel - [ RUN ] OSUAllreduceTest_4 on daint:gpu using intel - [ RUN ] OSULatencyTest on daint:gpu using intel - [ RUN ] OSUBandwidthTest on daint:gpu using intel - [ RUN ] OSUAllreduceTest_2 on daint:gpu using intel - [ RUN ] OSUAllreduceTest_16 on daint:gpu using intel - [ OK ] (12/22) OSUBandwidthTest on daint:gpu using gnu [compile: 0.017s run: 98.239s total: 104.363s] - [ OK ] (13/22) OSUAllreduceTest_2 on daint:gpu using pgi [compile: 0.014s run: 58.084s total: 93.705s] - [ OK ] (14/22) OSUAllreduceTest_4 on daint:gpu using pgi [compile: 0.023s run: 53.762s total: 82.721s] - [ OK ] (15/22) OSUAllreduceTest_16 on daint:gpu using pgi [compile: 0.052s run: 49.170s total: 82.695s] - [ OK ] (16/22) OSUBandwidthTest on daint:gpu using pgi [compile: 0.048s run: 89.141s total: 125.222s] - [ OK ] (17/22) OSUAllreduceTest_2 on daint:gpu using intel [compile: 0.024s run: 46.974s total: 65.742s] - [ OK ] (18/22) OSUAllreduceTest_8 on daint:gpu using intel [compile: 0.010s run: 
70.032s total: 71.045s] - [ OK ] (19/22) OSUAllreduceTest_4 on daint:gpu using intel [compile: 0.045s run: 67.585s total: 72.897s] - [ OK ] (20/22) OSULatencyTest on daint:gpu using intel [compile: 0.013s run: 61.913s total: 73.029s] - [ OK ] (21/22) OSUAllreduceTest_16 on daint:gpu using intel [compile: 0.024s run: 59.141s total: 81.230s] - [ OK ] (22/22) OSUBandwidthTest on daint:gpu using intel [compile: 0.044s run: 121.324s total: 136.121s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 22/22 test case(s) from 8 check(s) (0 failure(s)) - [==========] Finished on Wed Mar 10 20:58:03 2021 - Log file(s) saved in: '/tmp/rfm-q0gd9y6v.log' - +.. literalinclude:: listings/osu_bench_deps.txt + :language: console Before starting running the tests, ReFrame topologically sorts them based on their dependencies and schedules them for running using the selected execution policy. With the serial execution policy, ReFrame simply executes the tests to completion as they "arrive," since the tests are already topologically sorted. @@ -195,27 +131,8 @@ For example, if we select only the :class:`OSULatencyTest` for running, ReFrame ./bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l - -.. code-block:: none - - $ ./bin/reframe -C -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l - [ReFrame Setup] - version: 3.3-dev2 (rev: 8ded20cd) - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l' - launched by: user@daint101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - [List of matched checks] - - OSUDownloadTest (found in '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py') - - OSUBuildTest (found in '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py') - - OSULatencyTest (found in '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py') - Found 3 check(s) - Log file(s) saved in: '/tmp/rfm-4c15g820.log' - +.. literalinclude:: listings/osu_latency_list.txt + :language: console Finally, when ReFrame cannot resolve a dependency of a test, it will issue a warning and skip completely all the test cases that recursively depend on this one. In the following example, we restrict the run of the :class:`OSULatencyTest` to the ``daint:gpu`` partition. @@ -226,109 +143,37 @@ As a result, its immediate dependency :class:`OSUBuildTest` will be skipped, whi ./bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l -.. 
code-block:: none - - [ReFrame Setup] - version: 3.6.0-dev.0+4de0fee1 - command: './bin/reframe -c tutorials/deps/osu_benchmarks.py --system=daint:gpu -n OSULatencyTest -l' - launched by: user@daint101 - working directory: '/users/user/Devel/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - - ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'gnu') -> 'OSUDownloadTest' - ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'intel') -> 'OSUDownloadTest' - ./bin/reframe: could not resolve dependency: ('OSUBuildTest', 'daint:gpu', 'pgi') -> 'OSUDownloadTest' - ./bin/reframe: skipping all dependent test cases - - ('OSUBuildTest', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_2', 'daint:gpu', 'intel') - - ('OSUBuildTest', 'daint:gpu', 'pgi') - - ('OSULatencyTest', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_8', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_4', 'daint:gpu', 'pgi') - - ('OSULatencyTest', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_4', 'daint:gpu', 'intel') - - ('OSUAllreduceTest_8', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'intel') - - ('OSUBandwidthTest', 'daint:gpu', 'pgi') - - ('OSUBuildTest', 'daint:gpu', 'gnu') - - ('OSUBandwidthTest', 'daint:gpu', 'intel') - - ('OSUBandwidthTest', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_2', 'daint:gpu', 'pgi') - - ('OSUAllreduceTest_16', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_2', 'daint:gpu', 'gnu') - - ('OSULatencyTest', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_4', 'daint:gpu', 'gnu') - - ('OSUAllreduceTest_8', 'daint:gpu', 'gnu') - - [List of matched checks] - - Found 0 check(s) - - Log file(s) saved in: '/tmp/rfm-6cxeil6h.log' - +.. literalinclude:: listings/osu_latency_unresolved_deps.txt + :language: console Listing Dependencies -------------------- -You can view the dependencies of a test by using the :option:`-L` option: - - -.. code-block:: console - - ./bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L - - -.. code-block:: none - - < ... omitted ... > +As shown in the listing of :class:`OSULatencyTest` before, the full dependency chain of the test is listed along with the test. +Each target dependency is printed on a new line prefixed by the ``^`` character and indented proportionally to its level. +If a target dependency appears in multiple paths, it will only be listed once. - - OSULatencyTest: - Description: - OSU latency test +The default test listing shows the dependencies at the test level, i.e., the *conceptual* dependencies. +ReFrame generates multiple test cases from each test depending on the target system configuration. +We have seen in the :doc:`tutorial_basics` already how the STREAM benchmark generated many more test cases when it was run in an HPC system with multiple partitions and programming environments. +The dependencies between these test cases are the *actual* dependencies and they form the actual test case graph that will be executed by the runtime. +The mapping of a test to its concrete test cases that will be executed on a system is called *test concretization*. +You can view the exact concretization of the selected tests with ``--list=concretized`` or simply ``-lC``.
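For reference, a conceptual dependency is declared at the test level with the :func:`depends_on` method. The following is only an illustrative sketch, not the exact code of ``osu_benchmarks.py``; it shows the single conceptual dependency that the concretization step later expands into per-partition and per-environment test case dependencies:

.. code-block:: python

   import reframe as rfm
   import reframe.utility.sanity as sn


   @rfm.simple_test
   class OSULatencyTest(rfm.RunOnlyRegressionTest):
       valid_systems = ['daint:gpu']
       valid_prog_environs = ['gnu', 'intel', 'pgi']
       executable = './osu_latency'

       @run_after('init')
       def set_dependencies(self):
           # Conceptual, test-level dependency; ReFrame turns it into
           # dependencies between the individual test cases of the two
           # tests during concretization.
           self.depends_on('OSUBuildTest')

       @sanity_function
       def validate(self):
           return sn.assert_found(r'OSU MPI Latency Test', self.stdout)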
+Here is how the OSU benchmarks of this tutorial are concretized on the system ``daint``: - Environment modules: - Location: - /users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py - - Maintainers: - - - Node allocation: - standard (2 task(s)) - - Pipeline hooks: - - post_setup: set_executable - - Tags: - - - Valid environments: - gnu, pgi, intel - - Valid systems: - daint:gpu - - Dependencies (conceptual): - OSUBuildTest +.. code-block:: console - Dependencies (actual): - - ('OSULatencyTest', 'daint:gpu', 'gnu') -> ('OSUBuildTest', 'daint:login', 'gnu') - - ('OSULatencyTest', 'daint:gpu', 'intel') -> ('OSUBuildTest', 'daint:login', 'intel') - - ('OSULatencyTest', 'daint:gpu', 'pgi') -> ('OSUBuildTest', 'daint:login', 'pgi') + ./bin/reframe -c tutorials/deps/osu_benchmarks.py -lC - < ... omitted ... > +.. literalinclude:: listings/osu_bench_list_concretized.txt + :language: console +Notice how the various test cases of the run benchmarks depend on the corresponding test cases of the build tests. -Dependencies are not only listed conceptually, e.g., "test A depends on test B," but also in a way that shows how they are actually interpreted between the different test cases of the tests. -The test dependencies do not change conceptually, but their actual interpretation might change from system to system or from programming environment to programming environment. -The following listing shows how the actual test cases dependencies are formed when we select only the ``gnu`` and ``builtin`` programming environment for running: +The concretization of test cases changes if a specific partition or programming environment is passed from the command line or, of course, if the test is run on a different system. +If we scope our programming environments to ``gnu`` and ``builtin`` only, ReFrame will generate only 8 test cases instead of 22: .. note:: @@ -339,46 +184,33 @@ The following listing shows how the actual test cases dependencies are formed wh ./bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu +.. literalinclude:: listings/osu_bench_list_concretized_gnu.txt + :language: console -.. code-block:: none - :emphasize-lines: 35 - - < ... omitted ... > - - OSULatencyTest: - Description: - OSU latency test +To gain a deeper understanding of how test dependencies work in ReFrame, please refer to :doc:`dependencies`. - Environment modules: - - Location: - /users/user/Devel/reframe/tutorials/deps/osu_benchmarks.py +.. _param_deps: - Maintainers: - +Depending on Parameterized Tests +-------------------------------- - Node allocation: - standard (2 task(s)) +As shown earlier in this section, tests define their dependencies by referencing the target tests by their unique name. +This is straightforward when referring to regular tests, where their name matches the class name, but it becomes cumbersome when referring to a parameterized test, since no safe assumptions can be made about the number of its variants or about how its parameters are encoded in its name. +In order to safely and reliably refer to a parameterized test, you should use the :func:`~reframe.core.pipeline.RegressionMixin.get_variant_nums` and :func:`~reframe.core.pipeline.RegressionMixin.variant_name` class methods as shown in the following example: - Pipeline hooks: - - post_setup: set_executable +.. literalinclude:: ../tutorials/deps/parameterized.py + :emphasize-lines: 37- - Tags: - +In this example, :class:`TestB` depends only on selected variants of :class:`TestA`.
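Since ``tutorials/deps/parameterized.py`` is only referenced above and not reproduced in this patch, the following sketch shows the general pattern only; the parameter values, class bodies and the ``z > 5`` filter are illustrative assumptions, not the tutorial's exact code:

.. code-block:: python

   import reframe as rfm
   import reframe.utility.sanity as sn


   @rfm.simple_test
   class TestA(rfm.RunOnlyRegressionTest):
       z = parameter(range(10))
       executable = 'echo'
       valid_systems = ['*']
       valid_prog_environs = ['*']

       @sanity_function
       def validate(self):
           return sn.assert_true(True)


   @rfm.simple_test
   class TestB(rfm.RunOnlyRegressionTest):
       executable = 'echo'
       valid_systems = ['*']
       valid_prog_environs = ['*']

       @run_after('init')
       def setdeps(self):
           # Illustrative filter: depend only on the TestA variants with z > 5.
           # get_variant_nums() filters the parameter space and variant_name()
           # maps each variant number back to a concrete test name.
           for v in TestA.get_variant_nums(z=lambda x: x > 5):
               self.depends_on(TestA.variant_name(v))

       @sanity_function
       def validate(self):
           return sn.assert_true(True)

With ten values of ``z`` in this sketch, :class:`TestB` would register dependencies on the four :class:`TestA` variants whose parameter value exceeds 5.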
+The :func:`get_variant_nums` method accepts a set of key-value pairs representing the target test parameters and selector functions and returns the list of the variant numbers that correspond to these variants. +Using the :func:`variant_name` subsequently, we can get the actual name of the variant. - Valid environments: - gnu, pgi, intel - Valid systems: - daint:gpu - - Dependencies (conceptual): - OSUBuildTest - - Dependencies (actual): - - ('OSULatencyTest', 'daint:gpu', 'gnu') -> ('OSUBuildTest', 'daint:login', 'gnu') +.. code-block:: console - < ... omitted ... > + ./bin/reframe -c tutorials/deps/parameterized.py -l -For more information on test dependencies, you can have a look at :doc:`dependencies`. +.. literalinclude:: listings/param_deps_list.txt + :language: console diff --git a/docs/tutorial_fixtures.rst b/docs/tutorial_fixtures.rst index eb51b33b1e..1ccbe30e06 100644 --- a/docs/tutorial_fixtures.rst +++ b/docs/tutorial_fixtures.rst @@ -90,71 +90,54 @@ It is now time to run the new tests, but let us first list them: .. code-block:: bash - export RFM_CONFIG_FILE=$(pwd)/tutorials/config/settings.py reframe -c tutorials/fixtures/osu_benchmarks.py -l -.. code-block:: console +.. literalinclude:: listings/osu_bench_fixtures_list.txt + :language: console - [ReFrame Setup] - version: 3.9.0 - command: 'reframe -c tutorials/fixtures/osu_benchmarks.py -l' - launched by: user@daint106 - working directory: '/users/user/Devel/reframe' - settings file: '/users/user/Devel/reframe/tutorials/config/settings.py' - check search path: '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py' - stage directory: '/users/user/Devel/reframe/stage' - output directory: '/users/user/Devel/reframe/output' - [List of matched checks] - - osu_latency_test (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_8 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_2 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_4 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_bandwidth_test (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - - osu_allreduce_test_16 (found in '/users/user/Devel/reframe/tutorials/fixtures/osu_benchmarks.py') - Found 6 check(s) - - Log file(s) saved in '/tmp/rfm-dlkc1vb_.log' - -Notice that only the leaf tests are listed and not their fixtures. -Listing the tests in detailed mode, however, using the :option:`-L` option, you will see all the generated fixtures: - - -.. code-block:: bash - - reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -L - -ReFrame will generate 4 fixtures for this test based on the partition and environment configurations for the current system. -The following figure shows the generated fixtures as well as their conceptual dependencies. +Notice how the :class:`build_osu_benchmarks` fixture is populated three times, once for each partition and environment combination, and the :class:`fetch_osu_benchmarks` is generated only once. +The following figure shows visually the conceptual dependencies of the :class:`osu_bandwidth_test`. .. 
figure:: _static/img/fixtures-conceptual-deps.svg :align: center :sub:`Expanded fixtures and dependencies for the OSU benchmarks example.` -Notice how the :class:`build_osu_benchmarks` fixture is populated three times, once for each partition and environment combination, and the :class:`fetch_osu_benchmarks` is generated only once. -Tests in a single ReFrame session must have unique names, so the fixture class name is mangled by the framework to generate a unique name in the test dependency DAG. A *scope* part is added to the base name of the fixture, which in this figure is indicated with red color. Under the hood, fixtures use the test dependency mechanism which is described in :doc:`dependencies`. -The dependencies shown in the previous figure are conceptual. -A single test in ReFrame generates a series of test cases for all the combinations of valid systems and valid programming environments and the actual dependencies are expressed in this more fine-grained layer, which is also the layer at which the execution of tests is scheduled. +The dependencies listed by default and shown in the previous figure are conceptual. +Depending on the available partitions and environments, tests and fixtures can be concretized differently. +Fixtures in particular are also more flexible in the way they can be concretized depending on their scope. +The following listing and figure show the concretization of the :class:`osu_bandwidth_test`: + +.. code-block:: bash + + reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC + +.. literalinclude:: listings/osu_bandwidth_concretized_daint.txt + :language: console -The following figure shows how the above graph translates into the actual DAG of test cases. .. figure:: _static/img/fixtures-actual-deps.svg :align: center :sub:`The actual dependencies for the OSU benchmarks example using fixtures.` - The first thing to notice here is how the individual test cases of :class:`osu_bandwidth_test` depend only the specific fixtures for their scope: when :class:`osu_bandwidth_test` runs on the ``daint:gpu`` partition using the ``gnu`` compiler it will only depend on the :class:`build_osu_benchmarks~daint:gpu+gnu` fixture. The second thing to notice is where the :class:`fetch_osu_benchmarks~daint` fixture will run. Since this is a *session* fixture, ReFrame has arbitrarily chosen to run it on ``daint:gpu`` using the ``gnu`` environment. A session fixture can run on any combination of valid partitions and environments. -The following figure shows how the test dependency DAG is concretized when we scope the valid programming environments from the command line using ``-p pgi``. +The following listing and figure show how the test dependency DAG is concretized when we scope the valid programming environments from the command line using ``-p pgi``. + +.. code-block:: bash + + reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi +.. literalinclude:: listings/osu_bandwidth_concretized_daint_pgi.txt + :language: console .. figure:: _static/img/fixtures-actual-deps-scoped.svg :align: center @@ -163,66 +146,13 @@ The following figure shows how the test dependency DAG is concretized when we sc Notice how the :class:`fetch_osu_benchmarks~daint` fixture is selected to run in the only valid partition/environment combination. 
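The scope of a fixture is fixed at the point where it is declared. The following condensed sketch shows where the scopes used in this example are set; the class names match the tutorial, but the bodies are abbreviated assumptions and not the complete tutorial code:

.. code-block:: python

   import reframe as rfm


   class fetch_osu_benchmarks(rfm.RunOnlyRegressionTest):
       # Downloads the benchmark sources; details omitted in this sketch.
       ...


   class build_osu_benchmarks(rfm.CompileOnlyRegressionTest):
       # A 'session' fixture is shared by the whole run session, so ReFrame
       # may place it on any valid partition/environment combination.
       osu_benchmarks = fixture(fetch_osu_benchmarks, scope='session')
       ...


   @rfm.simple_test
   class osu_bandwidth_test(rfm.RunOnlyRegressionTest):
       # An 'environment' fixture is instantiated once per partition and
       # programming environment combination of the test that uses it.
       osu_binaries = fixture(build_osu_benchmarks, scope='environment')
       ...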
+This is an important difference compared to the same example written using raw dependencies in :doc:`dependencies`, in which case in order not to have unresolved dependencies, we would need to specify the valid programming environment of the test that fetches the sources. +Fixtures do not need that, since you can impose less strict constraints by setting their scope accordingly. -The following listing shows the output of running the tutorial examples. - -.. code-block:: console - - [==========] Running 10 check(s) - [==========] Started on Sun Oct 31 22:00:28 2021 - - [----------] start processing checks - [ RUN ] fetch_osu_benchmarks~daint on daint:gpu using gnu - [ OK ] ( 1/22) fetch_osu_benchmarks~daint on daint:gpu using gnu [compile: 0.007s run: 2.960s total: 2.988s] - [ RUN ] build_osu_benchmarks~daint:gpu+intel on daint:gpu using intel - [ RUN ] build_osu_benchmarks~daint:gpu+pgi on daint:gpu using pgi - [ RUN ] build_osu_benchmarks~daint:gpu+gnu on daint:gpu using gnu - [ OK ] ( 2/22) build_osu_benchmarks~daint:gpu+gnu on daint:gpu using gnu [compile: 26.322s run: 2.609s total: 30.214s] - [ RUN ] osu_allreduce_test_16 on daint:gpu using gnu - [ RUN ] osu_bandwidth_test on daint:gpu using gnu - [ RUN ] osu_latency_test on daint:gpu using gnu - [ RUN ] osu_allreduce_test_2 on daint:gpu using gnu - [ RUN ] osu_allreduce_test_8 on daint:gpu using gnu - [ RUN ] osu_allreduce_test_4 on daint:gpu using gnu - [ OK ] ( 3/22) build_osu_benchmarks~daint:gpu+intel on daint:gpu using intel [compile: 53.068s run: 0.650s total: 53.773s] - [ RUN ] osu_allreduce_test_2 on daint:gpu using intel - [ RUN ] osu_latency_test on daint:gpu using intel - [ RUN ] osu_allreduce_test_4 on daint:gpu using intel - [ RUN ] osu_allreduce_test_16 on daint:gpu using intel - [ RUN ] osu_allreduce_test_8 on daint:gpu using intel - [ OK ] ( 4/22) build_osu_benchmarks~daint:gpu+pgi on daint:gpu using pgi [compile: 52.482s run: 0.803s total: 53.981s] - [ RUN ] osu_allreduce_test_4 on daint:gpu using pgi - [ RUN ] osu_bandwidth_test on daint:gpu using intel - [ OK ] ( 5/22) osu_allreduce_test_16 on daint:gpu using gnu [compile: 0.015s run: 23.535s total: 23.922s] - [ RUN ] osu_latency_test on daint:gpu using pgi - [ RUN ] osu_bandwidth_test on daint:gpu using pgi - [ RUN ] osu_allreduce_test_2 on daint:gpu using pgi - [ RUN ] osu_allreduce_test_16 on daint:gpu using pgi - [ RUN ] osu_allreduce_test_8 on daint:gpu using pgi - [ OK ] ( 6/22) osu_latency_test on daint:gpu using gnu [compile: 0.010s run: 47.016s total: 54.703s] - [ OK ] ( 7/22) osu_allreduce_test_2 on daint:gpu using intel [compile: 0.009s run: 41.732s total: 42.313s] - [ OK ] ( 8/22) osu_allreduce_test_2 on daint:gpu using gnu [compile: 0.012s run: 54.571s total: 65.684s] - [ OK ] ( 9/22) osu_allreduce_test_8 on daint:gpu using gnu [compile: 0.011s run: 51.414s total: 65.712s] - [ OK ] (10/22) osu_allreduce_test_4 on daint:gpu using gnu [compile: 0.010s run: 48.378s total: 65.741s] - [ OK ] (11/22) osu_latency_test on daint:gpu using intel [compile: 0.008s run: 39.131s total: 42.877s] - [ OK ] (12/22) osu_allreduce_test_4 on daint:gpu using intel [compile: 0.009s run: 35.861s total: 42.898s] - [ OK ] (13/22) osu_allreduce_test_16 on daint:gpu using intel [compile: 0.008s run: 32.300s total: 42.901s] - [ OK ] (14/22) osu_allreduce_test_8 on daint:gpu using intel [compile: 0.009s run: 29.237s total: 42.914s] - [ OK ] (15/22) osu_allreduce_test_4 on daint:gpu using pgi [compile: 0.009s run: 26.134s total: 42.904s] - [ OK ] (16/22) osu_latency_test on 
daint:gpu using pgi [compile: 0.009s run: 23.085s total: 47.232s] - [ OK ] (17/22) osu_allreduce_test_2 on daint:gpu using pgi [compile: 0.008s run: 17.401s total: 41.728s] - [ OK ] (18/22) osu_allreduce_test_16 on daint:gpu using pgi [compile: 0.008s run: 15.895s total: 36.613s] - [ OK ] (19/22) osu_allreduce_test_8 on daint:gpu using pgi [compile: 0.009s run: 13.485s total: 34.296s] - [ OK ] (20/22) osu_bandwidth_test on daint:gpu using gnu [compile: 0.011s run: 80.564s total: 85.070s] - [ OK ] (21/22) osu_bandwidth_test on daint:gpu using intel [compile: 0.008s run: 76.772s total: 97.828s] - [ OK ] (22/22) osu_bandwidth_test on daint:gpu using pgi [compile: 0.009s run: 83.003s total: 110.656s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 22/22 test case(s) from 10 check(s) (0 failure(s), 0 skipped) - [==========] Finished on Sun Oct 31 22:07:25 2021 - Run report saved in '/users/user/.reframe/reports/run-report.json' - Log file(s) saved in '/tmp/rfm-qst7lvou.log' +Finally, let's run all the benchmarks at once: +.. literalinclude:: listings/osu_bench_fixtures_run.txt + :language: console .. tip:: A reasonable question is how to choose between fixtures and dependencies? diff --git a/docs/tutorial_tips_tricks.rst b/docs/tutorial_tips_tricks.rst index 34efaa0713..01e74bc751 100644 --- a/docs/tutorial_tips_tricks.rst +++ b/docs/tutorial_tips_tricks.rst @@ -19,14 +19,8 @@ In the following, we have inserted a small typo in the ``hello2.py`` tutorial ex ./bin/reframe -c tutorials/basics/hello -R -l -.. code-block:: none - - ./bin/reframe: skipping test file '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:17: name 's' is not defined - sanity_patterns = s.assert_found(r'Hello, World\!', 'hello.out') - (rerun with '-v' for more information) - [List of matched checks] - - HelloTest (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py') - Found 1 check(s) +.. literalinclude:: listings/hello2_typo.txt + :language: console Notice how ReFrame prints also the source code line that caused the error. This is not always the case, however. @@ -46,42 +40,10 @@ As suggested by the warning message, passing :option:`-v` will give you the stac .. code:: bash - ./bin/reframe -c tutorials/basics/hello -R -lv - -.. 
code-block:: none - - ./bin/reframe: skipping test file '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': name error: tutorials/basics/hello/hello2.py:17: name 's' is not defined - sanity_patterns = s.assert_found(r'Hello, World\!', 'hello.out') - (rerun with '-v' for more information) - Traceback (most recent call last): - File "/Users/user/Repositories/reframe/reframe/frontend/loader.py", line 172, in load_from_file - util.import_module_from_file(filename, force) - File "/Users/user/Repositories/reframe/reframe/utility/__init__.py", line 101, in import_module_from_file - return importlib.import_module(module_name) - File "/usr/local/Cellar/python@3.9/3.9.1_6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module - return _bootstrap._gcd_import(name[level:], package, level) - File "", line 1030, in _gcd_import - File "", line 1007, in _find_and_load - File "", line 986, in _find_and_load_unlocked - File "", line 680, in _load_unlocked - File "", line 790, in exec_module - File "", line 228, in _call_with_frames_removed - File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 11, in - class HelloMultiLangTest(rfm.RegressionTest): - File "/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py", line 17, in HelloMultiLangTest - sanity_patterns = s.assert_found(r'Hello, World\!', 'hello.out') - NameError: name 's' is not defined - - Loaded 1 test(s) - Generated 1 test case(s) - Filtering test cases(s) by name: 1 remaining - Filtering test cases(s) by tags: 1 remaining - Filtering test cases(s) by other attributes: 1 remaining - Final number of test cases: 1 - [List of matched checks] - - HelloTest (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello1.py') - Found 1 check(s) + ./bin/reframe -c tutorials/basics/hello -R -l -v +.. literalinclude:: listings/hello2_typo_stacktrace.txt + :language: console .. tip:: The :option:`-v` option can be given multiple times to increase the verbosity level further. @@ -127,20 +89,8 @@ If we run the test, we can see that the correct standard output filename will be ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r -.. code-block:: none - - rfm_HelloMultiLangTest_cpp_job.out - [ OK ] (1/4) HelloMultiLangTest_cpp on catalina:default using gnu [compile: 0.677s run: 0.700s total: 1.394s] - rfm_HelloMultiLangTest_c_job.out - [ OK ] (2/4) HelloMultiLangTest_c on catalina:default using gnu [compile: 0.451s run: 1.788s total: 2.258s] - rfm_HelloMultiLangTest_c_job.out - [ OK ] (3/4) HelloMultiLangTest_c on catalina:default using clang [compile: 0.329s run: 1.585s total: 1.934s] - rfm_HelloMultiLangTest_cpp_job.out - [ OK ] (4/4) HelloMultiLangTest_cpp on catalina:default using clang [compile: 0.609s run: 0.373s total: 1.004s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 4 test case(s) from 2 check(s) (0 failure(s)) - [==========] Finished on Wed Jan 20 17:19:01 2021 +.. literalinclude:: listings/hello2_print_stdout.txt + :language: console Debugging sanity and performance patterns @@ -195,79 +145,10 @@ Let's try loading the ``tutorials/basics/hello/hello2.py`` file: .. code:: bash - ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -lvv + ./bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv - -.. 
code-block:: none - - Loading user configuration - Loading configuration file: 'tutorials/config/settings.py' - Detecting system - Looking for a matching configuration entry for system 'dhcp-133-191.cscs.ch' - Configuration found: picking system 'generic' - Selecting subconfig for 'generic' - Initializing runtime - Selecting subconfig for 'generic:default' - Initializing system partition 'default' - Selecting subconfig for 'generic' - Initializing system 'generic' - Initializing modules system 'nomod' - [ReFrame Environment] - RFM_CHECK_SEARCH_PATH= - RFM_CHECK_SEARCH_RECURSIVE= - RFM_CLEAN_STAGEDIR= - RFM_COLORIZE= - RFM_CONFIG_FILE=/Users/user/Repositories/reframe/tutorials/config/settings.py - RFM_GRAYLOG_ADDRESS= - RFM_IGNORE_CHECK_CONFLICTS= - RFM_IGNORE_REQNODENOTAVAIL= - RFM_INSTALL_PREFIX=/Users/user/Repositories/reframe - RFM_KEEP_STAGE_FILES= - RFM_MODULE_MAPPINGS= - RFM_MODULE_MAP_FILE= - RFM_NON_DEFAULT_CRAYPE= - RFM_OUTPUT_DIR= - RFM_PERFLOG_DIR= - RFM_PREFIX= - RFM_PURGE_ENVIRONMENT= - RFM_REPORT_FILE= - RFM_SAVE_LOG_FILES= - RFM_STAGE_DIR= - RFM_SYSLOG_ADDRESS= - RFM_SYSTEM= - RFM_TIMESTAMP_DIRS= - RFM_UNLOAD_MODULES= - RFM_USER_MODULES= - RFM_USE_LOGIN_SHELL= - RFM_VERBOSE= - [ReFrame Setup] - version: 3.4-dev2 (rev: 33a97c81) - command: './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -lvv' - launched by: user@dhcp-133-191.cscs.ch - working directory: '/Users/user/Repositories/reframe' - settings file: 'tutorials/config/settings.py' - check search path: '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - stage directory: '/Users/user/Repositories/reframe/stage' - output directory: '/Users/user/Repositories/reframe/output' - - Looking for tests in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py' - Validating '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py': OK - > Loaded 2 test(s) - Loaded 2 test(s) - Generated 2 test case(s) - Filtering test cases(s) by name: 2 remaining - Filtering test cases(s) by tags: 2 remaining - Filtering test cases(s) by other attributes: 2 remaining - Building and validating the full test DAG - Full test DAG: - ('HelloMultiLangTest_c', 'generic:default', 'builtin') -> [] - ('HelloMultiLangTest_cpp', 'generic:default', 'builtin') -> [] - Final number of test cases: 2 - [List of matched checks] - - HelloMultiLangTest_c (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py') - - HelloMultiLangTest_cpp (found in '/Users/user/Repositories/reframe/tutorials/basics/hello/hello2.py') - Found 2 check(s) - Log file(s) saved in: '/var/folders/h7/k7cgrdl13r996m4dmsvjq7v80000gp/T/rfm-3956_dlu.log' +.. literalinclude:: listings/hello2_list_verbose.txt + :language: console You can see all the different phases ReFrame's frontend goes through when loading a test. The first "strange" thing to notice in this log is that ReFrame picked the generic system configuration. @@ -412,30 +293,8 @@ Let's run the whole test DAG: ./bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r -.. 
code-block:: none - - - - [ OK ] ( 1/10) T0 on generic:default using builtin [compile: 0.014s run: 0.297s total: 0.337s] - [ OK ] ( 2/10) T4 on generic:default using builtin [compile: 0.010s run: 0.171s total: 0.207s] - [ OK ] ( 3/10) T5 on generic:default using builtin [compile: 0.010s run: 0.192s total: 0.225s] - [ OK ] ( 4/10) T1 on generic:default using builtin [compile: 0.008s run: 0.198s total: 0.226s] - [ FAIL ] ( 5/10) T8 on generic:default using builtin [compile: n/a run: n/a total: 0.003s] - ==> test failed during 'setup': test staged in '/Users/user/Repositories/reframe/stage/generic/default/builtin/T8' - [ FAIL ] ( 6/10) T9 [compile: n/a run: n/a total: n/a] - ==> test failed during 'startup': test staged in '' - [ OK ] ( 7/10) T6 on generic:default using builtin [compile: 0.007s run: 0.224s total: 0.262s] - [ OK ] ( 8/10) T3 on generic:default using builtin [compile: 0.007s run: 0.211s total: 0.235s] - [ FAIL ] ( 9/10) T2 on generic:default using builtin [compile: 0.011s run: 0.318s total: 0.389s] - ==> test failed during 'sanity': test staged in '/Users/user/Repositories/reframe/stage/generic/default/builtin/T2' - [ FAIL ] (10/10) T7 [compile: n/a run: n/a total: n/a] - ==> test failed during 'startup': test staged in '' - [----------] all spawned checks have finished - - [ FAILED ] Ran 10 test case(s) from 10 check(s) (4 failure(s)) - [==========] Finished on Thu Jan 21 13:58:43 2021 - - +.. literalinclude:: listings/deps_complex_run.txt + :language: console You can restore the run session and run only the failed test cases as follows: @@ -471,18 +330,8 @@ Let's try to rerun the :class:`T6` test from the previous test dependency chain: Notice how only the :class:`T6` test was rerun and none of its dependencies, since they were simply restored: -.. code-block:: none - - [==========] Running 1 check(s) - [==========] Started on Thu Jan 21 14:27:18 2021 - - [----------] start processing checks - [ RUN ] T6 on generic:default using builtin - [ OK ] (1/1) T6 on generic:default using builtin [compile: 0.012s run: 0.428s total: 0.464s] - [----------] all spawned checks have finished - - [ PASSED ] Ran 1 test case(s) from 1 check(s) (0 failure(s)) - [==========] Finished on Thu Jan 21 14:27:19 2021 +.. literalinclude:: listings/deps_rerun_t6.txt + :language: console If we tried to run :class:`T6` without restoring the session, we would have to rerun also the whole dependency chain, i.e., also :class:`T5`, :class:`T1`, :class:`T4` and :class:`T0`. @@ -491,18 +340,8 @@ If we tried to run :class:`T6` without restoring the session, we would have to r ./bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r -.. code-block:: none - - [ OK ] (1/5) T0 on generic:default using builtin [compile: 0.012s run: 0.424s total: 0.464s] - [ OK ] (2/5) T4 on generic:default using builtin [compile: 0.011s run: 0.348s total: 0.381s] - [ OK ] (3/5) T5 on generic:default using builtin [compile: 0.007s run: 0.225s total: 0.248s] - [ OK ] (4/5) T1 on generic:default using builtin [compile: 0.009s run: 0.235s total: 0.267s] - [ OK ] (5/5) T6 on generic:default using builtin [compile: 0.010s run: 0.265s total: 0.297s] - [----------] all spawned checks have finished - - - [ PASSED ] Ran 5 test case(s) from 5 check(s) (0 failure(s)) - [==========] Finished on Thu Jan 21 14:32:09 2021 +.. literalinclude:: listings/deps_run_t6.txt + :language: console .. 
_generate-ci-pipeline: diff --git a/hpctestlib/sciapps/amber/nve.py b/hpctestlib/sciapps/amber/nve.py index d8044f7d29..e1e917ccda 100644 --- a/hpctestlib/sciapps/amber/nve.py +++ b/hpctestlib/sciapps/amber/nve.py @@ -86,7 +86,7 @@ class amber_nve_check(rfm.RunOnlyRegressionTest, pin_prefix=True): ('FactorIX_production_NVE', -234188.0, 1.0E-04), ('JAC_production_NVE_4fs', -44810.0, 1.0E-03), ('JAC_production_NVE', -58138.0, 5.0E-04) - ]) + ], fmt=lambda x: x[0]) # Parameter encoding the variant of the test. # diff --git a/reframe/core/fields.py b/reframe/core/fields.py index bafbf3d597..11e09b4dfd 100644 --- a/reframe/core/fields.py +++ b/reframe/core/fields.py @@ -38,8 +38,13 @@ def remove_convertible(value): class Field: '''Base class for attribute validators.''' + def __init__(self, attr_name=None): + if attr_name is not None: + self._name = attr_name + def __set_name__(self, owner, name): - self._name = name + if not hasattr(self, '_name'): + self._name = name def __get__(self, obj, objtype): if obj is None: @@ -60,7 +65,8 @@ def __set__(self, obj, value): class TypedField(Field): '''Stores a field of predefined type''' - def __init__(self, main_type, *other_types): + def __init__(self, main_type, *other_types, attr_name=None): + super().__init__(attr_name) self._types = (main_type,) + other_types if not all(isinstance(t, type) for t in self._types): raise TypeError('{0} is not a sequence of types'. @@ -134,8 +140,8 @@ def __set__(self, obj, value): class TimerField(TypedField): '''Stores a timer in the form of a :class:`datetime.timedelta` object''' - def __init__(self, *other_types): - super().__init__(str, int, float, *other_types) + def __init__(self, *other_types, attr_name=None): + super().__init__(str, int, float, *other_types, attr_name=attr_name) def __set__(self, obj, value): value = remove_convertible(value) @@ -165,9 +171,9 @@ class ScopedDictField(TypedField): It also handles implicit conversions from ordinary dicts.''' - def __init__(self, valuetype, *other_types): + def __init__(self, valuetype, *other_types, attr_name=None): super().__init__(types.Dict[str, types.Dict[str, valuetype]], - ScopedDict, *other_types) + ScopedDict, *other_types, attr_name=attr_name) def __set__(self, obj, value): value = remove_convertible(value) @@ -181,10 +187,22 @@ def __set__(self, obj, value): class DeprecatedField(Field): '''Field wrapper for deprecating fields.''' - OP_SET = 1 - OP_GET = 2 + OP_GET = 1 + OP_SET = 2 OP_ALL = OP_SET | OP_GET + @property + def message(self): + return self._message + + @property + def op(self): + return self._op + + @property + def from_version(self): + return self._from_version + def __set_name__(self, owner, name): self._target_field.__set_name__(owner, name) diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py index 9350c3f6b8..4db9e05027 100644 --- a/reframe/core/fixtures.py +++ b/reframe/core/fixtures.py @@ -33,28 +33,46 @@ class FixtureData: This data is required to instantiate the fixture. 
''' - def __init__(self, variant_num, envs, parts, variables, scope): - self.data = (variant_num, envs, parts, variables, scope,) + __slots__ = ('__data',) + + def __init__(self, variant, envs, parts, variables, scope, scope_enc): + self.__data = (variant, envs, parts, variables, scope, scope_enc) + + @property + def data(self): + return self.__data @property def variant_num(self): - return self.data[0] + return self.__data[0] @property def environments(self): - return self.data[1] + return self.__data[1] @property def partitions(self): - return self.data[2] + return self.__data[2] @property def variables(self): - return self.data[3] + return self.__data[3] @property def scope(self): - return self.data[4] + return self.__data[4] + + @property + def scope_enc(self): + return self.__data[5] + + def mashup(self): + s = f'{self.variant_num}/{self.scope_enc}' + if self.variables: + s += '/' + '&'.join(f'{k}={self.variables[k]}' + for k in sorted(self.variables)) + + return sha256(s.encode('utf-8')).hexdigest()[:8] class FixtureRegistry: @@ -152,7 +170,7 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): cls = fixture.cls scope = fixture.scope - fname = fixture.get_name(variant_num) + fname = fixture.cls.variant_name(variant_num) variables = fixture.variables reg_names = [] self._registry.setdefault(cls, dict()) @@ -164,7 +182,7 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): in sorted(variables.items())) ) if self._hash: - vname = '%' + sha256(vname.encode('utf-8')).hexdigest()[:8] + vname = '_' + sha256(vname.encode('utf-8')).hexdigest()[:8] fname += vname @@ -178,8 +196,6 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): # Register the fixture if scope == 'session': # The name is mangled with the system name - name = f'{fname}~{self._sys_name}' - # Select a valid environment supported by a partition for part in valid_partitions: valid_envs = self._filter_valid_environs(part, prog_envs) @@ -189,14 +205,14 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): return [] # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, [valid_envs[0]], [part], variables, scope - ) + fixt_data = FixtureData(variant_num, [valid_envs[0]], [part], + variables, scope, self._sys_name) + name = f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) elif scope == 'partition': for part in valid_partitions: # The mangled name contains the full partition name - name = f'{fname}~{part}' # Select an environment supported by the partition valid_envs = self._filter_valid_environs(part, prog_envs) @@ -204,31 +220,30 @@ def add(self, fixture, variant_num, parent_name, partitions, prog_envs): continue # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, [valid_envs[0]], [part], variables, scope - ) + fixt_data = FixtureData(variant_num, [valid_envs[0]], [part], + variables, scope, part) + name = f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) elif scope == 'environment': for part in valid_partitions: for env in self._filter_valid_environs(part, prog_envs): # The mangled name contains the full part and env names - ext = f'{part}+{env}' - name = f'{fname}~{ext}' - # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, [env], [part], variables, scope - ) + fixt_data = FixtureData(variant_num, [env], [part], + variables, scope, f'{part}+{env}') + name 
= f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) elif scope == 'test': # The mangled name contains the parent test name. - name = f'{fname}~{parent_name}' # Register the fixture - self._registry[cls][name] = FixtureData( - variant_num, list(prog_envs), list(valid_partitions), - variables, scope - ) + fixt_data = FixtureData(variant_num, list(prog_envs), + list(valid_partitions), + variables, scope, parent_name) + name = f'{cls.__name__}_{fixt_data.mashup()}' + self._registry[cls][name] = fixt_data reg_names.append(name) return reg_names @@ -276,13 +291,12 @@ def instantiate_all(self): ret = [] for cls, variants in self._registry.items(): for name, args in variants.items(): - varnum, penv, part, variables, _ = args.data + varnum, penv, part, variables, *_ = args.data # Set the fixture name and stolen env and part from the parent, # alongside the other variables specified during the fixture's # declaration. fixtvars = { - 'name': name, 'valid_prog_environs': penv, 'valid_systems': part, **variables @@ -290,8 +304,8 @@ def instantiate_all(self): try: # Instantiate the fixture - inst = cls(variant_num=varnum, variables=fixtvars, - is_fixture=True) + inst = cls(variant_num=varnum, fixt_name=name, + fixt_data=args, fixt_vars=fixtvars) except Exception: exc_info = sys.exc_info() getlogger().warning( @@ -460,10 +474,6 @@ def scope(self): '''The fixture scope.''' return self._scope - def get_name(self, variant_num=None): - '''Utility to retrieve the full name of a given fixture variant.''' - return self.cls.fullname(variant_num) - @property def action(self): '''Action specified on this fixture.''' @@ -513,7 +523,7 @@ def variables(self): class FixtureSpace(namespaces.Namespace): - ''' Regression test fixture space. + '''Regression test fixture space. The fixture space is first built by joining the available fixture spaces in the base classes, and later extended by the locally defined fixtures @@ -641,7 +651,7 @@ def _expand_partitions_envs(self, obj): part = tuple(obj.valid_systems) except AttributeError: raise ReframeSyntaxError( - f"'valid_systems' is undefined in test {obj.name}" + f"'valid_systems' is undefined in test {obj.unique_name!r}" ) else: rt = runtime.runtime() @@ -652,7 +662,8 @@ def _expand_partitions_envs(self, obj): prog_envs = tuple(obj.valid_prog_environs) except AttributeError: raise ReframeSyntaxError( - f"'valid_prog_environs' is undefined in test {obj.name}" + f"'valid_prog_environs' is undefined " + f"in test {obj.unique_name!r}" ) else: if '*' in prog_envs: @@ -686,7 +697,7 @@ def __getitem__(self, key): underlying fixture object with that name. 
''' if isinstance(key, int): - ret = dict() + ret = {} f_ids = self.__variant_combinations[key] for i, f in enumerate(self.fixtures): ret[f] = f_ids[i] diff --git a/reframe/core/logging.py b/reframe/core/logging.py index 5aab81c0e2..a1646cf146 100644 --- a/reframe/core/logging.py +++ b/reframe/core/logging.py @@ -470,6 +470,12 @@ def __init__(self, name, level=logging.NOTSET): def setLevel(self, level): self.level = _check_level(level) + if sys.version_info[:2] >= (3, 7): + # Clear the internal cache of the base logger, otherwise the + # logger will remain disabled if its level is raised and then + # lowered again + self._cache.clear() + def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): record = super().makeRecord(name, level, fn, lno, msg, args, exc_info, diff --git a/reframe/core/meta.py b/reframe/core/meta.py index f30406e218..642c4dc4e6 100644 --- a/reframe/core/meta.py +++ b/reframe/core/meta.py @@ -16,20 +16,11 @@ import reframe.core.variables as variables import reframe.core.fixtures as fixtures import reframe.core.hooks as hooks -import reframe.core.runtime as rt import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression - - -def _use_compact_names(): - try: - return getattr(_use_compact_names, '_cached') - except AttributeError: - ret = rt.runtime().get_option('general/0/compact_test_names') - _use_compact_names._cached = ret - return ret +from reframe.core.runtime import runtime class RegressionTestMeta(type): @@ -60,7 +51,6 @@ def __setitem__(self, key, value): # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return - elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: @@ -73,7 +63,6 @@ def __setitem__(self, key, value): # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return - elif isinstance(value, fixtures.TestFixture): # Insert the attribute in the fixture namespace self['_rfm_local_fixture_space'][key] = value @@ -81,7 +70,6 @@ def __setitem__(self, key, value): # Override the regular class attribute (if present) self._namespace.pop(key, None) return - elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( f'cannot redefine parameter {key!r}' @@ -136,7 +124,6 @@ def __getitem__(self, key): try: # Handle variable access return self['_rfm_local_var_space'][key] - except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: @@ -241,6 +228,7 @@ def __prepare__(metacls, name, bases, **kwargs): # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined + namespace['deprecate'] = variables.TestVar.create_deprecated # Regression test fixture space namespace['_rfm_local_fixture_space'] = namespaces.LocalNamespace() @@ -486,14 +474,13 @@ def __call__(cls, *args, **kwargs): # respective points in the parameter and fixture spaces. 
variant_num = kwargs.pop('variant_num', None) param_index, fixt_index = cls._map_variant_num(variant_num) + fixt_name = kwargs.pop('fixt_name', None) + fixt_data = kwargs.pop('fixt_data', None) # Intercept variables to be set before initialization - variables = kwargs.pop('variables', {}) - if not isinstance(variables, collections.abc.Mapping): - raise TypeError("'variables' argument must be a mapping") - - # Intercept is_fixture argument to flag an instance as a fixture - is_fixture = kwargs.pop('is_fixture', False) + fixt_vars = kwargs.pop('fixt_vars', {}) + if not isinstance(fixt_vars, collections.abc.Mapping): + raise TypeError("'fixt_vars' argument must be a mapping") obj = cls.__new__(cls, *args, **kwargs) @@ -508,11 +495,13 @@ def __call__(cls, *args, **kwargs): obj._rfm_fixt_variant = fixt_index # Flag the instance as fixture - if is_fixture: + if fixt_name: + obj._rfm_unique_name = fixt_name + obj._rfm_fixt_data = fixt_data obj._rfm_is_fixture = True # Set the variables passed to the constructor - for k, v in variables.items(): + for k, v in fixt_vars.items(): if k in cls.var_space: setattr(obj, k, v) @@ -670,7 +659,7 @@ class attribute. This behavior does not apply when the assigned value @property def num_variants(cls): - '''Number of unique tests that can be instantiated from this class.''' + '''Total number of variants of the test.''' return len(cls._rfm_param_space) * len(cls._rfm_fixture_space) def _map_variant_num(cls, variant_num=None): @@ -697,15 +686,32 @@ def get_variant_nums(cls, **conditions): '''Get the variant numbers that meet the specified conditions. The given conditions enable filtering the parameter space of the test. - These can be specified by passing key-value pairs with the parameter - name to filter and an associated callable that returns ``True`` when - the filtering condition is met. Multiple conditions are supported. - However, filtering the fixture space is not allowed. + Filtering the fixture space is not allowed. .. code-block:: python # Filter out the test variants where my_param is greater than 3 cls.get_variant_nums(my_param=lambda x: x < 4) + + The returned list of variant numbers can be passed to + :func:`variant_name` in order to retrieve the actual test name. + + :param conditions: keyword arguments where the key is the test + parameter name and the value is either a single value or a unary + function that evaluates to :obj:`True` if the parameter point must + be kept, :obj:`False` otherwise. If a single value is passed this + is implicitly converted to the equality function, such that + + .. code-block:: python + + get_variant_nums(p=10) + + is equivalent to + + .. code-block:: python + + get_variant_nums(p=lambda x: x == 10) + ''' if not conditions: return list(range(cls.num_variants)) @@ -773,29 +779,41 @@ class MyTest(rfm.RegressionTest): # 'f1': 0, # } # } + + :param variant_num: An integer in the range of [0, cls.num_variants). + :param recurse: Flag to control the recursion through the fixture + space. + :param max_depth: Set the recursion limit. When the ``recurse`` + argument is set to ``False``, this option has no effect. 
+ ''' pid, fid = cls._map_variant_num(variant_num) - ret = dict() - ret['params'] = cls.param_space[pid] - ret['fixtures'] = cls.fixture_space[fid] + ret = { + 'params': cls.param_space[pid] if pid is not None else {}, + 'fixtures': cls.fixture_space[fid] if fid is not None else {} + } # Get current recursion level rdepth = kwargs.get('_current_depth', 0) - if recurse and (max_depth is None or rdepth < max_depth): - for fix, variant in ret['fixtures'].items(): + for fname, variant in ret['fixtures'].items(): if len(variant) > 1: continue - fcls = cls.fixture_space[fix].cls - ret['fixtures'][fix] = fcls.get_variant_info( + fixt = cls.fixture_space[fname] + ret['fixtures'][fname] = fixt.cls.get_variant_info( variant[0], recurse=recurse, max_depth=max_depth, _current_depth=rdepth+1 ) return ret + @property + def raw_params(cls): + '''Expose the raw parameters.''' + return cls.param_space.params + @property def param_space(cls): '''Expose the parameter space.''' @@ -824,25 +842,20 @@ def is_abstract(cls): ''' return cls.num_variants == 0 - def fullname(cls, variant_num=None): - '''Return the full name of a test for a given test variant number. + def variant_name(cls, variant_num=None): + '''Return the name of the test variant with a specific variant number. - This function returns a unique name for each of the provided variant - numbers. If no ``variant_num`` is provided, this function returns the - qualified class name. - - :param variant_num: An integer in the range of [0, cls.num_variants). - - :meta private: + :param variant_num: An integer in the range of ``[0, cls.num_variants)``. ''' - name = cls.__qualname__ + name = cls.__name__ if variant_num is None: return name - if _use_compact_names(): + if runtime().get_option('general/0/compact_test_names'): if cls.num_variants > 1: - name += f'@{variant_num}' + width = utils.count_digits(cls.num_variants) + name += f'_{variant_num:0{width}}' else: pid, fid = cls._map_variant_num(variant_num) @@ -852,6 +865,51 @@ def fullname(cls, variant_num=None): for v in cls.param_space[pid].values()) if len(cls.fixture_space) > 1: - name += f'@{fid}' + name += f'_{fid}' return name + + +def make_test(name, bases, body, **kwargs): + '''Define a new test class programmatically. + + Using this method is completely equivalent to using the :keyword:`class` + to define the test class. More specifically, the following: + + .. code-block:: python + + hello_cls = rfm.make_test( + 'HelloTest', (rfm.RunOnlyRegressionTest,), + { + 'valid_systems': ['*'], + 'valid_prog_environs': ['*'], + 'executable': 'echo', + 'sanity_patterns': sn.assert_true(1) + } + ) + + is completely equivalent to + + .. code-block:: python + + class HelloTest(rfm.RunOnlyRegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo', + sanity_patterns: sn.assert_true(1) + + hello_cls = HelloTest + + :param name: The name of the new test class. + :param bases: A tuple of the base classes of the class that is being + created. + :param body: A mapping of key/value pairs that will be inserted as class + attributes in the newly created class. + :param kwargs: Any keyword arguments to be passed to the + :class:`RegressionTestMeta` metaclass. 
+ + ''' + namespace = RegressionTestMeta.__prepare__(name, bases, **kwargs) + namespace.update(body) + cls = RegressionTestMeta(name, bases, namespace, **kwargs) + return cls diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py index d6921bdeea..edcabdf6b2 100644 --- a/reframe/core/parameters.py +++ b/reframe/core/parameters.py @@ -28,36 +28,68 @@ class TestParam: ''' def __init__(self, values=None, - inherit_params=False, filter_params=None): + inherit_params=False, filter_params=None, fmt=None): if values is None: values = [] - # By default, filter out all the parameter values defined in the - # base classes. if not inherit_params: + # By default, filter out all the parameter values defined in the + # base classes. def filter_params(x): return () - - # If inherit_params==True, inherit all the parameter values from the - # base classes as default behaviour. elif filter_params is None: + # If inherit_params==True, inherit all the parameter values from + # the base classes as default behaviour. def filter_params(x): return x self.values = tuple(values) - # Validate the filter_param argument + # Validate and set the filter_params function + if (not callable(filter_params) or + not utils.is_trivially_callable(filter_params, non_def_args=1)): + raise TypeError("'filter_params' argument must be a callable " + "accepting a single argument") + + self.filter_params = filter_params + + # Validate and set the alternative function + if fmt is None: + def fmt(x): + return x + + if (not callable(fmt) or + not utils.is_trivially_callable(fmt, non_def_args=1)): + raise TypeError("'fmt' argument must be a callable " + "accepting a single argument") + + self.__fmt_fn = fmt + + @property + def format(self): + return self.__fmt_fn + + def update(self, other): + '''Update this parameter from another one. + + The values from the other parameter will be filtered according to the + filter function of this one and prepended to this parameter's values. + ''' + try: - valid = utils.is_trivially_callable(filter_params, non_def_args=1) - except TypeError: - raise TypeError( - 'the provided parameter filter is not a callable' - ) from None + filt_vals = self.filter_params(other.values) + except Exception: + raise else: - if not valid: - raise TypeError('filter function must take a single argument') + try: + self.values = tuple(filt_vals) + self.values + except TypeError: + raise ReframeSyntaxError( + f"'filter_param' must return an iterable" + ) from None - self.filter_params = filter_params + def is_abstract(self): + return len(self.values) == 0 class ParamSpace(namespaces.Namespace): @@ -83,7 +115,7 @@ def __init__(self, target_cls=None, illegal_names=None): # Store all param combinations to allow random access. self.__param_combinations = tuple( itertools.product( - *(copy.deepcopy(p) for p in self.params.values()) + *(copy.deepcopy(p.values) for p in self.params.values()) ) ) @@ -102,41 +134,30 @@ def join(self, other, cls): :param other: instance of the ParamSpace class. :param cls: the target class. 
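A short, hedged sketch of the new ``fmt`` argument introduced above (test and parameter names are illustrative); the formatter only affects how a value is rendered, for instance in display names, not the value itself:

.. code-block:: python

   import reframe as rfm


   class IOTest(rfm.RegressionTest):
       # The values remain plain integers; 'fmt' controls only their string form
       block_size = parameter([1024, 4096, 16384],
                              fmt=lambda x: f'{x // 1024}KiB')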
''' - for key in other.params: + for name in other.params: # With multiple inheritance, a single parameter # could be doubly defined and lead to repeated # values - if (key in self.params and - self.params[key] != () and - other.params[key] != ()): - + if self.defines(name) and other.defines(name): raise ReframeSyntaxError( f'parameter space conflict: ' - f'parameter {key!r} is defined in more than ' + f'parameter {name!r} is defined in more than ' f'one base class of class {cls.__qualname__!r}' ) - self.params[key] = ( - other.params.get(key, ()) + self.params.get(key, ()) - ) + if not self.defines(name): + # If we do not define the parameter, take it from other + self.params[name] = other.params[name] def extend(self, cls): '''Extend the parameter space with the local parameter space.''' local_param_space = getattr(cls, self.local_namespace_name, dict()) for name, p in local_param_space.items(): - try: - filt_vals = p.filter_params(self.params.get(name, ())) - except Exception: - raise - else: - try: - self.params[name] = (tuple(filt_vals) + p.values) - except TypeError: - raise ReframeSyntaxError( - f"'filter_param' must return an iterable " - f"(parameter {name!r})" - ) from None + if name in self.params: + p.update(self.params[name]) + + self.params[name] = p # Clear the local param space local_param_space.clear() @@ -184,6 +205,18 @@ def inject(self, obj, cls=None, params_index=None): for key in self.params: setattr(obj, key, None) + @property + def params(self): + return self._namespace + + def defines(self, name): + '''Return True if parameter is defined. + + A parameter is defined if it exists in the namespace and it is not + abstract. + ''' + return name in self.params and not self.params[name].is_abstract() + def __iter__(self): '''Create a generator object to iterate over the parameter space @@ -194,10 +227,6 @@ def __iter__(self): ''' yield from self.__param_combinations - @property - def params(self): - return self._namespace - def __len__(self): '''Returns the number of all possible parameter combinations. @@ -220,22 +249,28 @@ def __len__(self): def __getitem__(self, key): '''Access an element in the parameter space. - If the key is an integer, this function will retrieve a given point in - the parameter space. If the key is a parameter name, it will instead - return all the values assigned to that parameter. + If the key is an integer, this will be interpreted as a point in the + parameter space and this function will return a mapping of the + parameter names and their corresponding values. If the key is a + parameter name, it will instead return all the values assigned to that + parameter. If the key is an integer, this function will raise an :class:`IndexError` if the key is out of bounds. + ''' if isinstance(key, int): - ret = dict() + ret = {} val = self.__param_combinations[key] - for i, key in enumerate(self.params): - ret[key] = val[i] + for i, name in enumerate(self.params): + ret[name] = val[i] return ret - return self.params.get(key, ()) + try: + return self.params[key].values + except KeyError: + return () def is_empty(self): return self.params == {} @@ -246,6 +281,10 @@ def get_variant_nums(self, **conditions): The conditions are passed as key-value pairs, where the keys are the parameter names to apply the filtering on and the values are functions that expect the parameter's value as the sole argument. + + :returns: the indices of the matching parameters in the parameter + space. 
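To illustrate the update semantics above (filtered inherited values are prepended to the locally defined ones), a hypothetical two-level hierarchy:

.. code-block:: python

   import reframe as rfm


   class BaseTest(rfm.RegressionTest):
       mode = parameter(['read', 'write'])


   class DerivedTest(BaseTest):
       # Keep only the inherited 'read' value and prepend it to the new one,
       # so DerivedTest.mode takes the values ('read', 'rw')
       mode = parameter(['rw'], inherit_params=True,
                        filter_params=lambda vals: [v for v in vals if v == 'read'])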
+ ''' candidates = range(len(self)) if not conditions: @@ -257,6 +296,12 @@ def get_variant_nums(self, **conditions): raise NameError( f'no such parameter: {param!r}' ) + elif not callable(cond): + # Convert it to the identity function + val = cond + + def cond(x): + return x == val elif not utils.is_trivially_callable(cond, non_def_args=1): raise ValueError( f'condition on {param!r} must be a callable accepting a ' diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 7967e7a854..aa8b82deb5 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -9,7 +9,8 @@ __all__ = [ 'CompileOnlyRegressionTest', 'RegressionTest', 'RunOnlyRegressionTest', - 'DEPEND_BY_ENV', 'DEPEND_EXACT', 'DEPEND_FULLY', 'final', 'RegressionMixin' + 'DEPEND_BY_ENV', 'DEPEND_EXACT', 'DEPEND_FULLY', 'final', + 'RegressionMixin' ] @@ -43,6 +44,7 @@ ReframeSyntaxError) from reframe.core.meta import RegressionTestMeta from reframe.core.schedulers import Job +from reframe.core.variables import DEPRECATE_WR from reframe.core.warnings import user_deprecation_warning @@ -209,14 +211,22 @@ def pipeline_hooks(cls): #: The name of the test. #: - #: :type: string that can contain any character except ``/`` - #: :default: For non-parameterised tests, the default name is the test - #: class name. For parameterised tests, the default name is constructed - #: by concatenating the test class name and the string representations - #: of every test parameter: ``TestClassName__``. - #: Any non-alphanumeric value in a parameter's representation is - #: converted to ``_``. - name = variable(typ.Str[r'[^\/]+']) + #: This is an alias of :attr:`unique_name`. + #: + #: .. warning:: + #: + #: Setting the name of a test is deprecated and will be disabled in the + #: future. If you were setting the name of a test to circumvent the old + #: long parameterized test names in order to reference them in + #: dependency chains, please refer to :ref:`param_deps` for more details on how + #: to achieve this. + #: + #: .. versionchanged:: 3.10.0 + #: Setting the :attr:`name` attribute is deprecated. + #: + name = deprecate(variable(typ.Str[r'[^\/]+'], attr_name='_rfm_unique_name'), + "setting the 'name' attribute is deprecated and " + "will be disabled in the future", DEPRECATE_WR) #: List of programming environments supported by this test. #: @@ -258,7 +268,7 @@ def pipeline_hooks(cls): #: A detailed description of the test. #: #: :type: :class:`str` - #: :default: ``self.name`` + #: :default: ``self.display_name`` descr = variable(str) #: The path to the source file or source directory of the test. @@ -353,14 +363,15 @@ def pipeline_hooks(cls): #: The name of the executable to be launched during the run phase. #: #: If this variable is undefined when entering the compile pipeline - #: stage, it will be set to ``os.path.join('.', self.name)``. Classes - #: that override the compile stage may leave this variable undefined. + #: stage, it will be set to ``os.path.join('.', self.unique_name)``. + #: Classes that override the compile stage may leave this variable + #: undefined. #: #: :type: :class:`str` #: :default: :class:`required` #: #: .. versionchanged:: 3.7.3 - #: Default value changed from ``os.path.join('.', self.name)`` to + #: Default value changed from ``os.path.join('.', self.unique_name)`` to #: :class:`required`. 
executable = variable(str) @@ -898,18 +909,19 @@ def __init_subclass__(cls, *, special=False, pin_prefix=False, @deferrable def __rfm_init__(self, *args, prefix=None, **kwargs): - if not hasattr(self, 'name'): - self.name = type(self).fullname(self.variant_num) + if not self.is_fixture() and not hasattr(self, '_rfm_unique_name'): + self._rfm_unique_name = type(self).variant_name(self.variant_num) # Add the parameters from the parameterized_test decorator. if args or kwargs: arg_names = map(lambda x: util.toalphanum(str(x)), itertools.chain(args, kwargs.values())) - self.name += '_' + '_'.join(arg_names) + self._rfm_unique_name += '_' + '_'.join(arg_names) + self._rfm_old_style_params = True # Pass if descr is a required variable. if not hasattr(self, 'descr'): - self.descr = self.name + self.descr = self.display_name self._perfvalues = {} @@ -1023,6 +1035,71 @@ def __getattr__(self, name): # Export read-only views to interesting fields + @property + def unique_name(self): + '''The unique name of this test. + + :type: :class:`str` + + .. versionadded:: 3.10.0 + ''' + return self._rfm_unique_name + + @property + def display_name(self): + '''A human-readable version of the name this test. + + This name contains a string representation of the various parameters + of this specific test variant. + + :type: :class:`str` + + .. note:: + The display name may not be unique. + + .. versionadded:: 3.10.0 + + ''' + def _format_params(cls, info, prefix=' %'): + name = '' + for p, v in info['params'].items(): + format_fn = cls.raw_params[p].format + name += f'{prefix}{p}={format_fn(v)}' + + for f, v in info['fixtures'].items(): + if isinstance(v, tuple): + # This is join fixture + continue + + fixt = cls.fixture_space[f] + name += _format_params(fixt.cls, v, f'{prefix}{f}.') + + # Append any variables set for the fixtures + for var, val in fixt.variables.items(): + name += f'{prefix}{f}.{var}={val}' + + return name + + if hasattr(self, '_rfm_old_style_params'): + return self.unique_name + + if hasattr(self, '_rfm_display_name'): + return self._rfm_display_name + + cls = type(self) + basename = cls.__name__ + variant_info = cls.get_variant_info(self.variant_num, recurse=True) + self._rfm_display_name = basename + _format_params(cls, variant_info) + if self.is_fixture(): + # Add the variable info and scope + fixt_data = self._rfm_fixt_data + suffix = ''.join(f' %{k}={v}' for k, + v in fixt_data.variables.items()) + suffix += f' ~{fixt_data.scope_enc}' + self._rfm_display_name += suffix + + return self._rfm_display_name + @property def current_environ(self): '''The programming environment that the regression test is currently @@ -1206,12 +1283,13 @@ def info(self): you use the :class:`RegressionTest`'s attributes, because this method may be called at any point of the test's lifetime. 
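To make the distinction above concrete, a small hypothetical example (the exact unique names depend on the compact test-naming option):

.. code-block:: python

   import reframe as rfm


   class EchoTest(rfm.RunOnlyRegressionTest):
       mode = parameter(['fast', 'safe'])
       executable = 'echo'

   # For the variant where mode == 'fast' (illustrative):
   #   unique_name  -> 'EchoTest_0'           (stable handle used for dependencies,
   #                                            stage/output paths and reports)
   #   display_name -> 'EchoTest %mode=fast'  (human-readable, encodes the parameters)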
''' - ret = self.name + + ret = self.display_name if self.current_partition: - ret += ' on %s' % self.current_partition.fullname + ret += f' @{self.current_partition.fullname}' if self.current_environ: - ret += ' using %s' % self.current_environ.name + ret += f'+{self.current_environ.name}' return ret @@ -1334,11 +1412,11 @@ def _setup_paths(self): runtime = rt.runtime() self._stagedir = runtime.make_stagedir( self.current_system.name, self._current_partition.name, - self._current_environ.name, self.name + self._current_environ.name, self.unique_name ) self._outputdir = runtime.make_outputdir( self.current_system.name, self._current_partition.name, - self._current_environ.name, self.name + self._current_environ.name, self.unique_name ) except OSError as e: raise PipelineError('failed to set up paths') from e @@ -1399,10 +1477,10 @@ def setup(self, partition, environ, **job_opts): self._current_environ = environ self._setup_paths() self._resolve_fixtures() - self._job = self._setup_job(f'rfm_{self.name}_job', + self._job = self._setup_job(f'rfm_{self.unique_name}_job', self.local, **job_opts) - self._build_job = self._setup_job(f'rfm_{self.name}_build', + self._build_job = self._setup_job(f'rfm_{self.unique_name}_build', self.local or self.build_locally, **job_opts) @@ -1470,7 +1548,7 @@ def compile(self): # Set executable (only if hasn't been provided) if not hasattr(self, 'executable'): - self.executable = os.path.join('.', self.name) + self.executable = os.path.join('.', self.unique_name) # Verify the sourcepath and determine the sourcepath in the stagedir if (os.path.isabs(self.sourcepath) or @@ -2191,7 +2269,7 @@ def getdep(self, target, environ=None, part=None): raise DependencyError('no test case is associated with this test') for d in self._case().deps: - mask = int(d.check.name == target) + mask = int(d.check.unique_name == target) mask |= (int(d.partition.name == part) | int(part == '*')) << 1 mask |= (int(d.environ.name == environ) | int(environ == '*')) << 2 if mask == 7: @@ -2241,17 +2319,16 @@ def skip_if_no_procinfo(self, msg=None): self.skip_if(not proc.info, msg) def __str__(self): - return "%s(name='%s', prefix='%s')" % (type(self).__name__, - self.name, self.prefix) + return f'{self.unique_name} [{self.display_name}]' def __eq__(self, other): if not isinstance(other, RegressionTest): return NotImplemented - return self.name == other.name + return self.unique_name == other.unique_name def __hash__(self): - return hash(self.name) + return hash(self.unique_name) def __rfm_json_decode__(self, json): # 'tags' are decoded as list, so we convert them to a set @@ -2276,9 +2353,8 @@ def setup(self, partition, environ, **job_opts): self._current_partition = partition self._current_environ = environ self._setup_paths() - self._job = self._setup_job(f'rfm_{self.name}_job', - self.local, - **job_opts) + self._job = self._setup_job(f'rfm_{self.unique_name}_job', + self.local, **job_opts) self._resolve_fixtures() def compile(self): @@ -2334,7 +2410,7 @@ def setup(self, partition, environ, **job_opts): self._current_partition = partition self._current_environ = environ self._setup_paths() - self._build_job = self._setup_job(f'rfm_{self.name}_build', + self._build_job = self._setup_job(f'rfm_{self.unique_name}_build', self.local or self.build_locally, **job_opts) self._resolve_fixtures() diff --git a/reframe/core/variables.py b/reframe/core/variables.py index c4448a8700..8207ec4b53 100644 --- a/reframe/core/variables.py +++ b/reframe/core/variables.py @@ -13,6 +13,8 @@ import 
reframe.core.fields as fields import reframe.core.namespaces as namespaces from reframe.core.exceptions import ReframeSyntaxError +from reframe.core.warnings import (user_deprecation_warning, + suppress_deprecations) class _UndefinedType: @@ -25,6 +27,10 @@ def __deepcopy__(self, memo): Undefined = _UndefinedType() +DEPRECATE_RD = 1 +DEPRECATE_WR = 2 +DEPRECATE_RDWR = DEPRECATE_RD | DEPRECATE_WR + class TestVar: '''Regression test variable class. @@ -53,6 +59,20 @@ def __init__(self, *args, **kwargs): self._field = field_type(*args, **kwargs) + @classmethod + def create_deprecated(cls, var, message, + kind=DEPRECATE_RDWR, from_version='0.0.0'): + ret = TestVar.__new__(TestVar) + ret._field = fields.DeprecatedField(var.field, message, + kind, from_version) + ret._default_value = var._default_value + return ret + + def _check_deprecation(self, kind): + if isinstance(self.field, fields.DeprecatedField): + if self.field.op & kind: + user_deprecation_warning(self.field.message) + def is_defined(self): return self._default_value is not Undefined @@ -60,6 +80,19 @@ def undefine(self): self._default_value = Undefined def define(self, value): + if value != self._default_value: + # We only issue a deprecation warning if the write attempt changes + # the value. This is a workaround to the fact that if a variable + # defined in parent classes is accessed by the current class, then + # the definition of the variable is "copied" in the class body as + # an assignment (see `MetaNamespace.__getitem__()`). The + # `VarSpace.extend()` method then checks all local class body + # assignments and if they refer to a variable (inherited or not), + # they call `define()` on it. So, practically, in this case, the + # `_default_value` is set redundantly once per class in the + # hierarchy. + self._check_deprecation(DEPRECATE_WR) + self._default_value = value @property @@ -67,6 +100,7 @@ def default_value(self): # Variables must be returned by-value to prevent an instance from # modifying the class variable space. self._check_is_defined() + self._check_deprecation(DEPRECATE_RD) return copy.deepcopy(self._default_value) @property @@ -81,7 +115,7 @@ def __set_name__(self, owner, name): self._name = name def __setattr__(self, name, value): - '''Set any additional variable attribute into __attrs__.''' + '''Set any additional variable attribute into the default value.''' if name in self.__slots__: super().__setattr__(name, value) else: @@ -528,6 +562,14 @@ def inject(self, obj, cls): :param cls: The test class. ''' + # Attribute injection is a special operation; the actual attribute + # descriptor fields will be created and they will be assigned their + # value; deprecations have been checked already during the class + # construction, so we don't want to trigger them also here. 
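A minimal sketch of how a test library might use the new ``deprecate`` directive registered earlier, along the same lines as the ``name`` alias above (the variable names here are hypothetical):

.. code-block:: python

   import reframe as rfm
   from reframe.core.variables import DEPRECATE_WR


   class MyLibTest(rfm.RegressionTest):
       #: Reads and writes of this variable emit a ReframeDeprecationWarning
       #: (the default kind is DEPRECATE_RDWR)
       old_flag = deprecate(variable(bool, value=False),
                            "'old_flag' is deprecated and will be removed")

       #: Restricting the warning to writes only, as done for 'name'
       legacy_opts = deprecate(variable(str, value=''),
                               "'legacy_opts' is deprecated; set 'opts' instead",
                               DEPRECATE_WR)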
+ with suppress_deprecations(): + self._inject(obj, cls) + + def _inject(self, obj, cls): for name, var in self.items(): setattr(cls, name, var.field) getattr(cls, name).__set_name__(obj, name) diff --git a/reframe/core/warnings.py b/reframe/core/warnings.py index 5e13aa16ed..6fd537d237 100644 --- a/reframe/core/warnings.py +++ b/reframe/core/warnings.py @@ -84,3 +84,18 @@ def user_deprecation_warning(message, from_version='0.0.0'): if _RAISE_DEPRECATION_ALWAYS or version >= min_version: warnings.warn(message, ReframeDeprecationWarning, stacklevel=stack_level) + + +class suppress_deprecations: + '''Temporarily suprress ReFrame deprecation warnings.''' + + def __init__(self, *args, **kwargs): + self._ctxmgr = warnings.catch_warnings(*args, **kwargs) + + def __enter__(self): + ret = self._ctxmgr.__enter__() + warnings.simplefilter('ignore', ReframeDeprecationWarning) + return ret + + def __exit__(self, exc_type, exc_val, exc_tb): + return self._ctxmgr.__exit__(exc_type, exc_val, exc_tb) diff --git a/reframe/frontend/ci.py b/reframe/frontend/ci.py index 1e01ec932c..49b3a4b3a2 100644 --- a/reframe/frontend/ci.py +++ b/reframe/frontend/ci.py @@ -27,10 +27,10 @@ def rfm_command(testcase): else: config_opt = '' - report_file = f'{testcase.check.name}-report.json' + report_file = f'{testcase.check.unique_name}-report.json' if testcase.level: restore_files = ','.join( - f'{t.check.name}-report.json' for t in tc.deps + f'{t.check.unique_name}-report.json' for t in tc.deps ) else: restore_files = None @@ -42,9 +42,9 @@ def rfm_command(testcase): f'-R' if recurse else '', f'--report-file={report_file}', f'--restore-session={restore_files}' if restore_files else '', - f'--report-junit={testcase.check.name}-report.xml', + f'--report-junit={testcase.check.unique_name}-report.xml', f'{"".join("-" + verbosity)}' if verbosity else '', - '-n', f"'^{testcase.check.name}$'", '-r' + '-n', f"'^{testcase.check.unique_name}$'", '-r' ]) max_level = 0 # We need the maximum level to generate the stages section @@ -63,13 +63,13 @@ def rfm_command(testcase): json['image'] = image_name for tc in testcases: - json[f'{tc.check.name}'] = { + json[f'{tc.check.unique_name}'] = { 'stage': f'rfm-stage-{tc.level}', 'script': [rfm_command(tc)], 'artifacts': { - 'paths': [f'{tc.check.name}-report.json'] + 'paths': [f'{tc.check.unique_name}-report.json'] }, - 'needs': [t.check.name for t in tc.deps] + 'needs': [t.check.unique_name for t in tc.deps] } max_level = max(max_level, tc.level) diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index e4b4ed8213..f1c83fbfc7 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -36,69 +36,6 @@ from reframe.frontend.executors import Runner, generate_testcases -def format_check(check, check_deps, detailed=False): - def fmt_list(x): - if not x: - return '' - - return ', '.join(x) - - def fmt_deps(): - no_deps = True - lines = [] - for t, deps in check_deps: - for d in deps: - lines.append(f'- {t} -> {d}') - - if lines: - return '\n '.join(lines) - else: - return '' - - location = inspect.getfile(type(check)) - if not detailed: - return f'- {check.name} (found in {location!r})' - - if check.num_tasks > 0: - node_alloc_scheme = (f'standard ({check.num_tasks} task(s) -- ' - f'may be set differently in hooks)') - elif check.num_tasks == 0: - node_alloc_scheme = 'flexible' - else: - node_alloc_scheme = f'flexible (minimum {-check.num_tasks} task(s))' - - check_info = { - 'Description': check.descr, - 'Environment modules': fmt_list(check.modules), - 'Location': location, 
- 'Maintainers': fmt_list(check.maintainers), - 'Node allocation': node_alloc_scheme, - 'Pipeline hooks': { - k: fmt_list(fn.__name__ for fn in v) - for k, v in check.pipeline_hooks().items() - }, - 'Tags': fmt_list(check.tags), - 'Valid environments': fmt_list(check.valid_prog_environs), - 'Valid systems': fmt_list(check.valid_systems), - 'Dependencies (conceptual)': fmt_list( - [d[0] for d in check.user_deps()] - ), - 'Dependencies (actual)': fmt_deps() - } - lines = [f'- {check.name}:'] - for prop, val in check_info.items(): - lines.append(f' {prop}:') - if isinstance(val, dict): - for k, v in val.items(): - lines.append(f' - {k}: {v}') - else: - lines.append(f' {val}') - - lines.append('') - - return '\n'.join(lines) - - def format_env(envvars): ret = '[ReFrame Environment]\n' notset = '' @@ -107,23 +44,119 @@ def format_env(envvars): return ret -def list_checks(testcases, printer, detailed=False): +def list_checks(testcases, printer, detailed=False, concretized=False): printer.info('[List of matched checks]') + unique_checks = set() + + def dep_lines(u, *, prefix, depth=0, lines=None, printed=None): + if lines is None: + lines = [] + + if printed is None: + printed = set(unique_checks) + + adj = u.deps + for v in adj: + if concretized or (not concretized and + v.check.unique_name not in printed): + dep_lines(v, prefix=prefix + 2*' ', depth=depth+1, + lines=lines, printed=printed) + + printed.add(v.check.unique_name) + if not v.check.is_fixture(): + unique_checks.add(v.check.unique_name) + + if depth: + tc_info = '' + details = '' + if concretized: + tc_info = f' @{u.partition.fullname}+{u.environ.name}' + + location = inspect.getfile(type(u.check)) + if detailed: + details = f' [id: {u.check.unique_name}, file: {location!r}]' + + lines.append( + f'{prefix}^{u.check.display_name}{tc_info}{details}' + ) - # Collect dependencies per test - deps = {} - for t in testcases: - deps.setdefault(t.check.name, []) - deps[t.check.name].append((t, t.deps)) + return lines - checks = set( - t.check for t in testcases - if detailed or not t.check.is_fixture() - ) - printer.info( - '\n'.join(format_check(c, deps[c.name], detailed) for c in checks) - ) - printer.info(f'Found {len(checks)} check(s)\n') + # We need the leaf test cases to be printed at the leftmost + leaf_testcases = list(t for t in testcases if t.in_degree == 0) + for t in leaf_testcases: + tc_info = '' + details = '' + if concretized: + tc_info = f' @{t.partition.fullname}+{t.environ.name}' + + location = inspect.getfile(type(t.check)) + if detailed: + details = f' [id: {t.check.unique_name}, file: {location!r}]' + + # if not concretized and t.check.name not in unique_checks: + if concretized or (not concretized and + t.check.unique_name not in unique_checks): + printer.info(f'- {t.check.display_name}{tc_info}{details}') + + if not t.check.is_fixture(): + unique_checks.add(t.check.unique_name) + + for l in reversed(dep_lines(t, prefix=' ')): + printer.info(l) + + if concretized: + printer.info(f'Concretized {len(testcases)} test case(s)\n') + else: + printer.info(f'Found {len(unique_checks)} check(s)\n') + + +def describe_checks(testcases, printer): + records = [] + unique_names = set() + for tc in testcases: + if tc.check.is_fixture(): + continue + + if tc.check.name not in unique_names: + unique_names.add(tc.check.name) + rec = json.loads(jsonext.dumps(tc.check)) + + # Now manipulate the record to be more user-friendly + # + # 1. Add other fields that are relevant for users + # 2. 
Remove all private fields + rec['unique_name'] = tc.check.unique_name + rec['display_name'] = tc.check.display_name + rec['pipeline_hooks'] = {} + rec['perf_variables'] = list(rec['perf_variables'].keys()) + rec['prefix'] = tc.check.prefix + rec['variant_num'] = tc.check.variant_num + for stage, hooks in tc.check.pipeline_hooks().items(): + for hk in hooks: + rec['pipeline_hooks'].setdefault(stage, []) + rec['pipeline_hooks'][stage].append(hk.__name__) + + for attr in list(rec.keys()): + if attr == '__rfm_class__': + rec['@class'] = rec[attr] + del rec[attr] + elif attr == '__rfm_file__': + rec['@file'] = rec[attr] + del rec[attr] + elif attr.startswith('_'): + del rec[attr] + + # List all required variables + required = [] + for var in tc.check._rfm_var_space: + if not tc.check._rfm_var_space[var].is_defined(): + required.append(var) + + rec['@required'] = required + records.append(dict(sorted(rec.items()))) + + printer.info(jsonext.dumps(records, indent=2)) def list_tags(testcases, printer): @@ -308,13 +341,19 @@ def main(): help=('Generate into FILE a Gitlab CI pipeline ' 'for the selected tests and exit'), ) + + action_options.add_argument( + '--describe', action='store_true', + help='Give full details on the selected tests' + ) action_options.add_argument( - '-L', '--list-detailed', action='store_true', - help='List the selected checks providing details for each test' + '-L', '--list-detailed', nargs='?', const='T', choices=['C', 'T'], + help=('List the selected tests (T) or the concretized test cases (C) ' + 'providing more details') ) action_options.add_argument( - '-l', '--list', action='store_true', - help='List the selected checks' + '-l', '--list', nargs='?', const='T', choices=['C', 'T'], + help='List the selected tests (T) or the concretized test cases (C)' ) action_options.add_argument( '--list-tags', action='store_true', @@ -573,6 +612,25 @@ def main(): help='Use a login shell for job scripts' ) + def restrict_logging(): + '''Restrict logging to errors only. + + This is done when specific options are passed, which generate JSON + output and we don't want to pollute the output with other logging + output. + + :returns: :obj:`True` if the logging was restricted, :obj:`False` + otherwise. 
+ + ''' + + if (options.show_config or + options.detect_host_topology or options.describe): + logging.getlogger().setLevel(logging.ERROR) + return True + else: + return False + # Parse command line options = argparser.parse_args() if len(sys.argv) == 1: @@ -588,10 +646,11 @@ def main(): site_config.select_subconfig('generic') options.update_config(site_config) logging.configure_logging(site_config) - logging.getlogger().colorize = site_config.get('general/0/colorize') printer = PrettyPrinter() printer.colorize = site_config.get('general/0/colorize') - printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) + if not restrict_logging(): + printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) + if os.getenv('RFM_GRAYLOG_SERVER'): printer.warning( 'RFM_GRAYLOG_SERVER environment variable is deprecated; ' @@ -661,9 +720,10 @@ def main(): printer.error(logfiles_message()) sys.exit(1) - logging.getlogger().colorize = site_config.get('general/0/colorize') printer.colorize = site_config.get('general/0/colorize') - printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) + if not restrict_logging(): + printer.adjust_verbosity(calc_verbosity(site_config, options.quiet)) + try: printer.debug('Initializing runtime') runtime.init_runtime(site_config) @@ -703,6 +763,8 @@ def main(): # Show configuration after everything is set up if options.show_config: + # Restore logging level + printer.setLevel(logging.INFO) config_param = options.show_config if config_param == 'all': printer.info(str(rt.site_config)) @@ -720,14 +782,17 @@ def main(): if options.detect_host_topology: from reframe.utility.cpuinfo import cpuinfo + s_cpuinfo = cpuinfo() + + # Restore logging level + printer.setLevel(logging.INFO) topofile = options.detect_host_topology if topofile == '-': - json.dump(cpuinfo(), sys.stdout, indent=2) - sys.stdout.write('\n') + printer.info(json.dumps(s_cpuinfo, indent=2)) else: try: with open(topofile, 'w') as fp: - json.dump(cpuinfo(), fp, indent=2) + json.dump(s_cpuinfo, fp, indent=2) fp.write('\n') except OSError as e: getlogger().error( @@ -952,8 +1017,17 @@ def _case_failed(t): tc.check.disable_hook(h) # Act on checks + if options.describe: + # Restore logging level + printer.setLevel(logging.INFO) + describe_checks(testcases, printer) + sys.exit(0) + if options.list or options.list_detailed: - list_checks(testcases, printer, options.list_detailed) + concretized = (options.list == 'C' or + options.list_detailed == 'C') + detailed = options.list_detailed is not None + list_checks(testcases, printer, detailed, concretized) sys.exit(0) if options.list_tags: @@ -1223,4 +1297,5 @@ def module_unuse(*paths): printer.error(f'could not save log file: {e}') sys.exit(1) finally: - printer.info(logfiles_message()) + if not restrict_logging(): + printer.info(logfiles_message()) diff --git a/reframe/frontend/dependencies.py b/reframe/frontend/dependencies.py index c3160c24b6..5c719c4d4c 100644 --- a/reframe/frontend/dependencies.py +++ b/reframe/frontend/dependencies.py @@ -31,7 +31,7 @@ def build_index(cases): ret = {} for c in cases: - cname = c.check.name + cname = c.check.unique_name ret.setdefault(cname, []) ret[cname].append(c) @@ -130,11 +130,11 @@ def _reduce_deps(graph): '''Reduce test case graph to a test-only graph.''' ret = {} for case, deps in graph.items(): - test_deps = util.OrderedSet(d.check.name for d in deps) + test_deps = util.OrderedSet(d.check.unique_name for d in deps) try: - ret[case.check.name] |= test_deps + ret[case.check.unique_name] |= test_deps 
except KeyError: - ret[case.check.name] = test_deps + ret[case.check.unique_name] = test_deps return ret @@ -251,11 +251,11 @@ def visit(node, path): # Index test cases by test name cases_by_name = {} for c in graph.keys(): - c.level = levels[c.check.name] + c.level = levels[c.check.unique_name] try: - cases_by_name[c.check.name].append(c) + cases_by_name[c.check.unique_name].append(c) except KeyError: - cases_by_name[c.check.name] = [c] + cases_by_name[c.check.unique_name] = [c] return list(itertools.chain(*(retrieve(cases_by_name, n, []) for n in visited))) diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index 8077ffe33c..f98ec9c0b6 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -54,7 +54,7 @@ def __iter__(self): return iter([self._check, self._partition, self._environ]) def __hash__(self): - return (hash(self.check.name) ^ + return (hash(self.check.unique_name) ^ hash(self.partition.fullname) ^ hash(self.environ.name)) @@ -62,12 +62,14 @@ def __eq__(self, other): if not isinstance(other, type(self)): return NotImplemented - return (self.check.name == other.check.name and + return (self.check.unique_name == other.check.unique_name and self.environ.name == other.environ.name and self.partition.fullname == other.partition.fullname) def __repr__(self): - c, p, e = self.check.name, self.partition.fullname, self.environ.name + c = self.check.unique_name if self.check else None + p = self.partition.fullname if self.partition else None + e = self.environ.name if self.environ else None return f'({c!r}, {p!r}, {e!r})' @property @@ -387,6 +389,13 @@ def abort(self, cause=None): else: self.fail((type(exc), exc, None)) + def info(self): + '''Return an info string about this task.''' + name = self.check.display_name + part = self.testcase.partition.fullname + env = self.testcase.environ.name + return f'{name} @{part}+{env}' + class TaskEventListener(abc.ABC): @abc.abstractmethod @@ -459,7 +468,7 @@ def stats(self): return self._stats def runall(self, testcases, restored_cases=None): - num_checks = len({tc.check.name for tc in testcases}) + num_checks = len({tc.check.unique_name for tc in testcases}) self._printer.separator('short double line', 'Running %d check(s)' % num_checks) self._printer.timestamp('Started on', 'short double line') @@ -496,7 +505,7 @@ def _retry_failed(self, cases): rt = runtime.runtime() failures = self._stats.failed() while (failures and rt.current_run < self._max_retries): - num_failed_checks = len({tc.check.name for tc in failures}) + num_failed_checks = len({tc.check.unique_name for tc in failures}) rt.next_run() self._printer.separator( @@ -516,7 +525,7 @@ def _runall(self, testcases): def print_separator(check, prefix): self._printer.separator( 'short single line', - '%s %s (%s)' % (prefix, check.name, check.descr) + '%s %s (%s)' % (prefix, check.unique_name, check.descr) ) self._printer.separator('short single line', diff --git a/reframe/frontend/executors/policies.py b/reframe/frontend/executors/policies.py index c647f7cc62..3c34780129 100644 --- a/reframe/frontend/executors/policies.py +++ b/reframe/frontend/executors/policies.py @@ -97,7 +97,7 @@ def runcase(self, case): self.printer.status( 'RUN', - f'{check.name} on {partition.fullname} using {environ.name}' + f'{check.name} @{partition.fullname}+{environ.name}' ) task = RegressionTask(case, self.task_listeners) self._task_index[case] = task @@ -437,11 +437,9 @@ def _advance_startup(self, task): return 1 elif 
self.deps_succeeded(task): try: - self.printer.status( - 'RUN', f'{task.check.name} on ' - f'{task.testcase.partition.fullname} using ' - f'{task.testcase.environ.name}' - ) + part = task.testcase.partition + env = task.testcase.environ.name + self.printer.status('RUN', task.info()) task.setup(task.testcase.partition, task.testcase.environ, sched_flex_alloc_nodes=self.sched_flex_alloc_nodes, @@ -592,7 +590,7 @@ def on_task_failure(self, task): timings = task.pipeline_timings(['compile_complete', 'run_complete', 'total']) - msg = f'{task.check.info()} [{timings}]' + msg = f'{task.info()} [{timings}]' if task.failed_stage == 'cleanup': self.printer.status('ERROR', msg, just='right') else: @@ -616,7 +614,7 @@ def on_task_success(self, task): timings = task.pipeline_timings(['compile_complete', 'run_complete', 'total']) - msg = f'{task.check.info()} [{timings}]' + msg = f'{task.info()} [{timings}]' self.printer.status('OK', msg, just='right') timings = task.pipeline_timings(['setup', 'compile_complete', diff --git a/reframe/frontend/filters.py b/reframe/frontend/filters.py index 5c1b1a6b93..b53dd7616f 100644 --- a/reframe/frontend/filters.py +++ b/reframe/frontend/filters.py @@ -6,6 +6,7 @@ import re from reframe.core.exceptions import ReframeError +from reframe.core.runtime import runtime def re_compile(patt): @@ -19,7 +20,17 @@ def have_name(patt): regex = re_compile(patt) def _fn(case): - return regex.match(case.check.name) + # Match pattern, but remove spaces from the `display_name` + display_name = case.check.display_name.replace(' ', '') + rt = runtime() + if not rt.get_option('general/0/compact_test_names'): + return regex.match(case.check.unique_name) + else: + if '@' in patt: + # Do an exact match on the unique name + return patt.replace('@', '_') == case.check.unique_name + else: + return regex.match(display_name) return _fn diff --git a/reframe/frontend/loader.py b/reframe/frontend/loader.py index cda5eb1aff..210cda857c 100644 --- a/reframe/frontend/loader.py +++ b/reframe/frontend/loader.py @@ -100,8 +100,8 @@ def _validate_check(self, check): getlogger().warning( f'{checkfile}: {attr!r} is not copyable; ' f'not copyable attributes are not ' - f'allowed inside the __init__() method; ' - f'consider setting them in a pipeline hook instead' + f'allowed inside the __init__() method or post-init hooks; ' + f'consider setting them in another pipeline hook instead' ) return False @@ -215,13 +215,13 @@ def load_from_module(self, module): testfile = module.__file__ try: - conflicted = self._loaded[c.name] + conflicted = self._loaded[c.unique_name] except KeyError: - self._loaded[c.name] = testfile + self._loaded[c.unique_name] = testfile tests.append(c) else: raise NameConflictError( - f'test {c.name!r} from {testfile!r} ' + f'test {c.unique_name!r} from {testfile!r} ' f'is already defined in {conflicted!r}' ) diff --git a/reframe/frontend/printer.py b/reframe/frontend/printer.py index 4b08b77040..e8054cd84b 100644 --- a/reframe/frontend/printer.py +++ b/reframe/frontend/printer.py @@ -75,3 +75,11 @@ def timestamp(self, msg='', separator=None): def __getattr__(self, attr): # delegate all other attribute lookup to the underlying logger return getattr(logging.getlogger(), attr) + + def __setattr__(self, attr, value): + # Delegate colorize setting to the backend logger + if attr == 'colorize': + logging.getlogger().colorize = value + self.__dict__['colorize'] = value + else: + super().__setattr__(attr, value) diff --git a/reframe/frontend/runreport.py b/reframe/frontend/runreport.py index 
70e746ebb2..f8ac0a9f84 100644 --- a/reframe/frontend/runreport.py +++ b/reframe/frontend/runreport.py @@ -13,9 +13,12 @@ import reframe as rfm import reframe.core.exceptions as errors import reframe.utility.jsonext as jsonext -import reframe.utility.versioning as versioning -DATA_VERSION = '1.3.0' + +# The schema data version +# Major version bumps are expected to break the validation of previous schemas + +DATA_VERSION = '2.0' _SCHEMA = os.path.join(rfm.INSTALL_PREFIX, 'reframe/schemas/runreport.json') @@ -31,12 +34,12 @@ def __init__(self, report): self._cases_index = {} for run in self._report['runs']: for tc in run['testcases']: - c, p, e = tc['name'], tc['system'], tc['environment'] + c, p, e = tc['unique_name'], tc['system'], tc['environment'] self._cases_index[c, p, e] = tc # Index also the restored cases for tc in self._report['restored_cases']: - c, p, e = tc['name'], tc['system'], tc['environment'] + c, p, e = tc['unique_name'], tc['system'], tc['environment'] self._cases_index[c, p, e] = tc def __getitem__(self, key): @@ -71,7 +74,7 @@ def slice(self, prop, when=None, unique=False): yield val def case(self, check, part, env): - c, p, e = check.name, part.fullname, env.name + c, p, e = check.unique_name, part.fullname, env.name ret = self._cases_index.get((c, p, e)) if ret is None: # Look up the case in the fallback reports @@ -151,18 +154,15 @@ def _load_report(filename): try: jsonschema.validate(report, schema) except jsonschema.ValidationError as e: - raise errors.ReframeError(f'invalid report {filename!r}') from e + try: + found_ver = report['session_info']['data_version'] + except KeyError: + found_ver = 'n/a' - # Check if the report data is compatible - found_ver = versioning.parse( - report['session_info']['data_version'] - ) - required_ver = versioning.parse(DATA_VERSION) - if found_ver.major != required_ver.major or found_ver < required_ver: raise errors.ReframeError( - f'incompatible report data versions: ' - f'found {found_ver}, required >= {required_ver}' - ) + f'invalid report {filename!r} ' + f'(required data version: {DATA_VERSION}), found: {found_ver})' + ) from e return _RunReport(report) @@ -202,7 +202,7 @@ def junit_xml_report(json_report): testsuite_properties = etree.SubElement(xml_testsuite, 'properties') for tc in rfm_run['testcases']: casename = ( - f"{tc['name']}[{tc['system']}, {tc['environment']}]" + f"{tc['unique_name']}[{tc['system']}, {tc['environment']}]" ) testcase = etree.SubElement( xml_testsuite, 'testcase', diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py index aee28fae77..af889b8b40 100644 --- a/reframe/frontend/statistics.py +++ b/reframe/frontend/statistics.py @@ -70,7 +70,8 @@ def retry_report(self): environ_name = t.check.current_environ.name # Overwrite entry from previous run if available - messages[f"{t.check.name}:{partition_name}:{environ_name}"] = ( + key = f"{t.check.unique_name}:{partition_name}:{environ_name}" + messages[key] = ( f" * Test {t.check.info()} was retried {run} time(s) and " f"{'failed' if t.failed else 'passed'}." 
) @@ -96,14 +97,15 @@ def json(self, force=False): 'build_stderr': None, 'build_stdout': None, 'dependencies_actual': [ - (d.check.name, d.partition.fullname, d.environ.name) + (d.check.unique_name, + d.partition.fullname, d.environ.name) for d in t.testcase.deps ], 'dependencies_conceptual': [ d[0] for d in t.check.user_deps() ], 'description': check.descr, - 'prefix': check.prefix, + 'display_name': check.display_name, 'filename': inspect.getfile(type(check)), 'environment': None, 'fail_phase': None, @@ -116,6 +118,7 @@ def json(self, force=False): 'nodelist': [], 'outputdir': None, 'perfvars': None, + 'prefix': check.prefix, 'result': None, 'stagedir': check.stagedir, 'scheduler': None, @@ -126,7 +129,8 @@ def json(self, force=False): 'time_run': t.duration('run_complete'), 'time_sanity': t.duration('sanity'), 'time_setup': t.duration('setup'), - 'time_total': t.duration('total') + 'time_total': t.duration('total'), + 'unique_name': check.unique_name } # We take partition and environment from the test case and not @@ -213,8 +217,9 @@ def print_failure_report(self, printer): f'(for the last of {last_run} retries)' if last_run > 0 else '' ) printer.info(line_width * '-') - printer.info(f"FAILURE INFO for {r['name']} {retry_info}") - printer.info(f" * Test Description: {r['description']}") + printer.info(f"FAILURE INFO for {r['unique_name']} {retry_info}") + printer.info(f" * Expanded name: {r['display_name']}") + printer.info(f" * Description: {r['description']}") printer.info(f" * System partition: {r['system']}") printer.info(f" * Environment: {r['environment']}") printer.info(f" * Stage directory: {r['stagedir']}") @@ -230,7 +235,7 @@ def print_failure_report(self, printer): f"{r['dependencies_actual']}") printer.info(f" * Maintainers: {r['maintainers']}") printer.info(f" * Failing phase: {r['fail_phase']}") - printer.info(f" * Rerun with '-n {r['name']}" + printer.info(f" * Rerun with '-n {r['unique_name']}" f" -p {r['environment']} --system {r['system']} -r'") printer.info(f" * Reason: {r['fail_reason']}") @@ -251,7 +256,7 @@ def print_failure_stats(self, printer): partfullname = partition.fullname if partition else 'None' environ_name = (check.current_environ.name if check.current_environ else 'None') - f = f'[{check.name}, {environ_name}, {partfullname}]' + f = f'[{check.unique_name}, {environ_name}, {partfullname}]' if tf.failed_stage not in failures: failures[tf.failed_stage] = [] @@ -297,10 +302,10 @@ def performance_report(self): previous_part = '' for t in self.tasks(): if t.check.perfvalues.keys(): - if t.check.name != previous_name: + if t.check.unique_name != previous_name: report_body.append(line_width * '-') - report_body.append(t.check.name) - previous_name = t.check.name + report_body.append(t.check.display_name) + previous_name = t.check.unique_name if t.check.current_partition.fullname != previous_part: report_body.append( diff --git a/reframe/schemas/runreport.json b/reframe/schemas/runreport.json index 6afe98223d..caabfdad9e 100644 --- a/reframe/schemas/runreport.json +++ b/reframe/schemas/runreport.json @@ -22,6 +22,7 @@ "items": {"type": "string"} }, "description": {"type": "string"}, + "display_name": {"type": "string"}, "environment": {"type": ["string", "null"]}, "fail_info": { "type": ["object", "null"], @@ -94,11 +95,10 @@ "time_run": {"type": ["number", "null"]}, "time_sanity": {"type": ["number", "null"]}, "time_setup": {"type": ["number", "null"]}, - "time_total": {"type": ["number", "null"]} + "time_total": {"type": ["number", "null"]}, + "unique_name": 
{"type": "string"} }, - "required": [ - "environment", "name", "result", "system", "filename" - ] + "required": ["environment", "stagedir", "system", "unique_name"] } }, "type": "object", @@ -142,9 +142,7 @@ "items": {"$ref": "#/defs/testcase_type"} } }, - "required": [ - "num_cases", "num_failures", "num_aborted", "runid", "testcases" - ] + "required": ["testcases"] } } }, diff --git a/reframe/utility/__init__.py b/reframe/utility/__init__.py index 470a98b13c..94c1a9e97f 100644 --- a/reframe/utility/__init__.py +++ b/reframe/utility/__init__.py @@ -789,8 +789,11 @@ def _parse_node(nodename): return basename, width, nodeid -def _count_digits(n): - '''Count digits of a decimal number.''' +def count_digits(n): + '''Count the digits of a decimal number. + + :meta private: + ''' num_digits = 1 while n > 10: @@ -847,7 +850,7 @@ def __str__(self): abbrev.append(f'{self.name}{s_start}') else: last = start + delta*(size-1) - digits_last = _count_digits(last) + digits_last = count_digits(last) pad = self.width - digits_last nd_range = self.name if pad > 0: diff --git a/tools/gendoclistings.py b/tools/gendoclistings.py new file mode 100755 index 0000000000..1ecf3af034 --- /dev/null +++ b/tools/gendoclistings.py @@ -0,0 +1,339 @@ +#!/usr/bin/env python3 + +import collections +import functools +import os +import socket +import sys +import reframe.utility.osext as osext + + +def print_usage(): + print(f'Usage: {sys.argv[0]} [all||]') + + +ListingInfo = collections.namedtuple( + 'ListingInfo', + ['command', 'tags', 'filters', 'env', 'xfail'] +) + + +def remove_nocolor_opt(s): + return s.replace(' --nocolor', '') + + +def remove_system_opt(s): + return s.replace(' --system=catalina', '') + + +def replace_home(s): + return s.replace(os.getenv('HOME'), '/home/user') + + +def replace_user(s): + user = osext.osuser() + return s.replace(user, 'user') + + +def replace_hostname(s): + host = socket.getfqdn() + return s.replace(host, 'host') + + +DEFAULT_FILTERS = [remove_nocolor_opt, remove_system_opt, + replace_home, replace_user, replace_hostname] + + +LISTINGS = { + 'hello1': ListingInfo( + './bin/reframe -c tutorials/basics/hello/hello1.py -r', + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'run-report': ListingInfo( + f'cat {os.getenv("HOME")}/.reframe/reports/run-report.json', + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env=None, + xfail=False + ), + 'hello2': ListingInfo( + './bin/reframe -c tutorials/basics/hello/hello2.py -r', + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=True + ), + 'hello2_catalina': ListingInfo( + './bin/reframe -C tutorials/config/settings.py --system=catalina -c tutorials/basics/hello/hello2.py -r', # noqa: E501 + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'hellomp1': ListingInfo( + './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp1.py -r', # noqa: E501 + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'hellomp2': ListingInfo( + './bin/reframe --system=catalina -c tutorials/basics/hellomp/hellomp2.py -r', # noqa: E501 + {'local', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=True + ), + 'alltests_daint': ListingInfo( + './bin/reframe -c 
tutorials/basics/ -R -n "HelloMultiLangTest|HelloThreadedExtended2Test|StreamWithRefTest" --performance-report -r', # noqa: E501 + {'remote', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'stream4_daint': ListingInfo( + './bin/reframe -c tutorials/basics/stream/stream4.py -r --performance-report', # noqa: E501 + {'remote', 'tutorial-basics'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_deps': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -r', + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_latency_list': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -l', # noqa: E501 + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_latency_unresolved_deps': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest --system=daint:gpu -l', # noqa: E501 + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_list_concretized': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -lC', + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_list_concretized_gnu': ListingInfo( + './bin/reframe -c tutorials/deps/osu_benchmarks.py -n OSULatencyTest -L -p builtin -p gnu', # noqa: E501 + {'remote', 'tutorial-deps'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'param_deps_list': ListingInfo( + './bin/reframe -c tutorials/deps/parameterized.py -l', + {'local', 'tutorial-deps'}, + DEFAULT_FILTERS, + env=None, + xfail=False + ), + 'osu_bench_fixtures_list': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -l', + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py') + }, + xfail=False + ), + 'osu_bandwidth_concretized_daint': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC', # noqa: E501 + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bandwidth_concretized_daint_pgi': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -n osu_bandwidth_test -lC -p pgi', # noqa: E501 + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'osu_bench_fixtures_run': ListingInfo( + './bin/reframe -c tutorials/fixtures/osu_benchmarks.py -r', + {'remote', 'tutorial-fixtures'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 
'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'stream_params': ListingInfo( + './bin/reframe --system=catalina -c tutorials/advanced/parameterized/stream.py -l', # noqa: E501 + {'local', 'tutorial-advanced'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'maketest_mixin': ListingInfo( + './bin/reframe --system=catalina -c tutorials/advanced/makefiles/maketest_mixin.py -l', # noqa: E501 + {'local', 'tutorial-advanced'}, + DEFAULT_FILTERS, + env={ + 'RFM_CONFIG_FILE': os.path.join(os.getcwd(), + 'tutorials/config/settings.py'), + 'RFM_COLORIZE': 'n' + }, + xfail=False + ), + 'hello2_typo': ListingInfo( + 'sed -ie "s/parameter/paramter/g" tutorials/basics/hello/hello2.py && ' + './bin/reframe -c tutorials/basics/hello -R -l && ' + 'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py', # noqa: E501 + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'hello2_typo_stacktrace': ListingInfo( + 'sed -ie "s/parameter/paramter/g" tutorials/basics/hello/hello2.py && ' + './bin/reframe -c tutorials/basics/hello -R -l -v && ' + 'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py', # noqa: E501 + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'hello2_print_stdout': ListingInfo( + 'sed -ie "s/self\.stdout/sn.print(self.stdout)/g" tutorials/basics/hello/hello2.py && ' # noqa: E501 + './bin/reframe --system=catalina -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -r && ' # noqa: E501 + 'mv tutorials/basics/hello/hello2.pye tutorials/basics/hello/hello2.py', # noqa: E501 + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'hello2_list_verbose': ListingInfo( + './bin/reframe -C tutorials/config/settings.py -c tutorials/basics/hello/hello2.py -l -vv', # noqa: E501 + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'deps_complex_run': ListingInfo( + './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -r', # noqa: E501 + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=True + ), + 'deps_rerun_t6': ListingInfo( + './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py --keep-stage-files -r > /dev/null || ' # noqa: E501 + './bin/reframe --restore-session --keep-stage-files -n T6 -r', + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ), + 'deps_run_t6': ListingInfo( + './bin/reframe -c unittests/resources/checks_unlisted/deps_complex.py -n T6 -r', # noqa: E501 + {'local', 'tutorial-tips-n-tricks'}, + DEFAULT_FILTERS, + env={'RFM_COLORIZE': 'n'}, + xfail=False + ) +} + + +runcmd = functools.partial(osext.run_command, log=False, shell=True) + +if __name__ == '__main__': + try: + choice = sys.argv[1] + except IndexError: + choice = 'all' + + for name, info in LISTINGS.items(): + if (choice != 'all' and choice not in info.tags and choice != name): + continue + + print(f'Generating listing {name}...') + + # Set up the environment + if info.env: + for k, v in info.env.items(): + os.environ[k] = v + + completed = runcmd(info.command, check=not info.xfail) + if info.xfail and completed.returncode == 0: + print(f'{info.command} should have failed, but it did not; ' + 
f'skipping...') + continue + + # Apply filters + output = completed.stdout + for f in info.filters: + output = f(output) + + # Write the listing + filename = os.path.join('docs/listings', f'{name}.txt') + with open(filename, 'w') as fp: + fp.write(output) diff --git a/tutorials/deps/parameterized.py b/tutorials/deps/parameterized.py new file mode 100644 index 0000000000..2632d15139 --- /dev/null +++ b/tutorials/deps/parameterized.py @@ -0,0 +1,39 @@ +# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. +# +# SPDX-License-Identifier: BSD-3-Clause + +import reframe as rfm +import reframe.utility.sanity as sn + + +@rfm.simple_test +class TestA(rfm.RunOnlyRegressionTest): + z = parameter(range(10)) + executable = 'echo' + valid_systems = ['*'] + valid_prog_environs = ['*'] + + @run_after('init') + def set_exec_opts(self): + self.executable_opts = [str(self.z)] + + @sanity_function + def validate(self): + return sn.assert_eq( + sn.extractsingle(r'\d+', self.stdout, 0, int), self.z + ) + + +@rfm.simple_test +class TestB(rfm.RunOnlyRegressionTest): + executable = 'echo' + valid_systems = ['*'] + valid_prog_environs = ['*'] + sanity_patterns = sn.assert_true(1) + + @run_after('init') + def setdeps(self): + variants = TestA.get_variant_nums(z=lambda x: x > 5) + for v in variants: + self.depends_on(TestA.variant_name(v)) diff --git a/unittests/conftest.py b/unittests/conftest.py index b232584ac5..a27f066058 100644 --- a/unittests/conftest.py +++ b/unittests/conftest.py @@ -64,7 +64,7 @@ def _make_exec_ctx(config_file=TEST_CONFIG_FILE, @pytest.fixture def make_exec_ctx_g(make_exec_ctx): - '''Same as ``make_exec_ctx_g`` except that it is a generator. + '''Same as ``make_exec_ctx`` except that it is a generator. You should use this fixture if you want to pass it to ``yield from`` expressions. 
diff --git a/unittests/resources/checks/bad/invalid_check.py b/unittests/resources/checks/bad/invalid_check.py index 40a3d265e6..5325d54e75 100644 --- a/unittests/resources/checks/bad/invalid_check.py +++ b/unittests/resources/checks/bad/invalid_check.py @@ -8,9 +8,8 @@ @rfm.simple_test class SomeTest(rfm.RegressionTest): - def __init__(self): - self.valid_systems = [] - self.valid_prog_environs = [] + valid_systems = [] + valid_prog_environs = [] class NotATest: diff --git a/unittests/resources/checks/emptycheck.py b/unittests/resources/checks/emptycheck.py index e41361274c..3da6add308 100644 --- a/unittests/resources/checks/emptycheck.py +++ b/unittests/resources/checks/emptycheck.py @@ -8,6 +8,5 @@ @rfm.simple_test class EmptyTest(rfm.RegressionTest): - def __init__(self): - self.valid_systems = [] - self.valid_prog_environs = [] + valid_systems = [] + valid_prog_environs = [] diff --git a/unittests/resources/checks/frontend_checks.py b/unittests/resources/checks/frontend_checks.py index fc291f990e..6a3eb8bc0e 100644 --- a/unittests/resources/checks/frontend_checks.py +++ b/unittests/resources/checks/frontend_checks.py @@ -18,21 +18,18 @@ class BaseFrontendCheck(rfm.RunOnlyRegressionTest): - def __init__(self): - self.local = True - self.executable = 'echo hello && echo perf: 10 Gflop/s' - self.sanity_patterns = sn.assert_found('hello', self.stdout) - self.tags = {type(self).__name__} - self.maintainers = ['VK'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo hello && echo perf: 10 Gflop/s' + local = True + + @sanity_function + def validate_output(self): + return sn.assert_found('hello', self.stdout) @rfm.simple_test class BadSetupCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - @run_after('setup') def raise_error(self): raise ReframeError('Setup failure') @@ -40,12 +37,6 @@ def raise_error(self): @rfm.simple_test class BadSetupCheckEarly(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.local = False - @run_before('setup') def raise_error_early(self): raise ReframeError('Setup failure') @@ -53,54 +44,39 @@ def raise_error_early(self): @rfm.simple_test class NoSystemCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = [] - self.valid_prog_environs = ['*'] + valid_systems = [] @rfm.simple_test class NoPrgEnvCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = [] + valid_prog_environs = [] @rfm.simple_test class SanityFailureCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sanity_patterns = sn.assert_found('foo', self.stdout) + @sanity_function + def validate_output(self): + return sn.assert_found('foo', self.stdout) @rfm.simple_test class PerformanceFailureCheck(BaseFrontendCheck): - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.perf_patterns = { - 'perf': sn.extractsingle(r'perf: (\d+)', self.stdout, 1, int) - } - self.reference = { - '*': { - 'perf': (20, -0.1, 0.1, 'Gflop/s') - } + reference = { + '*': { + 'perf': (20, -0.1, 0.1, 'Gflop/s') } + } + + @performance_function('Gflop/s') + def perf(self): + return sn.extractsingle(r'perf: (\d+)', self.stdout, 1, int) @rfm.simple_test class 
CustomPerformanceFailureCheck(BaseFrontendCheck, special=True): '''Simulate a performance check that ignores completely logging''' - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.strict_check = False + strict_check = False def check_performance(self): raise PerformanceError('performance failure') @@ -109,12 +85,8 @@ def check_performance(self): class KeyboardInterruptCheck(BaseFrontendCheck, special=True): '''Simulate keyboard interrupt during test's execution.''' - def __init__(self, phase='wait'): - super().__init__() - self.executable = 'sleep 1' - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.phase = phase + executable = 'sleep 1' + phase = variable(str) @run_before('setup') def raise_before_setup(self): @@ -126,98 +98,83 @@ def run_wait(self): if self.phase == 'wait': raise KeyboardInterrupt else: - super().wait() + return super().run_wait() class SystemExitCheck(BaseFrontendCheck, special=True): '''Simulate system exit from within a check.''' - def __init__(self): - super().__init__() - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - def run_wait(self): # We do our nasty stuff in wait() to make things more complicated sys.exit(1) @rfm.simple_test -class CleanupFailTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sourcesdir = None - self.executable = 'echo foo' - self.sanity_patterns = sn.assert_found(r'foo', self.stdout) - +class CleanupFailTest(BaseFrontendCheck): @run_before('cleanup') def fail(self): # Make this test fail on purpose raise Exception -class SleepCheck(BaseFrontendCheck): - _next_id = 0 - - def __init__(self, sleep_time): - super().__init__() - self.name = '%s_%s' % (self.name, SleepCheck._next_id) - self.sourcesdir = None - self.sleep_time = sleep_time - self.executable = 'python3' +class SleepCheck(rfm.RunOnlyRegressionTest, special=True): + sleep_time = variable(float, int) + poll_fail = variable(str, type(None), value=None) + print_timestamp = ( + 'python3 -c "import time; print(time.time(), flush=True)"' + ) + executable = 'python3' + prerun_cmds = [print_timestamp] + postrun_cmds = [print_timestamp] + sanity_patterns = sn.assert_true(1) + valid_systems = ['*'] + valid_prog_environs = ['*'] + + @run_before('run') + def set_sleep_time(self): self.executable_opts = [ - '-c "from time import sleep; sleep(%s)"' % sleep_time + f'-c "import time; time.sleep({self.sleep_time})"' ] - print_timestamp = ( - "python3 -c \"from datetime import datetime; " - "print(datetime.today().strftime('%s.%f'), flush=True)\"") - self.prerun_cmds = [print_timestamp] - self.postrun_cmds = [print_timestamp] - self.sanity_patterns = sn.assert_found(r'.*', self.stdout) - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - SleepCheck._next_id += 1 - - -class SleepCheckPollFail(SleepCheck, special=True): - '''Emulate a test failing in the polling phase.''' def run_complete(self): - raise ValueError + if self.poll_fail == 'early': + # Emulate a test failing in the polling phase. 
+ raise ValueError + elif self.poll_fail == 'late' and self.job.finished(): + # Emulate a test failing in the polling phase after the test has + # finished + raise ValueError + return super().run_complete() -class SleepCheckPollFailLate(SleepCheck, special=True): - '''Emulate a test failing in the polling phase - after the test has finished.''' - def run_complete(self): - if self._job.finished(): - raise ValueError +class RetriesCheck(rfm.RunOnlyRegressionTest): + filename = variable(str) + num_runs = variable(int) + local = True + valid_systems = ['*'] + valid_prog_environs = ['*'] -class RetriesCheck(BaseFrontendCheck): - def __init__(self, run_to_pass, filename): - super().__init__() - self.sourcesdir = None - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.prerun_cmds = ['current_run=$(cat %s)' % filename] - self.executable = 'echo $current_run' - self.postrun_cmds = ['((current_run++))', - 'echo $current_run > %s' % filename] - self.sanity_patterns = sn.assert_found('%d' % run_to_pass, self.stdout) + @run_before('run') + def set_exec(self): + self.executable = f''' +current_run=$(cat {self.filename}) +echo $current_run +((current_run++)) +echo $current_run > {self.filename}''' + + @sanity_function + def validate(self): + return sn.assert_found(str(self.num_runs), self.stdout) class SelfKillCheck(rfm.RunOnlyRegressionTest, special=True): - def __init__(self): - self.local = True - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.executable = 'echo hello' - self.sanity_patterns = sn.assert_found('hello', self.stdout) - self.tags = {type(self).__name__} - self.maintainers = ['TM'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + local = True + executable = 'echo' + sanity_patterns = sn.assert_true(1) def run(self): super().run() @@ -226,13 +183,12 @@ def run(self): class CompileFailureCheck(rfm.RegressionTest): - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sanity_patterns = sn.assert_found(r'hello', self.stdout) - self.sourcesdir = None - self.sourcepath = 'x.c' - self.prebuild_cmds = ['echo foo > x.c'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + sanity_patterns = sn.assert_true(1) + sourcesdir = None + sourcepath = 'x.c' + prebuild_cmds = ['echo foo > x.c'] # The following tests do not validate and should not be loaded @@ -241,23 +197,26 @@ def __init__(self): class TestWithGenerator(rfm.RunOnlyRegressionTest): '''This test is invalid in ReFrame and the loader must not load it''' - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + @run_after('init') + def post_init(self): def foo(): yield True - self.sanity_patterns = sn.defer(foo()) + self.x = foo() @rfm.simple_test class TestWithFileObject(rfm.RunOnlyRegressionTest): '''This test is invalid in ReFrame and the loader must not load it''' - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] + valid_systems = ['*'] + valid_prog_environs = ['*'] + + @run_after('init') + def file_handler(self): with open(__file__) as fp: pass diff --git a/unittests/resources/checks/hellocheck.py b/unittests/resources/checks/hellocheck.py index 98efee960f..eb4ccff652 100644 --- a/unittests/resources/checks/hellocheck.py +++ b/unittests/resources/checks/hellocheck.py @@ -8,30 +8,33 @@ @rfm.simple_test -class HelloTest(rfm.RegressionTest): - def __init__(self): - self.name = 'hellocheck' - self.descr = 'C Hello World test' +class 
HelloTest(rfm.RegressionTest, pin_prefix=True): + descr = 'C Hello World test' - # All available systems are supported - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sourcepath = 'hello.c' - self.tags = {'foo', 'bar'} - self.sanity_patterns = sn.assert_found(r'Hello, World\!', self.stdout) - self.maintainers = ['VK'] + # All available systems are supported + valid_systems = ['*'] + valid_prog_environs = ['*'] + sourcepath = 'hello.c' + tags = {'foo', 'bar'} + maintainers = ['VK'] + + @sanity_function + def validate(self): + return sn.assert_found(r'Hello, World\!', self.stdout) @rfm.simple_test class CompileOnlyHelloTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.descr = 'Compile-only C Hello World test' + descr = 'Compile-only C Hello World test' + + # All available systems are supported + valid_systems = ['*'] + valid_prog_environs = ['*'] + sourcepath = 'hello.c' - # All available systems are supported - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sourcepath = 'hello.c' - self.sanity_patterns = sn.assert_not_found(r'(?i)error', self.stdout) + @sanity_function + def validate(self): + return sn.assert_not_found(r'(?i)error', self.stdout) @rfm.simple_test @@ -41,5 +44,6 @@ class SkipTest(rfm.RunOnlyRegressionTest): valid_prog_environs = ['*'] sanity_patterns = sn.assert_true(1) - def __init__(self): + @run_after('init') + def foo(self): self.skip_if(True, 'unsupported') diff --git a/unittests/resources/checks/hellocheck_make.py b/unittests/resources/checks/hellocheck_make.py index 7f896a1901..179e77e709 100644 --- a/unittests/resources/checks/hellocheck_make.py +++ b/unittests/resources/checks/hellocheck_make.py @@ -9,17 +9,21 @@ @rfm.simple_test class HelloMakeTest(rfm.RegressionTest): - def __init__(self): - self.descr = 'C++ Hello World test' + descr = 'Makefile test' - # All available systems are supported - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.build_system = 'Make' + # All available systems are supported + valid_systems = ['*'] + valid_prog_environs = ['*'] + build_system = 'Make' + executable = './hello_c' + keep_files = ['hello_c'] + tags = {'foo', 'bar'} + maintainers = ['VK'] + + @run_before('compile') + def setflags(self): self.build_system.cflags = ['-O2'] - self.build_system.cxxflags = ['-O2'] - self.executable = './hello_c' - self.keep_files = ['hello_c'] - self.tags = {'foo', 'bar'} - self.sanity_patterns = sn.assert_found(r'Hello, World\!', self.stdout) - self.maintainers = ['VK'] + + @sanity_function + def validate(self): + return sn.assert_found(r'Hello, World\!', self.stdout) diff --git a/unittests/resources/checks_unlisted/deprecated_test.py b/unittests/resources/checks_unlisted/deprecated_test.py index 3297d30c16..9e9c6c3daa 100644 --- a/unittests/resources/checks_unlisted/deprecated_test.py +++ b/unittests/resources/checks_unlisted/deprecated_test.py @@ -6,12 +6,11 @@ @rfm.simple_test class DeprecatedTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.local = True - self.executable = 'echo hello' - self.sanity_patterns = sn.assert_found('hello', self.stdout) + valid_systems = ['*'] + valid_prog_environs = ['*'] + local = True + executable = 'echo' + sanity_patterns = sn.assert_true(1) @run_before('setup') def deprecation_warning(self): diff --git a/unittests/resources/checks_unlisted/deps_complex.py b/unittests/resources/checks_unlisted/deps_complex.py index eb4b590475..7a9555f702 100644 --- 
a/unittests/resources/checks_unlisted/deps_complex.py +++ b/unittests/resources/checks_unlisted/deps_complex.py @@ -6,6 +6,7 @@ import os import reframe as rfm import reframe.utility.sanity as sn +import reframe.utility.typecheck as typ # # The following tests implement the dependency graph below: @@ -35,19 +36,19 @@ class BaseTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sourcesdir = None - self.executable = 'echo' - self._count = int(type(self).__name__[1:]) - self.sanity_patterns = sn.defer(True) - self.keep_files = ['out.txt'] - - @property - @deferrable - def count(self): - return self._count + valid_systems = ['*'] + valid_prog_environs = ['*'] + sourcesdir = None + executable = 'echo' + keep_files = ['out.txt'] + count = variable(int) + deps = variable(typ.List[str], value=[]) + + @run_after('init') + def init_deps(self): + self.count = int(self.unique_name[1:]) + for d in self.deps: + self.depends_on(d) @run_before('run') def write_count(self): @@ -59,115 +60,119 @@ def write_count(self): @rfm.simple_test class T0(BaseTest): - pass + sanity_patterns = sn.assert_true(1) @rfm.simple_test class T1(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T4') - self.depends_on('T5') - self.sanity_patterns = sn.assert_eq(self.count, 14) + deps = ['T4', 'T5'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 14) @require_deps def prepend_output(self, T4, T5): with open(os.path.join(T4().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) with open(os.path.join(T5().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T2(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T6') + deps = ['T6'] + @sanity_function + def validate(self): # Make this test fail on purpose: expected value is 31 normally - self.sanity_patterns = sn.assert_eq(self.count, 30) + return sn.assert_eq(self.count, 30) @require_deps def prepend_output(self, T6): with open(os.path.join(T6().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T3(T2): - def __init__(self): - super().__init__() - self.sanity_patterns = sn.assert_eq(self.count, 32) + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 32) @rfm.simple_test class T4(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T0') - self.sanity_patterns = sn.assert_eq(self.count, 4) + deps = ['T0'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 4) @require_deps def prepend_output(self, T0): with open(os.path.join(T0().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T5(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T4') - self.sanity_patterns = sn.assert_eq(self.count, 9) + deps = ['T4'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 9) @require_deps def prepend_output(self, T4): with open(os.path.join(T4().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T6(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T1') - self.depends_on('T5') - self.sanity_patterns = sn.assert_eq(self.count, 29) + deps = ['T1', 'T5'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 29) 
@require_deps def prepend_output(self, T1, T5): with open(os.path.join(T1().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) with open(os.path.join(T5().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T7(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T2') - self.sanity_patterns = sn.assert_eq(self.count, 38) + deps = ['T2'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 38) @require_deps def prepend_output(self, T2): with open(os.path.join(T2().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @rfm.simple_test class T8(BaseTest): - def __init__(self): - super().__init__() - self.depends_on('T1') - self.sanity_patterns = sn.assert_eq(self.count, 22) + deps = ['T1'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 22) @require_deps def prepend_output(self, T1): with open(os.path.join(T1().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) @run_after('setup') def fail(self): @@ -180,12 +185,13 @@ class T9(BaseTest): # This tests fails because of T8. It is added to make sure that # all tests are accounted for in the summary. - def __init__(self): - super().__init__() - self.depends_on('T8') - self.sanity_patterns = sn.assert_eq(self.count, 31) + deps = ['T8'] + + @sanity_function + def validate(self): + return sn.assert_eq(self.count, 31) @require_deps def prepend_output(self, T8): with open(os.path.join(T8().stagedir, 'out.txt')) as fp: - self._count += int(fp.read()) + self.count += int(fp.read()) diff --git a/unittests/resources/checks_unlisted/deps_simple.py b/unittests/resources/checks_unlisted/deps_simple.py index 7bef882429..b856ad187f 100644 --- a/unittests/resources/checks_unlisted/deps_simple.py +++ b/unittests/resources/checks_unlisted/deps_simple.py @@ -10,12 +10,10 @@ @rfm.simple_test class Test0(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_systems = ['sys0:p0', 'sys0:p1'] - self.valid_prog_environs = ['e0', 'e1'] - self.executable = 'echo' - self.executable_opts = [self.name] - self.sanity_patterns = sn.assert_found(self.name, self.stdout) + valid_systems = ['sys0:p0', 'sys0:p1'] + valid_prog_environs = ['e0', 'e1'] + executable = 'echo' + sanity_patterns = sn.assert_true(1) @rfm.simple_test @@ -23,7 +21,8 @@ class Test1(rfm.RunOnlyRegressionTest): kind = parameter(['default', 'fully', 'by_part', 'by_case', 'custom', 'any', 'all', 'nodeps']) - def __init__(self): + @run_after('init') + def setup_deps(self): def custom_deps(src, dst): return ( src[0] == 'p0' and diff --git a/unittests/resources/checks_unlisted/kbd_interrupt.py b/unittests/resources/checks_unlisted/kbd_interrupt.py index 32547bb737..f63fe6c24d 100644 --- a/unittests/resources/checks_unlisted/kbd_interrupt.py +++ b/unittests/resources/checks_unlisted/kbd_interrupt.py @@ -15,12 +15,10 @@ @rfm.simple_test class KeyboardInterruptCheck(rfm.RunOnlyRegressionTest): - def __init__(self): - self.local = True - self.executable = 'sleep 1' - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.tags = {self.name} + local = True + executable = 'sleep 1' + valid_systems = ['*'] + valid_prog_environs = ['*'] @run_before('setup') def raise_keyboard_interrupt(self): diff --git a/unittests/test_cli.py b/unittests/test_cli.py index 55e9368c06..acd60533e1 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -90,6 
+90,10 @@ def _run_reframe(system='generic:default', argv += ['-l'] elif action == 'list_detailed': argv += ['-L'] + elif action == 'list_concretized': + argv += ['-lC'] + elif action == 'list_detailed_concretized': + argv += ['-LC'] elif action == 'list_tags': argv += ['--list-tags'] elif action == 'help': @@ -244,7 +248,7 @@ def test_check_submit_success(run_reframe, remote_exec_ctx): def test_check_failure(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'BadSetupCheck'] + more_options=['-n', 'BadSetupCheck$'] ) assert 'FAILED' in stdout assert returncode != 0 @@ -253,7 +257,7 @@ def test_check_failure(run_reframe): def test_check_setup_failure(run_reframe): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'BadSetupCheckEarly'], + more_options=['-n', 'BadSetupCheckEarly'], local=False, ) @@ -268,7 +272,7 @@ def test_check_kbd_interrupt(run_reframe): checkpath=[ 'unittests/resources/checks_unlisted/kbd_interrupt.py' ], - more_options=['-t', 'KeyboardInterruptCheck'], + more_options=['-n', 'KeyboardInterruptCheck'], local=False, ) assert 'Traceback' not in stdout @@ -280,7 +284,7 @@ def test_check_kbd_interrupt(run_reframe): def test_check_sanity_failure(run_reframe, tmp_path): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck'] + more_options=['-n', 'SanityFailureCheck'] ) assert 'FAILED' in stdout @@ -297,7 +301,7 @@ def test_check_sanity_failure(run_reframe, tmp_path): def test_dont_restage(run_reframe, tmp_path): run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck'] + more_options=['-n', 'SanityFailureCheck'] ) # Place a random file in the test's stage directory and rerun with @@ -307,7 +311,7 @@ def test_dont_restage(run_reframe, tmp_path): (stagedir / 'foobar').touch() returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck', + more_options=['-n', 'SanityFailureCheck', '--dont-restage', '--max-retries=1'] ) assert os.path.exists(stagedir / 'foobar') @@ -340,7 +344,7 @@ def test_checkpath_symlink(run_reframe, tmp_path): def test_performance_check_failure(run_reframe, tmp_path, perflogdir): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'PerformanceFailureCheck'] + more_options=['-n', 'PerformanceFailureCheck'] ) assert 'FAILED' in stdout @@ -360,7 +364,7 @@ def test_perflogdir_from_env(run_reframe, tmp_path, monkeypatch): monkeypatch.setenv('FOODIR', str(tmp_path / 'perflogs')) returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'PerformanceFailureCheck'], + more_options=['-n', 'PerformanceFailureCheck'], perflogdir='$FOODIR' ) assert returncode == 1 @@ -373,7 +377,7 @@ def test_perflogdir_from_env(run_reframe, tmp_path, monkeypatch): def test_performance_report(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'PerformanceFailureCheck', '--performance-report'] + more_options=['-n', 'PerformanceFailureCheck', '--performance-report'] ) assert r'PERFORMANCE REPORT' in stdout assert r'perf: 10 Gflop/s' in stdout @@ -382,7 +386,7 @@ def 
test_performance_report(run_reframe): def test_skip_system_check_option(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['--skip-system-check', '-t', 'NoSystemCheck'] + more_options=['--skip-system-check', '-n', 'NoSystemCheck'] ) assert 'PASSED' in stdout assert returncode == 0 @@ -391,7 +395,7 @@ def test_skip_system_check_option(run_reframe): def test_skip_prgenv_check_option(run_reframe): returncode, stdout, _ = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['--skip-prgenv-check', '-t', 'NoPrgEnvCheck'] + more_options=['--skip-prgenv-check', '-n', 'NoPrgEnvCheck'] ) assert 'PASSED' in stdout assert returncode == 0 @@ -539,6 +543,24 @@ def test_list_with_details(run_reframe): assert returncode == 0 +def test_list_concretized(run_reframe): + returncode, stdout, stderr = run_reframe( + checkpath=['unittests/resources/checks/frontend_checks.py'], + action='list_concretized' + ) + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr + assert returncode == 0 + + returncode, stdout, stderr = run_reframe( + checkpath=['unittests/resources/checks/frontend_checks.py'], + action='list_detailed_concretized' + ) + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr + assert returncode == 0 + + def test_list_tags(run_reframe): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/hellocheck.py', @@ -556,7 +578,7 @@ def test_filtering_multiple_criteria(run_reframe): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks'], action='list', - more_options=['-t', 'foo', '-n', 'hellocheck'] + more_options=['-t', 'foo', '-n', 'HelloTest'] ) assert 'Traceback' not in stdout assert 'Traceback' not in stderr @@ -729,7 +751,7 @@ def test_overwrite_module_path(run_reframe, user_exec_ctx): def test_failure_stats(run_reframe): returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], - more_options=['-t', 'SanityFailureCheck', '--failure-stats'] + more_options=['-n', 'SanityFailureCheck', '--failure-stats'] ) assert r'FAILURE STATISTICS' in stdout assert r'sanity 1 [SanityFailureCheck' in stdout diff --git a/unittests/test_dependencies.py b/unittests/test_dependencies.py index e31a7330b8..3cf15f3e7c 100644 --- a/unittests/test_dependencies.py +++ b/unittests/test_dependencies.py @@ -13,6 +13,7 @@ import reframe.utility as util import reframe.utility.sanity as sn import reframe.utility.udeps as udeps +import unittests.utility as test_util from reframe.core.environments import Environment from reframe.core.exceptions import DependencyError @@ -320,12 +321,10 @@ def test_dependecies_how_functions_undoc(): def test_build_deps_deprecated_syntax(loader, default_exec_ctx): class Test0(rfm.RegressionTest): - def __init__(self): - self.valid_systems = ['sys0:p0', 'sys0:p1'] - self.valid_prog_environs = ['e0', 'e1'] - self.executable = 'echo' - self.executable_opts = [self.name] - self.sanity_patterns = sn.assert_found(self.name, self.stdout) + valid_systems = ['sys0:p0', 'sys0:p1'] + valid_prog_environs = ['e0', 'e1'] + executable = 'echo' + sanity_patterns = sn.assert_true(1) class Test1_deprecated(rfm.RunOnlyRegressionTest): kind = parameter([rfm.DEPEND_FULLY, @@ -517,23 +516,14 @@ def test_build_deps_empty(default_exec_ctx): assert {} == dependencies.build_deps([])[0] -@pytest.fixture -def make_test(): - class MyTest(rfm.RegressionTest): - def __init__(self, name): - 
self.name = name - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.executable = 'echo' - self.executable_opts = [name] - - def _make_test(name): - return MyTest(name) - - return _make_test +def make_test(name): + return test_util.make_check(rfm.RegressionTest, + alt_name=name, + valid_systems=['*'], + valid_prog_environs=['*']) -def test_valid_deps(make_test, default_exec_ctx): +def test_valid_deps(default_exec_ctx): # # t0 +-->t5<--+ # ^ | | @@ -571,7 +561,7 @@ def test_valid_deps(make_test, default_exec_ctx): ) -def test_cyclic_deps(make_test, default_exec_ctx): +def test_cyclic_deps(default_exec_ctx): # # t0 +-->t5<--+ # ^ | | @@ -617,7 +607,7 @@ def test_cyclic_deps(make_test, default_exec_ctx): 't3->t1->t4->t3' in str(exc_info.value)) -def test_cyclic_deps_by_env(make_test, default_exec_ctx): +def test_cyclic_deps_by_env(default_exec_ctx): t0 = make_test('t0') t1 = make_test('t1') t1.depends_on('t0', udeps.env_is('e0')) @@ -636,7 +626,7 @@ def test_validate_deps_empty(default_exec_ctx): dependencies.validate_deps({}) -def test_skip_unresolved_deps(make_test, make_exec_ctx): +def test_skip_unresolved_deps(make_exec_ctx): # # t0 t4 # ^ ^ ^ @@ -702,7 +692,7 @@ def assert_topological_order(cases, graph): assert cases_order in valid_orderings -def test_prune_deps(make_test, default_exec_ctx): +def test_prune_deps(default_exec_ctx): # # t0 +-->t5<--+ # ^ | | @@ -757,7 +747,7 @@ def test_prune_deps(make_test, default_exec_ctx): assert len(pruned_deps[node('t0')]) == 0 -def test_toposort(make_test, default_exec_ctx): +def test_toposort(default_exec_ctx): # # t0 +-->t5<--+ # ^ | | @@ -807,7 +797,7 @@ def test_toposort(make_test, default_exec_ctx): assert cases_by_level[4] == {'t4'} -def test_toposort_subgraph(make_test, default_exec_ctx): +def test_toposort_subgraph(default_exec_ctx): # # t0 # ^ diff --git a/unittests/test_fields.py b/unittests/test_fields.py index d33ed0e06f..76b70ff93d 100644 --- a/unittests/test_fields.py +++ b/unittests/test_fields.py @@ -26,6 +26,24 @@ class FieldTester: getattr(c, 'var') +def test_alt_attr_name(): + class FieldTester: + var = fields.Field(attr_name='foo') + + c = FieldTester() + c.var = 5 + + assert c.var == 5 + assert c.foo == 5 + + c.foo = 6 + assert c.var == 6 + assert c.foo == 6 + + assert 'var' not in c.__dict__ + assert 'foo' in c.__dict__ + + def test_constant_field(): class FieldTester: ro = fields.ConstantField('foo') diff --git a/unittests/test_filters.py b/unittests/test_filters.py index 14643340b1..4f7b6bb2b5 100644 --- a/unittests/test_filters.py +++ b/unittests/test_filters.py @@ -5,53 +5,75 @@ import pytest +import reframe as rfm import reframe.core.exceptions as errors import reframe.frontend.executors as executors import reframe.frontend.filters as filters import reframe.utility.sanity as sn +import unittests.utility as test_util def count_checks(filter_fn, checks): return sn.count(filter(filter_fn, checks)) -def make_case(attrs): - class _MyTest: - def __init__(self): - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - - test = _MyTest() - for k, v in attrs.items(): - setattr(test, k, v) - +def make_case(*args, **kwargs): + test = test_util.make_check(*args, **kwargs) return executors.TestCase(test, None, None) @pytest.fixture def sample_cases(): + class _X(rfm.RegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + return [ - make_case({ - 'name': 'check1', - 'tags': {'a', 'b', 'c', 'd'}, - 'num_gpus_per_node': 1, - 'maintainers': {'A', 'B', 'C', 'D'} - }), - make_case({ - 'name': 
'check2', - 'tags': {'x', 'y', 'z'}, - 'num_gpus_per_node': 0, - 'maintainers': {'X', 'Y', 'Z'} - }), - make_case({ - 'name': 'check3', - 'tags': {'a', 'z'}, - 'num_gpus_per_node': 1, - 'maintainers': {'A', 'Z'} - }) + make_case(_X, alt_name='check1', + tags={'a', 'b', 'c', 'd'}, + num_gpus_per_node=1, + maintainers=['A', 'B', 'C', 'D']), + make_case(_X, alt_name='check2', + tags={'x', 'y', 'z'}, + num_gpus_per_node=0, + maintainers=['X', 'Y', 'Z']), + make_case(_X, alt_name='check3', + tags={'a', 'z'}, + num_gpus_per_node=1, + maintainers=['A', 'Z']) ] +@pytest.fixture +def use_compact_names(make_exec_ctx_g): + yield from make_exec_ctx_g(options={'general/compact_test_names': True}) + + +@pytest.fixture +def sample_param_cases(use_compact_names): + class _X(rfm.RegressionTest): + p = parameter([1, 1, 3]) + valid_systems = ['*'] + valid_prog_environs = ['*'] + + return [executors.TestCase(_X(variant_num=v), None, None) + for v in range(_X.num_variants)] + + +@pytest.fixture +def sample_param_cases_compat(): + # Param cases with the old naming scheme; i.e., with + # `general/compact_test_names=False` + + class _X(rfm.RegressionTest): + p = parameter([1, 1, 3]) + valid_systems = ['*'] + valid_prog_environs = ['*'] + + return [executors.TestCase(_X(variant_num=v), None, None) + for v in range(_X.num_variants)] + + def test_have_name(sample_cases): assert 1 == count_checks(filters.have_name('check1'), sample_cases) assert 3 == count_checks(filters.have_name('check'), sample_cases) @@ -62,6 +84,23 @@ def test_have_name(sample_cases): sample_cases) +def test_have_name_param_test(sample_param_cases): + assert 2 == count_checks(filters.have_name('.*%p=1'), sample_param_cases) + assert 1 == count_checks(filters.have_name('_X%p=3'), sample_param_cases) + assert 1 == count_checks(filters.have_name('_X@2'), sample_param_cases) + + +def test_have_name_param_test_compat(sample_param_cases_compat): + assert 0 == count_checks(filters.have_name('.*%p=1'), + sample_param_cases_compat) + assert 0 == count_checks(filters.have_name('_X%p=3'), + sample_param_cases_compat) + assert 0 == count_checks(filters.have_name('_X@2'), + sample_param_cases_compat) + assert 2 == count_checks(filters.have_name('_X_1'), + sample_param_cases_compat) + + def test_have_not_name(sample_cases): assert 2 == count_checks(filters.have_not_name('check1'), sample_cases) assert 1 == count_checks(filters.have_not_name('check1|check3'), diff --git a/unittests/test_fixtures.py b/unittests/test_fixtures.py index 3b399b3158..9019c1fce4 100644 --- a/unittests/test_fixtures.py +++ b/unittests/test_fixtures.py @@ -259,13 +259,14 @@ class Foo(rfm.RegressionTest): def test_fixture_data(): '''Test the structure that holds the raw fixture data in the registry.''' - d = fixtures.FixtureData(1, 2, 3, 4, 5) - assert d.data == (1, 2, 3, 4, 5) + d = fixtures.FixtureData(1, 2, 3, 4, 5, 'foo') + assert d.data == (1, 2, 3, 4, 5, 'foo') assert d.variant_num == 1 assert d.environments == 2 assert d.partitions == 3 assert d.variables == 4 assert d.scope == 5 + assert d.scope_enc == 'foo' @pytest.fixture diff --git a/unittests/test_loader.py b/unittests/test_loader.py index e85c9b5482..aa81836271 100644 --- a/unittests/test_loader.py +++ b/unittests/test_loader.py @@ -123,56 +123,34 @@ def setup(self, partition, environ, **job_opts): @rfm.simple_test class TestSimple(rfm.RegressionTest): - def __init__(self): - pass + pass @rfm.simple_test class TestSpecial(rfm.RegressionTest, special=True): - def __init__(self): - pass - def setup(self, partition, environ, 
**job_opts): super().setup(partition, environ, **job_opts) @rfm.simple_test class TestSpecialRunOnly(rfm.RunOnlyRegressionTest, special=True): - def __init__(self): - pass - def setup(self, partition, environ, **job_opts): super().setup(partition, environ, **job_opts) - def run(self): - super().run() - @rfm.simple_test class TestSpecialCompileOnly(rfm.CompileOnlyRegressionTest, special=True): - def __init__(self): - pass - def setup(self, partition, environ, **job_opts): super().setup(partition, environ, **job_opts) - def run(self): - super().run() - with pytest.raises(ReframeSyntaxError): @rfm.simple_test class TestSpecialDerived(TestSpecial): - def __init__(self): - pass - def setup(self, partition, environ, **job_opts): super().setup(partition, environ, **job_opts) with pytest.warns(ReframeDeprecationWarning): @rfm.simple_test class TestFinal(rfm.RegressionTest): - def __init__(self): - pass - @rfm.final def my_new_final(self): pass diff --git a/unittests/test_meta.py b/unittests/test_meta.py index 5275401e96..8d35208acd 100644 --- a/unittests/test_meta.py +++ b/unittests/test_meta.py @@ -381,13 +381,13 @@ class Foo(MyMeta): v = variable(int, value=1) assert Foo().v == 1 - assert Foo(variables={'v': 10}).v == 10 + assert Foo(fixt_vars={'v': 10}).v == 10 # Non-variables are silently ignored - assert not hasattr(Foo(variables={'vv': 10}), 'vv') + assert not hasattr(Foo(fixt_vars={'vv': 10}), 'vv') with pytest.raises(TypeError): - Foo(variables='not a mapping') + Foo(fixt_vars='not a mapping') def test_variants(MyMeta): @@ -497,8 +497,14 @@ class Foo(MyMeta): q = parameter(range(10)) variants = Foo.get_variant_nums(p=lambda x: x < 5, q=lambda x: x > 3) - for variant in variants: - assert Foo.get_variant_info(variant)['params']['p'] < 5 - assert Foo.get_variant_info(variant)['params']['q'] > 3 + for v in variants: + assert Foo.get_variant_info(v)['params']['p'] < 5 + assert Foo.get_variant_info(v)['params']['q'] > 3 assert Foo.get_variant_nums() == list(range(Foo.num_variants)) + + # Check condensed syntax + variants = Foo.get_variant_nums(p=5, q=4) + for v in variants: + assert Foo.get_variant_info(v)['params']['p'] == 5 + assert Foo.get_variant_info(v)['params']['q'] == 4 diff --git a/unittests/test_parameters.py b/unittests/test_parameters.py index 928395d23e..5cec18cda0 100644 --- a/unittests/test_parameters.py +++ b/unittests/test_parameters.py @@ -312,7 +312,7 @@ class Foo(rfm.RegressionTest): p = 4 p = parameter([1, 2]) - assert Foo.p == (1, 2,) + assert Foo.p.values == (1, 2,) def test_override_parameter(): @@ -353,7 +353,7 @@ def test_class_attr_access(): class MyTest(rfm.RegressionTest): p = parameter([1, 2, 3]) - assert MyTest.p == (1, 2, 3,) + assert MyTest.p.values == (1, 2, 3,) with pytest.raises(ReframeSyntaxError, match='cannot override parameter'): MyTest.p = (4, 5,) diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 4dd11dbc9f..d81f42708f 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -18,6 +18,7 @@ from reframe.core.exceptions import (BuildError, PipelineError, ReframeError, PerformanceError, SanityError, SkipTestError, ReframeSyntaxError) +from reframe.core.meta import make_test def _run(test, partition, prgenv): @@ -140,22 +141,20 @@ def _container_exec_ctx(platform): def test_eq(): - class T0(rfm.RegressionTest): - def __init__(self): - self.name = 'T0' - - class T1(rfm.RegressionTest): - def __init__(self): - self.name = 'T0' + T0 = make_test('T0', (rfm.RegressionTest,), {}) + T1 = make_test('T1', 
(rfm.RegressionTest,), {}) + T2 = make_test('T1', (rfm.RegressionTest,), {}) - t0, t1 = T0(), T1() - assert t0 == t1 - assert hash(t0) == hash(t1) - - t1.name = 'T1' + t0, t1, t2 = T0(), T1(), T2() assert t0 != t1 assert hash(t0) != hash(t1) + # T1 and T2 are different classes but have the same name, so the + # corresponding tests should compare equal + assert T1 is not T2 + assert t1 == t2 + assert hash(t1) == hash(t2) + def test_environ_setup(hellotest, local_exec_ctx): # Use test environment for the regression check @@ -201,36 +200,22 @@ def test_hellocheck_build_remotely(hellotest, remote_exec_ctx): assert not hellotest.build_job.scheduler.is_local -def test_hellocheck_local_prepost_run(hellotest, local_exec_ctx): - @sn.deferrable - def stagedir(test): - return test.stagedir - - # Test also the prebuild/postbuild functionality - hellotest.prerun_cmds = ['echo prerun: `pwd`'] - hellotest.postrun_cmds = ['echo postrun: `pwd`'] - pre_run_path = sn.extractsingle(r'^prerun: (\S+)', hellotest.stdout, 1) - post_run_path = sn.extractsingle(r'^postrun: (\S+)', hellotest.stdout, 1) - hellotest.sanity_patterns = sn.all([ - sn.assert_eq(stagedir(hellotest), pre_run_path), - sn.assert_eq(stagedir(hellotest), post_run_path), - ]) - _run(hellotest, *local_exec_ctx) - +def test_hellocheck_local_prepost_run(HelloTest, local_exec_ctx): + class _X(HelloTest): + # Test also the prebuild/postbuild functionality + prerun_cmds = ['echo prerun: `pwd`'] + postrun_cmds = ['echo postrun: `pwd`'] -def test_run_only_sanity(local_exec_ctx): - @test_util.custom_prefix('unittests/resources/checks') - class MyTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.executable = './hello.sh' - self.executable_opts = ['Hello, World!'] - self.local = True - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.sanity_patterns = sn.assert_found( - r'Hello, World\!', self.stdout) + @sanity_function + def validate(self): + pre_path = sn.extractsingle(r'^prerun: (\S+)', self.stdout, 1) + post_path = sn.extractsingle(r'^postrun: (\S+)', self.stdout, 1) + return sn.all([ + sn.assert_eq(self.stagedir, pre_path), + sn.assert_eq(self.stagedir, post_path), + ]) - _run(MyTest(), *local_exec_ctx) + _run(_X(), *local_exec_ctx) def test_run_only_set_sanity_in_a_hook(local_exec_ctx): @@ -245,7 +230,8 @@ class MyTest(rfm.RunOnlyRegressionTest): @run_after('run') def set_sanity(self): self.sanity_patterns = sn.assert_found( - r'Hello, World\!', self.stdout) + r'Hello, World\!', self.stdout + ) _run(MyTest(), *local_exec_ctx) @@ -276,12 +262,10 @@ class MyOtherTest(MyTest): def test_run_only_no_srcdir(local_exec_ctx): @test_util.custom_prefix('foo/bar/') class MyTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.executable = 'echo' - self.executable_opts = ['hello'] - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.sanity_patterns = sn.assert_found(r'hello', self.stdout) + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo' + sanity_patterns = sn.assert_true(1) test = MyTest() assert test.sourcesdir is None @@ -314,10 +298,9 @@ class MyTest(rfm.RunOnlyRegressionTest): def test_compile_only_failure(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.sourcepath = 'compiler_failure.c' - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + sourcepath = 'compiler_failure.c' + valid_prog_environs = ['*'] + valid_systems = ['*'] test = MyTest() 
test.setup(*local_exec_ctx) @@ -329,13 +312,18 @@ def __init__(self): def test_compile_only_warning(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.build_system = 'SingleSource' - self.build_system.srcfile = 'compiler_warning.c' + valid_prog_environs = ['*'] + valid_systems = ['*'] + build_system = 'SingleSource' + sourcepath = 'compiler_warning.c' + + @run_before('compile') + def setup_build(self): self.build_system.cflags = ['-Wall'] - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.sanity_patterns = sn.assert_found(r'warning', self.stderr) + + @sanity_function + def validate(self): + return sn.assert_found(r'warning', self.stderr) _run(MyTest(), *local_exec_ctx) @@ -408,10 +396,9 @@ def test_supports_environ(hellotest, generic_system): def test_sourcesdir_none(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RegressionTest): - def __init__(self): - self.sourcesdir = None - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + sourcesdir = None + valid_prog_environs = ['*'] + valid_systems = ['*'] with pytest.raises(ReframeError): _run(MyTest(), *local_exec_ctx) @@ -420,14 +407,15 @@ def __init__(self): def test_sourcesdir_build_system(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RegressionTest): - def __init__(self): - self.build_system = 'Make' - self.sourcepath = 'code' - self.executable = './code/hello' - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sanity_patterns = sn.assert_found(r'Hello, World\!', - self.stdout) + build_system = 'Make' + sourcepath = 'code' + executable = './code/hello' + valid_systems = ['*'] + valid_prog_environs = ['*'] + + @sanity_function + def validate(self): + return sn.assert_found(r'Hello, World\!', self.stdout) _run(MyTest(), *local_exec_ctx) @@ -435,18 +423,19 @@ def __init__(self): def test_sourcesdir_none_generated_sources(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RegressionTest): - def __init__(self): - self.sourcesdir = None - self.prebuild_cmds = [ - "printf '#include \\n int main(){ " - "printf(\"Hello, World!\\\\n\"); return 0; }' > hello.c" - ] - self.executable = './hello' - self.sourcepath = 'hello.c' - self.valid_systems = ['*'] - self.valid_prog_environs = ['*'] - self.sanity_patterns = sn.assert_found(r'Hello, World\!', - self.stdout) + sourcesdir = None + prebuild_cmds = [ + "printf '#include \\n int main(){ " + "printf(\"Hello, World!\\\\n\"); return 0; }' > hello.c" + ] + executable = './hello' + sourcepath = 'hello.c' + valid_systems = ['*'] + valid_prog_environs = ['*'] + + @sanity_function + def validate(self): + return sn.assert_found(r'Hello, World\!', self.stdout) _run(MyTest(), *local_exec_ctx) @@ -454,10 +443,9 @@ def __init__(self): def test_sourcesdir_none_compile_only(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.sourcesdir = None - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + sourcesdir = None + valid_prog_environs = ['*'] + valid_systems = ['*'] with pytest.raises(BuildError): _run(MyTest(), *local_exec_ctx) @@ -466,14 +454,15 @@ def __init__(self): def test_sourcesdir_none_run_only(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RunOnlyRegressionTest): - def __init__(self): - 
self.sourcesdir = None - self.executable = 'echo' - self.executable_opts = ["Hello, World!"] - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.sanity_patterns = sn.assert_found(r'Hello, World\!', - self.stdout) + sourcesdir = None + executable = 'echo' + executable_opts = ['Hello, World!'] + valid_prog_environs = ['*'] + valid_systems = ['*'] + + @sanity_function + def validate(self): + return sn.assert_found(r'Hello, World\!', self.stdout) _run(MyTest(), *local_exec_ctx) @@ -481,9 +470,8 @@ def __init__(self): def test_sourcepath_abs(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + valid_prog_environs = ['*'] + valid_systems = ['*'] test = MyTest() test.setup(*local_exec_ctx) @@ -495,9 +483,8 @@ def __init__(self): def test_sourcepath_upref(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + valid_prog_environs = ['*'] + valid_systems = ['*'] test = MyTest() test.setup(*local_exec_ctx) @@ -509,9 +496,8 @@ def __init__(self): def test_sourcepath_non_existent(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.CompileOnlyRegressionTest): - def __init__(self): - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + valid_prog_environs = ['*'] + valid_systems = ['*'] test = MyTest() test.setup(*local_exec_ctx) @@ -524,11 +510,7 @@ def __init__(self): def test_extra_resources(HelloTest, testsys_system): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.local = True + local = True @run_after('setup') def set_resources(self): @@ -610,11 +592,7 @@ def __init__(self): def test_setup_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.count = 0 + count = variable(int, value=0) @run_before('setup') def prefoo(self): @@ -634,11 +612,7 @@ def postfoo(self): def test_compile_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.count = 0 + count = variable(int, value=0) @run_before('compile') def setflags(self): @@ -659,11 +633,6 @@ def check_executable(self): def test_run_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - @run_before('run') def setflags(self): self.postrun_cmds = ['echo hello > greetings.txt'] @@ -681,11 +650,7 @@ def check_executable(self): def test_multiple_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_after('setup') def x(self): @@ -707,11 
+672,7 @@ def z(self): def test_stacked_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_before('setup') @run_after('setup') @@ -733,11 +694,7 @@ class MyTest(rfm.RunOnlyRegressionTest, HelloTest): def test_inherited_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class BaseTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_after('setup') def x(self): @@ -822,11 +779,7 @@ def test_inherited_hooks_order(weird_mro_test, local_exec_ctx): def test_inherited_hooks_from_instantiated_tests(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class T0(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 + var = variable(int, value=0) @run_after('setup') def x(self): @@ -851,12 +804,8 @@ def y(self): def test_overriden_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class BaseTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 - self.foo = 0 + var = variable(int, value=0) + foo = variable(int, value=0) @run_after('setup') def x(self): @@ -885,12 +834,8 @@ def y(self): def test_disabled_hooks(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class BaseTest(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.var = 0 - self.foo = 0 + var = variable(int, value=0) + foo = variable(int, value=0) @run_after('setup') def x(self): @@ -918,18 +863,12 @@ def test_require_deps(HelloTest, local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class T0(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) - self.x = 1 + x = variable(int, value=1) @test_util.custom_prefix('unittests/resources/checks') class T1(HelloTest): - def __init__(self): - super().__init__() - self.name = type(self).__name__ - self.executable = os.path.join('.', self.name) + @run_after('init') + def setdeps(self): self.depends_on('T0') @require_deps @@ -955,6 +894,9 @@ def setz(self, T0): assert t.z == 3 +# All the following tests about naming are for the deprecated +# @parameterized_test decorator + def test_regression_test_name(): class MyTest(rfm.RegressionTest): def __init__(self, a, b): @@ -963,7 +905,7 @@ def __init__(self, a, b): test = MyTest(1, 2) assert os.path.abspath(os.path.dirname(__file__)) == test.prefix - assert 'test_regression_test_name..MyTest_1_2' == test.name + assert 'MyTest_1_2' == test.name def test_strange_test_names(): @@ -980,8 +922,7 @@ def __init__(self, a, b): self.b = b test = MyTest('(a*b+c)/12', C(33)) - assert ('test_strange_test_names..MyTest__a_b_c__12_C_33_' == - test.name) + assert 'MyTest__a_b_c__12_C_33_' == test.name def test_name_user_inheritance(): @@ -995,7 +936,7 @@ def __init__(self): super().__init__(1, 2) test = MyTest() - assert 'test_name_user_inheritance..MyTest' == test.name + 
assert 'MyTest' == test.name def test_name_runonly_test(): @@ -1006,7 +947,7 @@ def __init__(self, a, b): test = MyTest(1, 2) assert os.path.abspath(os.path.dirname(__file__)) == test.prefix - assert 'test_name_runonly_test..MyTest_1_2' == test.name + assert 'MyTest_1_2' == test.name def test_name_compileonly_test(): @@ -1017,7 +958,7 @@ def __init__(self, a, b): test = MyTest(1, 2) assert os.path.abspath(os.path.dirname(__file__)) == test.prefix - assert 'test_name_compileonly_test..MyTest_1_2' == test.name + assert 'MyTest_1_2' == test.name def test_trap_job_errors_without_sanity_patterns(local_exec_ctx): @@ -1025,10 +966,9 @@ def test_trap_job_errors_without_sanity_patterns(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.executable = 'exit 10' + valid_prog_environs = ['*'] + valid_systems = ['*'] + executable = 'exit 10' with pytest.raises(SanityError, match='job exited with exit code 10'): _run(MyTest(), *local_exec_ctx) @@ -1039,12 +979,14 @@ def test_trap_job_errors_with_sanity_patterns(local_exec_ctx): @test_util.custom_prefix('unittests/resources/checks') class MyTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] - self.prerun_cmds = ['echo hello'] - self.executable = 'true' - self.sanity_patterns = sn.assert_not_found(r'hello', self.stdout) + valid_prog_environs = ['*'] + valid_systems = ['*'] + prerun_cmds = ['echo hello'] + executable = 'true' + + @sanity_function + def validate(self): + return sn.assert_not_found(r'hello', self.stdout) with pytest.raises(SanityError): _run(MyTest(), *local_exec_ctx) @@ -1074,6 +1016,9 @@ def sanity_file(tmp_path): yield tmp_path / 'sanity.out' +# NOTE: The following series of tests test the `perf_patterns` syntax, so they +# should not change to the `@performance_function` syntax` + @pytest.fixture def dummytest(testsys_system, perf_file, sanity_file): class MyTest(rfm.RunOnlyRegressionTest): @@ -1434,18 +1379,22 @@ def container_test(tmp_path): def _container_test(platform, image): @test_util.custom_prefix(tmp_path) class ContainerTest(rfm.RunOnlyRegressionTest): - def __init__(self): - self.name = 'container_test' - self.valid_prog_environs = ['*'] - self.valid_systems = ['*'] + valid_prog_environs = ['*'] + valid_systems = ['*'] + prerun_cmds = ['touch foo'] + + @run_after('init') + def setup_container_platf(self): self.container_platform = platform self.container_platform.image = image self.container_platform.command = ( f"bash -c 'cd {_STAGEDIR_MOUNT}; pwd; ls; " f"cat /etc/os-release'" ) - self.prerun_cmds = ['touch foo'] - self.sanity_patterns = sn.all([ + + @sanity_function + def assert_os_release(self): + return sn.all([ sn.assert_found(rf'^{_STAGEDIR_MOUNT}', self.stdout), sn.assert_found(r'^foo', self.stdout), sn.assert_found( @@ -1540,3 +1489,63 @@ def access_topo(self): # This test should run to completion without problems _run(EchoTest(), *local_exec_ctx) + + +def test_make_test_without_builtins(local_exec_ctx): + hello_cls = make_test( + 'HelloTest', (rfm.RunOnlyRegressionTest,), + { + 'valid_systems': ['*'], + 'valid_prog_environs': ['*'], + 'executable': 'echo', + 'sanity_patterns': sn.assert_true(1) + } + ) + + assert hello_cls.__name__ == 'HelloTest' + _run(hello_cls(), *local_exec_ctx) + + +def test_make_test_with_builtins(local_exec_ctx): + class _X(rfm.RunOnlyRegressionTest): + valid_systems = ['*'] + 
+        valid_prog_environs = ['*']
+        executable = 'echo'
+        message = variable(str)
+
+        @run_before('run')
+        def set_message(self):
+            self.executable_opts = [self.message]
+
+        @sanity_function
+        def validate(self):
+            return sn.assert_found(self.message, self.stdout)
+
+    hello_cls = make_test('HelloTest', (_X,), {})
+    hello_cls.setvar('message', 'hello')
+    assert hello_cls.__name__ == 'HelloTest'
+    _run(hello_cls(), *local_exec_ctx)
+
+
+def test_set_name_deprecation():
+    from reframe.core.warnings import ReframeDeprecationWarning
+
+    with pytest.warns(ReframeDeprecationWarning):
+        class _X(rfm.RegressionTest):
+            @run_after('init')
+            def set_name(self):
+                self.name = 'foo'
+
+        x = _X()
+
+    assert x.name == 'foo'
+    assert x.unique_name == 'foo'
+
+    with pytest.warns(ReframeDeprecationWarning):
+        class _X(rfm.RegressionTest):
+            name = 'foo'
+
+        x = _X()
+
+    assert x.name == 'foo'
+    assert x.unique_name == 'foo'
diff --git a/unittests/test_policies.py b/unittests/test_policies.py
index d4a70fdd97..66dc4924c2 100644
--- a/unittests/test_policies.py
+++ b/unittests/test_policies.py
@@ -35,11 +35,9 @@
     CompileFailureCheck,
     KeyboardInterruptCheck,
     RetriesCheck,
-    SelfKillCheck,
     SleepCheck,
-    SleepCheckPollFail,
-    SleepCheckPollFailLate,
-    SystemExitCheck,
+    SelfKillCheck,
+    SystemExitCheck
 )


@@ -80,6 +78,26 @@ def testsys_exec_ctx(make_exec_ctx_g):
     yield from make_exec_ctx_g(system='testsys:gpu')


+def make_kbd_check(phase='wait'):
+    return test_util.make_check(KeyboardInterruptCheck, phase=phase)
+
+
+@pytest.fixture
+def make_sleep_check():
+    test_id = 0
+
+    def _do_make_check(sleep_time, poll_fail=None):
+        nonlocal test_id
+        test = test_util.make_check(SleepCheck,
+                                    sleep_time=sleep_time,
+                                    poll_fail=poll_fail,
+                                    alt_name=f'SleepCheck_{test_id}')
+        test_id += 1
+        return test
+
+    return _do_make_check
+
+
 @pytest.fixture(params=[policies.SerialExecutionPolicy,
                         policies.AsynchronousExecutionPolicy])
 def make_runner(request):
@@ -143,7 +161,7 @@ class _T1(rfm.RunOnlyRegressionTest):
         sanity_patterns = sn.assert_true(1)

         def __init__(self):
-            self.depends_on(_T0.__qualname__)
+            self.depends_on('_T0')

     cases = executors.generate_testcases([_T0(), _T1()])
     depgraph, _ = dependencies.build_deps(cases)
@@ -260,13 +278,13 @@ def test_runall(make_runner, make_cases, common_exec_ctx, tmp_path):
     with pytest.raises(ReframeError, match=r'is not a valid JSON file'):
         runreport.load_report(tmp_path / 'invalid.json')

-    # Generate a report with an incorrect data version
-    report['session_info']['data_version'] = '10.0.0'
+    # Generate a report that does not comply with the schema
+    del report['session_info']['data_version']
     with open(tmp_path / 'invalid-version.json', 'w') as fp:
         jsonext.dump(report, fp)

     with pytest.raises(ReframeError,
-                       match=r'incompatible report data versions'):
+                       match=r'invalid report'):
         runreport.load_report(tmp_path / 'invalid-version.json')


@@ -403,7 +421,7 @@ def test_force_local_execution(make_runner, make_cases, testsys_exec_ctx):
 def test_kbd_interrupt_within_test(make_runner, make_cases, common_exec_ctx):
     runner = make_runner()
     with pytest.raises(KeyboardInterrupt):
-        runner.runall(make_cases([KeyboardInterruptCheck()]))
+        runner.runall(make_cases([make_kbd_check()]))

     stats = runner.stats
     assert 1 == len(stats.failed())
@@ -447,14 +465,15 @@ def test_pass_in_retries(make_runner, make_cases, tmp_path, common_exec_ctx):
     tmpfile = tmp_path / 'out.txt'
     tmpfile.write_text('0\n')
     runner = make_runner(max_retries=3)
-    pass_run_no = 2
-    runner.runall(make_cases([RetriesCheck(pass_run_no, tmpfile)]))
+    runner.runall(make_cases([
+        test_util.make_check(RetriesCheck, filename=str(tmpfile), num_runs=2)
+    ]))

     # Ensure that the test passed after retries in run `pass_run_no`
     assert 1 == runner.stats.num_cases()
     assert_runall(runner)
     assert 1 == len(runner.stats.failed(run=0))
-    assert pass_run_no == rt.runtime().current_run
+    assert 2 == rt.runtime().current_run
     assert 0 == len(runner.stats.failed())
@@ -603,11 +622,13 @@ def _read_timestamps(tasks):
     return begin_stamps, end_stamps


-def test_concurrency_unlimited(make_async_runner, make_cases, make_exec_ctx):
+def test_concurrency_unlimited(make_async_runner, make_cases,
+                               make_sleep_check, make_exec_ctx):
     num_checks = 3
     make_exec_ctx(options=max_jobs_opts(num_checks))
     runner, monitor = make_async_runner()
-    runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)]))
+    runner.runall(make_cases([make_sleep_check(.5)
+                              for i in range(num_checks)]))

     # Ensure that all tests were run and without failures.
     assert num_checks == runner.stats.num_cases()
@@ -628,13 +649,15 @@ def test_concurrency_unlimited(make_async_runner, make_cases, make_exec_ctx):
     pytest.skip('the system seems too much loaded.')


-def test_concurrency_limited(make_async_runner, make_cases, make_exec_ctx):
+def test_concurrency_limited(make_async_runner, make_cases,
+                             make_sleep_check, make_exec_ctx):
     # The number of checks must be <= 2*max_jobs.
     num_checks, max_jobs = 5, 3
     make_exec_ctx(options=max_jobs_opts(max_jobs))
     runner, monitor = make_async_runner()
-    runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)]))
+    runner.runall(make_cases([make_sleep_check(.5)
+                              for i in range(num_checks)]))

     # Ensure that all tests were run and without failures.
     assert num_checks == runner.stats.num_cases()
@@ -655,13 +678,13 @@ def test_concurrency_limited(make_async_runner, make_cases, make_exec_ctx):
                                              end_stamps[:-max_jobs]))
     assert all(begin_after_end)

-    # NOTE: to ensure that these remaining jobs were also run
-    # in parallel one could do the command hereafter; however, it would
-    # require to substantially increase the sleep time (in SleepCheck),
-    # because of the delays in rescheduling (1s, 2s, 3s, 1s, 2s,...).
-    # We currently prefer not to do this last concurrency test to avoid an
-    # important prolongation of the unit test execution time.
-    # self.assertTrue(self.begin_stamps[-1] < self.end_stamps[max_jobs])
+    # NOTE: to ensure that these remaining jobs were also run in parallel one
+    # could do the command hereafter; however, it would require to
+    # substantially increase the sleep time, because of the delays in
+    # rescheduling (1s, 2s, 3s, 1s, 2s,...). We currently prefer not to do
+    # this last concurrency test to avoid an important prolongation of the
+    # unit test execution time. self.assertTrue(self.begin_stamps[-1] <
+    # self.end_stamps[max_jobs])

     # Warn if the first #max_jobs jobs were not run in parallel; the
     # corresponding strict check would be:
@@ -670,12 +693,14 @@ def test_concurrency_limited(make_async_runner, make_cases, make_exec_ctx):
         pytest.skip('the system seems too loaded.')


-def test_concurrency_none(make_async_runner, make_cases, make_exec_ctx):
+def test_concurrency_none(make_async_runner, make_cases,
+                          make_sleep_check, make_exec_ctx):
     num_checks = 3
     make_exec_ctx(options=max_jobs_opts(1))
     runner, monitor = make_async_runner()
-    runner.runall(make_cases([SleepCheck(.5) for i in range(num_checks)]))
+    runner.runall(make_cases([make_sleep_check(.5)
+                              for i in range(num_checks)]))

     # Ensure that all tests were run and without failures.
     assert num_checks == runner.stats.num_cases()
@@ -710,21 +735,22 @@ def assert_interrupted_run(runner):
         assert t.exc_info[0] == AbortTaskError


-def test_kbd_interrupt_in_wait_with_concurrency(make_async_runner, make_cases,
-                                                make_exec_ctx):
+def test_kbd_interrupt_in_wait_with_concurrency(
+    make_async_runner, make_cases, make_sleep_check, make_exec_ctx
+):
     make_exec_ctx(options=max_jobs_opts(4))
     runner, _ = make_async_runner()
     with pytest.raises(KeyboardInterrupt):
         runner.runall(make_cases([
-            KeyboardInterruptCheck(), SleepCheck(10),
-            SleepCheck(10), SleepCheck(10)
+            make_kbd_check(), make_sleep_check(10),
+            make_sleep_check(10), make_sleep_check(10)
         ]))

     assert_interrupted_run(runner)


 def test_kbd_interrupt_in_wait_with_limited_concurrency(
-    make_async_runner, make_cases, make_exec_ctx
+    make_async_runner, make_cases, make_sleep_check, make_exec_ctx
 ):
     # The general idea for this test is to allow enough time for all the
     # four checks to be submitted and at the same time we need the
@@ -735,82 +761,86 @@ def test_kbd_interrupt_in_wait_with_limited_concurrency(
     runner, _ = make_async_runner()
     with pytest.raises(KeyboardInterrupt):
         runner.runall(make_cases([
-            KeyboardInterruptCheck(), SleepCheck(10),
-            SleepCheck(10), SleepCheck(10)
+            make_kbd_check(), make_sleep_check(10),
+            make_sleep_check(10), make_sleep_check(10)
         ]))

     assert_interrupted_run(runner)


-def test_kbd_interrupt_in_setup_with_concurrency(make_async_runner, make_cases,
-                                                 make_exec_ctx):
+def test_kbd_interrupt_in_setup_with_concurrency(
+    make_async_runner, make_cases, make_sleep_check, make_exec_ctx
+):
     make_exec_ctx(options=max_jobs_opts(4))
     runner, _ = make_async_runner()
     with pytest.raises(KeyboardInterrupt):
         runner.runall(make_cases([
-            SleepCheck(1), SleepCheck(1), SleepCheck(1),
-            KeyboardInterruptCheck(phase='setup')
+            make_sleep_check(1), make_sleep_check(1), make_sleep_check(1),
+            make_kbd_check(phase='setup')
         ]))

     assert_interrupted_run(runner)


 def test_kbd_interrupt_in_setup_with_limited_concurrency(
-    make_async_runner, make_cases, make_exec_ctx
+    make_async_runner, make_sleep_check, make_cases, make_exec_ctx
 ):
     make_exec_ctx(options=max_jobs_opts(2))
     runner, _ = make_async_runner()
     with pytest.raises(KeyboardInterrupt):
         runner.runall(make_cases([
-            SleepCheck(1), SleepCheck(1), SleepCheck(1),
-            KeyboardInterruptCheck(phase='setup')
+            make_sleep_check(1), make_sleep_check(1), make_sleep_check(1),
+            make_kbd_check(phase='setup')
         ]))

     assert_interrupted_run(runner)


 def test_run_complete_fails_main_loop(make_async_runner, make_cases,
-                                      make_exec_ctx):
+                                      make_sleep_check, make_exec_ctx):
     make_exec_ctx(options=max_jobs_opts(1))
     runner, _ = make_async_runner()
     num_checks = 3
-    runner.runall(make_cases([SleepCheckPollFail(10),
-                              SleepCheck(0.1), SleepCheckPollFail(10)]))
+    runner.runall(make_cases([make_sleep_check(10, poll_fail='early'),
+                              make_sleep_check(0.1),
+                              make_sleep_check(10, poll_fail='early')]))

     assert_runall(runner)
     stats = runner.stats
     assert stats.num_cases() == num_checks
     assert len(stats.failed()) == 2

-    # Verify that the succeeded test is the SleepCheck
+    # Verify that the succeeded test is a SleepCheck
     for t in stats.tasks():
         if not t.failed:
-            assert isinstance(t.check, SleepCheck)
+            assert t.check.name.startswith('SleepCheck')


 def test_run_complete_fails_busy_loop(make_async_runner, make_cases,
-                                      make_exec_ctx):
+                                      make_sleep_check, make_exec_ctx):
     make_exec_ctx(options=max_jobs_opts(1))
     runner, _ = make_async_runner()
     num_checks = 3
-    runner.runall(make_cases([SleepCheckPollFailLate(1),
-                              SleepCheck(0.1), SleepCheckPollFailLate(0.5)]))
+    runner.runall(make_cases([make_sleep_check(1, poll_fail='late'),
+                              make_sleep_check(0.1),
+                              make_sleep_check(0.5, poll_fail='late')]))

     assert_runall(runner)
     stats = runner.stats
     assert stats.num_cases() == num_checks
     assert len(stats.failed()) == 2

-    # Verify that the succeeded test is the SleepCheck
+    # Verify that the succeeded test is a SleepCheck
     for t in stats.tasks():
         if not t.failed:
-            assert isinstance(t.check, SleepCheck)
+            assert t.check.name.startswith('SleepCheck')


 def test_compile_fail_reschedule_main_loop(make_async_runner, make_cases,
-                                           make_exec_ctx):
+                                           make_sleep_check, make_exec_ctx):
     make_exec_ctx(options=max_jobs_opts(1))
     runner, _ = make_async_runner()
     num_checks = 2
-    runner.runall(make_cases([SleepCheckPollFail(.1), CompileFailureCheck()]))
+    runner.runall(make_cases([make_sleep_check(.1, poll_fail='early'),
+                              CompileFailureCheck()]))

     stats = runner.stats
     assert num_checks == stats.num_cases()
@@ -819,12 +849,13 @@ def test_compile_fail_reschedule_main_loop(make_async_runner, make_cases,


 def test_compile_fail_reschedule_busy_loop(make_async_runner, make_cases,
-                                           make_exec_ctx):
+                                           make_sleep_check, make_exec_ctx):
     make_exec_ctx(options=max_jobs_opts(1))
     runner, _ = make_async_runner()
     num_checks = 2
     runner.runall(
-        make_cases([SleepCheckPollFailLate(1.5), CompileFailureCheck()])
+        make_cases([make_sleep_check(1.5, poll_fail='late'),
+                    CompileFailureCheck()])
     )

     stats = runner.stats
     assert num_checks == stats.num_cases()
diff --git a/unittests/test_variables.py b/unittests/test_variables.py
index 2a7777578b..3d755aa99c 100644
--- a/unittests/test_variables.py
+++ b/unittests/test_variables.py
@@ -155,7 +155,8 @@ def test_require_var(OneVarTest):
     class MyTest(OneVarTest):
         foo = required

-        def __init__(self):
+        @run_after('init')
+        def print_foo(self):
             print(self.foo)

     with pytest.raises(AttributeError):
@@ -166,9 +167,6 @@ def test_required_var_not_present(OneVarTest):
     class MyTest(OneVarTest):
         foo = required

-        def __init__(self):
-            pass
-
     MyTest()
@@ -456,3 +454,35 @@ class A(rfm.RegressionTest):
     assert math.trunc(npi) == -3
     assert math.floor(npi) == -4
     assert math.ceil(npi) == -3
+
+
+def test_var_deprecation():
+    from reframe.core.variables import DEPRECATE_RD, DEPRECATE_WR
+    from reframe.core.warnings import ReframeDeprecationWarning
+
+    # Check read deprecation
+    class A(rfm.RegressionMixin):
+        x = deprecate(variable(int, value=3),
+                      'accessing x is deprecated', DEPRECATE_RD)
+        y = deprecate(variable(int, value=5),
+                      'setting y is deprecated', DEPRECATE_WR)
+
+    class B(A):
+        z = variable(int, value=y)
+
+    with pytest.warns(ReframeDeprecationWarning):
+        class C(A):
+            w = variable(int, value=x)
+
+    with pytest.warns(ReframeDeprecationWarning):
+        class D(A):
+            y = 3
+
+    # Check that deprecation warnings are raised properly after instantiation
+    a = A()
+    with pytest.warns(ReframeDeprecationWarning):
+        c = a.x
+
+    c = a.y
+    with pytest.warns(ReframeDeprecationWarning):
+        a.y = 10
diff --git a/unittests/test_warnings.py b/unittests/test_warnings.py
index f571345a98..25a2412d15 100644
--- a/unittests/test_warnings.py
+++ b/unittests/test_warnings.py
@@ -63,3 +63,11 @@ def test_random_warning_formatting():
         'deprecated', UserWarning, 'file', 10, 'a = 1'
     )
     assert message == 'file:10: UserWarning: deprecated\n a = 1\n'
+
+
+def test_suppress_deprecations():
+    with warn.suppress_deprecations():
+        warn.user_deprecation_warning('warning 1')
+
+    with pytest.warns(warn.ReframeDeprecationWarning):
+        warn.user_deprecation_warning('warning 2')
diff --git a/unittests/utility.py b/unittests/utility.py
index 1384d3aa31..de4ab22f89 100644
--- a/unittests/utility.py
+++ b/unittests/utility.py
@@ -15,6 +15,7 @@
 import reframe.core.config as config
 import reframe.core.modules as modules
 import reframe.core.runtime as rt
+from reframe.core.meta import make_test


 TEST_RESOURCES = os.path.join(
@@ -159,3 +160,20 @@ def _wrapped(*args, **kwargs):
         return _wrapped

     return _dispatch_deco
+
+
+def make_check(cls, *, alt_name=None, **vars):
+    '''Create a new test from class `cls`.
+
+    :arg cls: the class of the test.
+    :arg alt_name: an alternative name to be given to the test class.
+    :arg vars: variables to set in the test upon creation.
+    '''
+
+    if alt_name:
+        cls = make_test(alt_name, (cls,), {})
+
+    for k, v in vars.items():
+        cls.setvar(k, v)
+
+    return cls()