diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..50e2935246 --- /dev/null +++ b/.gitignore @@ -0,0 +1,92 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + +# TextMATE files +._* diff --git a/README.md b/README.md index 021df5122c..80a5469189 100644 --- a/README.md +++ b/README.md @@ -14,4 +14,4 @@ Users can create their own test hierarchies, create test factories for generatin ## Documentation -The official documentation is maintaned [here](https://eth-cscs.github.io/reframe). +The official documentation is maintaned [here](https://madra.cscs.ch/scs/reframe/wikis/home). 
diff --git a/ci-scripts/ci-runner.bash b/ci-scripts/ci-runner.bash index 4ae9f0d989..18c912d367 100644 --- a/ci-scripts/ci-runner.bash +++ b/ci-scripts/ci-runner.bash @@ -19,6 +19,20 @@ MODULEUSE="" CI_EXITCODE=0 +swap_files() +{ + if [ $# -lt 2 ]; then + echo "too few arguments to swap_files()" 1>&2 + exit 1 + fi + + tmp="${1}_save" + cp $1 $tmp + cp $2 $1 + cp $tmp $2 + /bin/rm $tmp +} + usage() { cat < $log; -# make the newly created $log file group readable and writeable - chmod g+rw $log; -elif [ ! -r "$log" ]; then -# echo an error if $log file is not group readable and exit - echo -e "$machine $cdate $SLURM_JOB_ID \t ERROR: log file $log is not group readable!" >> /dev/stderr; - exit 1; -elif [ ! -w "$log" ]; then -# echo an error if $log file is not group writeable and exit - echo -e "$machine $cdate $SLURM_JOB_ID \t ERROR: log file $log is not group writeable!" >> /dev/stderr; - exit 1; -fi -# reference performance file -reffile="$SLURM_SUBMIT_DIR/$TESTNAME.ref"; -# SLURM variables -SLURM_NTASKS=$[$SLURM_NTASKS_PER_NODE*$SLURM_JOB_NUM_NODES]; -echo -e "\nRunning on $machine (job ID $SLURM_JOB_ID)"; -echo -e "Log file : $log"; -echo -e "Output folder : $outdir"; -echo -e "Running folder: $scrdir"; -echo -e "$SLURM_JOB_NUM_NODES nodes, $SLURM_NTASKS_PER_NODE tasks per node ($SLURM_TASKS_PER_NODE), $SLURM_CPUS_PER_TASK cpus per task, $SLURM_NTASKS total tasks\n"; -#======================================== -# RUN AND CHECK (APPLICATION SPECIFIC) -#======================================== -# copy files to $scrdir and change directory -cp INCAR KPOINTS POSCAR POTCAR $scrdir; -cd $scrdir -# OpenMP threads -export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK; -# set stack size unlimited -ulimit -s unlimited; -# load modules and run simulation -module load slurm; -module load VASP; -EXE="vasp_std"; -srun -n $SLURM_NTASKS --ntasks-per-node=$SLURM_NTASKS_PER_NODE -c $SLURM_CPUS_PER_TASK $EXE; -#======================================== -# check output file -check=$(grep 
"aborting loop because EDIFF is reached" "$out" | awk '{print $7}'); -if [[ "$check" != "reached" ]]; then - echo -e "$machine $timestamp $SLURM_JOB_ID \t Job output NOT OK: test FAILED!" >> "$log"; - exit 1; -fi -#======================================== -# summary of results: new time (s) -new=$(grep "Total CPU time used" "$out" | awk '{print $6}'); -# reference time (s), performance difference (%) -if [ -f "$reffile" ]; then - ref=$(grep -v '#' $reffile | grep $machine | tail -1 | sed 's/Time\=//' | awk '{printf"%.3lf\n", $(NF-1)}'); - if [ -n "$ref" ]; then - delta=$(printf '%.3lf\n' $(echo "(1-$new/$ref)*100" | bc -l)); - else - delta=0; - fi -else - delta=0; -fi -# print results on log file -echo -e "$machine $cdate $SLURM_JOB_ID \t Time=$new Difference=$delta%" >> "$log"; -#======================================== -# COPY OUTPUT (GENERAL) -#======================================== -# copy output and log files to outdir -store="$SLURM_JOB_NAME-$SLURM_JOB_ID.out"; -chmod g+rw $out; -cp -p $out $outdir/$store; -exit 0; -#======================================== diff --git a/examples/apps/vasp/src/gpu/9062-vasp_gpu-dom.sbatch b/examples/apps/vasp/src/gpu/9062-vasp_gpu-dom.sbatch deleted file mode 100644 index 5699911bca..0000000000 --- a/examples/apps/vasp/src/gpu/9062-vasp_gpu-dom.sbatch +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash -l -# -#======================================== -# DESCRIPTION (APPLICATION SPECIFIC) -#======================================== -# The script runs the CeO2 test of Peter Larsson's VASP test suite -# It can be launched by the following scripts: -# - run_regression.sh -# - 9000-scientific_applications_launcher.sh -# - only if $USER has write permissions in $SLURM_SUBMIT_DIR: "sbatch $TESTID-$TESTNAME-$machine.sbatch" -# The test will run on the $SCRATCH of the $USER that runs this script -# The output and log file will be saved ... 
-# -# Input files: -# INCAR -# KPOINTS -# POSCAR -# POTCAR -# -# Output files: -# $outdir/$TESTID-$TESTNAME-$SLURM_JOB_ID.out -# -# Log and reference file for perfomance: -# $logdir/$TESTID-$TESTNAME.log -# $TESTNAME.ref -#======================================== -# SLURM VARIABLES (APPLICATION SPECIFIC) -#======================================== -#SBATCH --job-name=9062-vasp_gpu -#SBATCH --time=00:05:00 -#SBATCH --nodes=16 -#SBATCH --ntasks-per-core=1 -#SBATCH --ntasks-per-node=1 -#SBATCH --cpus-per-task=1 -#SBATCH --gres=gpu -#=================================== -# SETUP (GENERAL) -#=================================== -TESTID="${SLURM_JOB_NAME:0:4}"; -TESTNAME="${SLURM_JOB_NAME:5}"; -echo -e "\n Test $TESTID: $TESTNAME"; -# check if outdir is defined -if [ -z $logdir ]; then - logdir=$SLURM_SUBMIT_DIR; -fi -# check if outdir is defined -if [ -z $outdir ]; then - outdir=$SLURM_SUBMIT_DIR; -fi -# check if scrdir is defined -if [ -z $scrdir ]; then - cdate=$(date "+%d-%m-%Y_%H-%M-%S"); - scrdir=$SCRATCH/$cdate/$TESTID; - mkdir -p $scrdir; -fi -# check if machine is defined -if [ -z $machine ]; then - machine=$(echo $HOSTNAME | sed 's/[0-9]*//g'); -fi -# output file -out="OUTCAR"; -# logfile with history of performance -log="$logdir/$TESTID-$TESTNAME.log"; -# check if logfile exists and if it is group readable and writeable -if [ ! -f "$log" ]; then - echo "# wall-time, performance (day/ns) and % performance difference on $SLURM_JOB_NUM_NODES nodes, $SLURM_NTASKS_PER_NODE MPI tasks per node" > $log; -# make the newly created $log file group readable and writeable - chmod g+rw $log; -elif [ ! -r "$log" ]; then -# echo an error if $log file is not group readable and exit - echo -e "$machine $cdate $SLURM_JOB_ID \t ERROR: log file $log is not group readable!" >> /dev/stderr; - exit 1; -elif [ ! -w "$log" ]; then -# echo an error if $log file is not group writeable and exit - echo -e "$machine $cdate $SLURM_JOB_ID \t ERROR: log file $log is not group writeable!" 
>> /dev/stderr; - exit 1; -fi -# reference performance file -reffile="$SLURM_SUBMIT_DIR/$TESTNAME.ref"; -# SLURM variables -SLURM_NTASKS=$[$SLURM_NTASKS_PER_NODE*$SLURM_JOB_NUM_NODES]; -echo -e "\nRunning on $machine (job ID $SLURM_JOB_ID)"; -echo -e "Log file : $log"; -echo -e "Output folder : $outdir"; -echo -e "Running folder: $scrdir"; -echo -e "$SLURM_JOB_NUM_NODES nodes, $SLURM_NTASKS_PER_NODE tasks per node ($SLURM_TASKS_PER_NODE), $SLURM_CPUS_PER_TASK cpus per task, $SLURM_NTASKS total tasks\n"; -#======================================== -# RUN AND CHECK (APPLICATION SPECIFIC) -#======================================== -# copy files to $scrdir and change directory -cp INCAR KPOINTS POSCAR POTCAR $scrdir; -cd $scrdir -# OpenMP threads -export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK; -# set stack size unlimited -ulimit -s unlimited; -# load modules and run simulation -module load slurm; -module use /apps/dom/UES/6.0.UP02/sandbox-lm/easybuild/modules/all; -module load VASP/5.4.1-CrayIntel-2016.11-cuda-8.0; -EXE="vasp_gpu"; -srun -n $SLURM_NTASKS --ntasks-per-node=$SLURM_NTASKS_PER_NODE -c $SLURM_CPUS_PER_TASK $EXE; -#======================================== -# check output file -check=$(grep "aborting loop because EDIFF is reached" "$out" | awk '{print $7}'); -if [[ "$check" != "reached" ]]; then - echo -e "$machine $timestamp $SLURM_JOB_ID \t Job output NOT OK: test FAILED!" 
>> "$log"; - exit 1; -fi -#======================================== -# summary of results: new time (s) -new=$(grep "Total CPU time used" "$out" | awk '{print $6}'); -# reference time (s), performance difference (%) -if [ -f "$reffile" ]; then - ref=$(grep -v '#' $reffile | grep $machine | tail -1 | sed 's/Time\=//' | awk '{printf"%.3lf\n", $(NF-1)}'); - if [ -n "$ref" ]; then - delta=$(printf '%.3lf\n' $(echo "(1-$new/$ref)*100" | bc -l)); - else - delta=0; - fi -else - delta=0; -fi -# print results on log file -echo -e "$machine $cdate $SLURM_JOB_ID \t Time=$new Difference=$delta%" >> "$log"; -#======================================== -# COPY OUTPUT (GENERAL) -#======================================== -# copy output and log files to outdir -store="$SLURM_JOB_NAME-$SLURM_JOB_ID.out"; -chmod g+rw $out; -cp -p $out $outdir/$store; -exit 0; -#======================================== diff --git a/reframe/core/environments.py b/reframe/core/environments.py index 5393a8cd6b..77fa29dd30 100644 --- a/reframe/core/environments.py +++ b/reframe/core/environments.py @@ -128,7 +128,7 @@ def __str__(self): return \ 'Name: %s\n' % self.name + \ 'Modules: %s\n' % str(self.modules) + \ - 'Environment: %s\n' % str(self.variables) + 'Environment: %s' % str(self.variables) def swap_environments(src, dst): @@ -171,11 +171,11 @@ def __init__(self, cc = 'cc', cxx = 'CC', ftn = 'ftn', - cppflags = '', - cflags = '', - cxxflags = '', - fflags = '', - ldflags = '', + cppflags = None, + cflags = None, + cxxflags = None, + fflags = None, + ldflags = None, **kwargs): super().__init__(name, modules, variables) self.cc = cc @@ -187,6 +187,8 @@ def __init__(self, self.fflags = fflags self.ldflags = ldflags self.include_search_path = [] + self.propagate = True + def guess_language(self, filename): ext = filename.split('.')[-1] @@ -222,28 +224,34 @@ def _compile_file(self, source_file, executable, lang, options): if not lang: lang = self.guess_language(source_file) - flags = self.cppflags + # 
Replace None's with empty strings + cppflags = self.cppflags if self.cppflags else '' + cflags = self.cflags if self.cflags else '' + cxxflags = self.cxxflags if self.cxxflags else '' + fflags = self.fflags if self.fflags else '' + ldflags = self.ldflags if self.ldflags else '' + + flags = [ cppflags ] if lang == 'C': compiler = self.cc - flags += ' ' + self.cflags + flags.append(cflags) elif lang == 'C++': compiler = self.cxx - flags += ' ' + self.cxxflags + flags.append(cxxflags) elif lang == 'Fortran': compiler = self.ftn - flags += ' ' + self.fflags + flags.append(fflags) elif lang == 'CUDA': compiler = 'nvcc' - flags += ' ' + self.cxxflags + flags.append(cxxflags) else: raise ReframeError('Unknown language') - # append include search path - for d in self.include_search_path: - flags += ' -I%s' % d - + # Append include search path + flags += [ '-I' + d for d in self.include_search_path ] cmd = '%s %s %s -o %s %s %s' % \ - (compiler, flags, source_file, executable, self.ldflags, options) + (compiler, ' '.join(flags), source_file, + executable, ldflags, options) try: return os_ext.run_command(cmd, check=True) except CommandError as e: @@ -256,20 +264,34 @@ def _compile_file(self, source_file, executable, lang, options): def _compile_dir(self, source_dir, makefile, options): if makefile: - cmd = 'make -C %s -f %s %s' % (source_dir, makefile, options) + cmd = 'make -C %s -f %s %s ' % (source_dir, makefile, options) else: - cmd = 'make -C %s %s' % (source_dir, options) - - # pass a set of predefined options to the Makefile - # Naming convetion for implicit make variables - cmd = cmd + \ - " CC='%s'" % self.cc + \ - " CXX='%s'" % self.cxx + \ - " FC='%s'" % self.ftn + \ - " CFLAGS='%s'" % self.cflags + \ - " CXXFLAGS='%s'" % self.cxxflags + \ - " FFLAGS='%s'" % self.fflags + \ - " LDFLAGS='%s'" % self.ldflags + cmd = 'make -C %s %s ' % (source_dir, options) + + # Pass a set of predefined options to the Makefile + if self.propagate: + flags = [ "CC='%s'" % self.cc, + 
"CXX='%s'" % self.cxx, + "FC='%s'" % self.ftn ] + + # Explicitly check against None here; the user may explicitly want + # to clear the flags + if self.cppflags != None: + flags.append("CPPFLAGS='%s'" % self.cppflags) + + if self.cflags != None: + flags.append("CFLAGS='%s'" % self.cflags) + + if self.cxxflags != None: + flags.append("CXXFLAGS='%s'" % self.cxxflags) + + if self.fflags != None: + flags.append("FFLAGS='%s'" % self.fflags) + + if self.ldflags != None: + flags.append("LDFLAGS='%s'" % self.ldflags) + + cmd += ' '.join(flags) try: return os_ext.run_command(cmd, check=True) diff --git a/reframe/core/modules.py b/reframe/core/modules.py index 4bfc846b3e..d1a6666c2e 100644 --- a/reframe/core/modules.py +++ b/reframe/core/modules.py @@ -14,8 +14,7 @@ class Module: """Module wrapper. We basically need it for defining operators for use in standard Python - algorithms. - """ + algorithms.""" def __init__(self, name): if not name: raise ModuleError('no module name specified') @@ -58,15 +57,14 @@ def module_equal(rhs, lhs): def module_list(): try: - return os.environ['LOADEDMODULES'].split(':') + # LOADEDMODULES may be defined but empty + return [ m for m in os.environ['LOADEDMODULES'].split(':') if m ] except KeyError: return [] def module_conflict_list(name): - """ - Return the list of conflicted packages - """ + """Return the list of conflicted packages""" conflict_list = [] completed = os_ext.run_command( cmd = '%s show %s' % (reframe.MODULECMD_PYTHON, name)) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 61ff549e5d..f7809d4e95 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -33,7 +33,7 @@ class RegressionTest: descr = StringField('descr') sourcepath = StringField('sourcepath') prebuild_cmd = TypedListField('prebuild_cmd', str) - postbuild_cmd = TypedListField('prebuild_cmd', str) + postbuild_cmd = TypedListField('postbuild_cmd', str) executable = StringField('executable') executable_opts = 
TypedListField('executable_opts', str) current_system = TypedField('current_system', System) @@ -42,6 +42,7 @@ class RegressionTest: current_environ = TypedField('current_environ', Environment, allow_none=True) keep_files = TypedListField('keep_files', str) + readonly_files = TypedListField('readonly_files', str) tags = TypedSetField('tags', str) maintainers = TypedListField('maintainers', str) strict_check = BooleanField('strict_check') @@ -85,6 +86,7 @@ def __init__(self, name, prefix, system, resources): self.job = None self.job_resources = {} self.keep_files = [] + self.readonly_files = [] self.tags = set() self.maintainers = [] @@ -303,7 +305,7 @@ def setup(self, system, environ, **job_opts): def _copy_to_stagedir(self, path): - os_ext.copytree(path, self.stagedir) + os_ext.copytree_virtual(path, self.stagedir, self.readonly_files) def prebuild(self): @@ -372,26 +374,19 @@ def wait(self): def check_sanity(self): - # Check explicitly against None; otherwise the if will be triggered also - # on empty sanity_patterns - if self.sanity_patterns == None: - return False - return self._match_patterns(self.sanity_patterns, None) - def check_performance(self): - # Check explicitly against None; otherwise the if will be triggered also - # on empty perf_patterns - if self.perf_patterns == None: - return True - - # We don't want to skip performance check (because of logging) in case - # strict_check is False - ret = self._match_patterns(self.perf_patterns, self.reference) + def check_performance_relaxed(self): + """Implements the relaxed performance check logic.""" + ret = self.check_performance() return ret if self.strict_check else True + def check_performance(self): + return self._match_patterns(self.perf_patterns, self.reference) + + def cleanup(self, remove_files=False, unload_env=True): # Copy stdout/stderr and job script shutil.copy(self.stdout, self.outputdir) @@ -456,8 +451,8 @@ def _resolve_tag(tag): def _match_patterns(self, multi_patterns, reference): - if not 
len(multi_patterns): - return False + if not multi_patterns: + return True for file_patt, patterns in multi_patterns.items(): if file_patt == '-' or file_patt == '&1': diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 899d0a1706..4601de5a94 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -42,13 +42,12 @@ def run_checks_partition(checks, options, partition, printer, stats): # Sandbox variables passed to setup sandbox = Sandbox() - sandbox.system = partition # Prepare for running the tests environ_save = EnvironmentSnapshot() for check in checks: if not options.skip_system_check and \ - not check.supports_system(sandbox.system.name): + not check.supports_system(partition.name): printer.print_unformatted( 'Skipping unsupported test %s...' % check.name) continue @@ -60,8 +59,9 @@ def run_checks_partition(checks, options, partition, printer, stats): if not options.relax_performance_check: check.strict_check = True - for env in sandbox.system.environs: - # Add current environment to the sandbox + for env in partition.environs: + # Add current partition and environment to the sandbox + sandbox.system = partition sandbox.environ = env try: if not options.skip_prgenv_check and \ @@ -101,9 +101,10 @@ def run_checks_partition(checks, options, partition, printer, stats): success = False if not options.skip_performance_check and \ - not printer.print_check_progress('Verifying performance', - check.check_performance, - expected_ret=True): + not printer.print_check_progress( + 'Verifying performance', + check.check_performance_relaxed, + expected_ret=True): if check._logfile: printer.print_unformatted( 'Check log file: %s' % check._logfile @@ -393,10 +394,11 @@ def main(): # Print command line print('Command line:', ' '.join(sys.argv)) + print('Reframe version: ' + settings.version) # Print important paths - print('Regression paths') - print('================') + print('Reframe paths') + print('=============') print(' Check prefix :', 
loader.prefix) print('%03s Check search path :' % ('(R)' if loader.recurse else ''), "'%s'" % ':'.join(loader.load_path)) diff --git a/reframe/settings.py b/reframe/settings.py index 8405c2f44e..1ed680b8a7 100644 --- a/reframe/settings.py +++ b/reframe/settings.py @@ -8,7 +8,7 @@ from reframe.core.fields import ReadOnlyField class RegressionSettings: - version = ReadOnlyField('2.2') + version = ReadOnlyField('2.3') module_name = ReadOnlyField('PyRegression') job_state_poll_intervals = ReadOnlyField([ 1, 2, 3 ]) job_init_poll_intervals = ReadOnlyField([ 1 ]) diff --git a/reframe/utility/os.py b/reframe/utility/os.py index a7e74727e7..7582b1dc80 100644 --- a/reframe/utility/os.py +++ b/reframe/utility/os.py @@ -51,7 +51,8 @@ def grep_command_output(cmd, pattern, where = 'stdout'): return False -def run_command_async(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1): +def run_command_async(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, bufsize=1): return subprocess.Popen(args=shlex.split(cmd), stdout=stdout, stderr=stderr, @@ -61,10 +62,10 @@ def run_command_async(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsi def copytree(src, dst, symlinks=False, ignore=None, copy_function=shutil.copy2, ignore_dangling_symlinks=False): - """ - Same as shutil.copytree() but valid also if 'dst' exists, in which case it - will first remove it and then call the standard shutil.copytree() - """ + """Same as shutil.copytree() but valid also if 'dst' exists. + + In this case it will first remove it and then call the standard + shutil.copytree().""" if os.path.exists(dst): shutil.rmtree(dst) @@ -72,17 +73,67 @@ def copytree(src, dst, symlinks=False, ignore=None, copy_function=shutil.copy2, ignore_dangling_symlinks) +def copytree_virtual(src, dst, file_links=[], + symlinks=False, copy_function=shutil.copy2, + ignore_dangling_symlinks=False): + """Copy `src` to `dst`, but create symlinks for the files in `file_links`. 
+ + If `file_links` is empty, this is equivalent to `copytree()`. The rest of + the arguments are passed as-is to `copytree()`. Paths in `file_links` must + be relative to `src`. If you try to pass `.` in `file_links`, `OSError` will + be raised.""" + + # Work with absolute paths + src = os.path.abspath(src) + dst = os.path.abspath(dst) + + # 1. Check that the link targets are valid + # 2. Convert link targets to absolute paths + # 3. Store them in a set for quick look up inside the ignore function + link_targets = set() + for f in file_links: + if os.path.isabs(f): + raise ReframeError( + "copytree_virtual() failed: `%s': " + "absolute paths not allowed in file_links" % f) + + target = os.path.join(src, f) + if not os.path.exists(target): + raise ReframeError( + "copytree_virtual() failed: `%s' does not exist" % target) + + if os.path.commonpath([src, target]) != src: + raise ReframeError( + "copytree_virtual() failed: " + "`%s' not under `%s'" % (target, src)) + + link_targets.add(os.path.abspath(target)) + + + if not file_links: + ignore = None + else: + ignore = lambda dir, contents: \ + [ c for c in contents if os.path.join(dir, c) in link_targets ] + + # Copy to dst ignoring the file_links + copytree(src, dst, symlinks, ignore, copy_function, ignore_dangling_symlinks) + + # Now create the symlinks + for f in link_targets: + link_name = f.replace(src, dst) + os.symlink(f, link_name) + + def inpath(entry, pathvar): """Check if entry is in pathvar. pathvar is a string of the form - 'entry1:entry2:entry3' - """ + `entry1:entry2:entry3`.""" return entry in set(pathvar.split(':')) def subdirs(dirname, recurse=False): """Returns a list of dirname + its subdirectories. If recurse is True, - recursion is performed in pre-order. 
- """ + recursion is performed in pre-order.""" dirs = [] if os.path.isdir(dirname): dirs.append(dirname) diff --git a/test_reframe.py b/test_reframe.py index 9c5bc40c44..3fa45b4d11 100755 --- a/test_reframe.py +++ b/test_reframe.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import nose diff --git a/unittests/fixtures.py b/unittests/fixtures.py index 4e06b58eef..00eb805d74 100644 --- a/unittests/fixtures.py +++ b/unittests/fixtures.py @@ -87,16 +87,25 @@ def guess_system(): return autodetect_system(site_config) -def system_with_scheduler(sched_type): +# FIXME: This may conflict in the unlikely situation that a user defines a +# system named `kesch` with a partition named `pn`. +def system_with_scheduler(sched_type, skip_partitions = [ 'kesch:pn' ]): """Retrieve a partition from the current system with a specific scheduler. - If sched_type == None, the first partition encountered with a non-local - scheduler will be returned.""" + If `sched_type == None`, the first partition with a non-local scheduler will + be returned. + + Partitions in `skip_partitions` will be skipped from searching. 
Items of + `skip_partitions` are of the form `system:partition`." system = guess_system() if not system: return None for p in system.partitions: + canon_name = '%s:%s' % (system.name, p) + if canon_name in skip_partitions: + continue + if sched_type == None and p.scheduler != 'local': return p diff --git a/unittests/modules/testmod_base b/unittests/modules/testmod_base new file mode 100644 index 0000000000..3cf9bb4eda --- /dev/null +++ b/unittests/modules/testmod_base @@ -0,0 +1,8 @@ +#%Module +proc ModulesHelp { } { + Helper module for ReFrame unit tests +} + +module-whatis { Helper module for ReFrame unit tests } + +setenv TESTMOD_BASE "BASE" diff --git a/unittests/test_cli.py b/unittests/test_cli.py index 627048fb97..4b476132e3 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -9,7 +9,7 @@ import reframe.utility.os as os_ext from reframe.frontend.loader import SiteConfiguration, autodetect_system -from unittests.fixtures import system_with_scheduler +from unittests.fixtures import guess_system, system_with_scheduler class TestFrontend(unittest.TestCase): def setUp(self): @@ -17,7 +17,7 @@ def setUp(self): self.stagedir = os.path.join(self.prefix, 'stage') self.outputdir = os.path.join(self.prefix, 'output') self.logdir = os.path.join(self.prefix, 'logs') - self.python = 'python' + self.python = 'python3' self.executable = 'reframe.py' self.sysopt = 'generic:login' self.checkfile = 'unittests/resources/hellocheck.py' @@ -88,11 +88,14 @@ def test_check_success(self): 'job submission not supported') def test_check_submit_success(self): # This test will run on the auto-detected system + system = guess_system() + partition = system_with_scheduler(None) + self.local = False - self.sysopt = None + self.sysopt = '%s:%s' % (system.name, partition.name) # pick up the programming environment of the partition - self.prgenv = system_with_scheduler(None).environs[0].name + self.prgenv = partition.environs[0].name command = os_ext.run_command(self._invocation_cmd(), 
check=True) self.assertNotIn('FAILED', command.stdout) diff --git a/unittests/test_core.py b/unittests/test_core.py index ec271ce843..4227f0f4e6 100644 --- a/unittests/test_core.py +++ b/unittests/test_core.py @@ -33,6 +33,13 @@ def assertModulesNotLoaded(self, modules): def setUp(self): module_path_add([TEST_MODULES]) + + # Always add a base module; this is a workaround for the modules + # environment's inconsistent behaviour, that starts with an empty + # LOADEDMODULES variable and ends up removing it completely if all + # present modules are removed. + module_load('testmod_base') + os.environ['_fookey1'] = 'origfoo' self.environ_save = EnvironmentSnapshot() self.environ = Environment(name='TestEnv1', modules=['testmod_foo']) @@ -293,5 +300,6 @@ def test_module_force_load(self): def test_module_purge(self): + module_load('testmod_base') module_purge() self.assertNotIn('LOADEDMODULES', os.environ) diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 7b96496fbc..17f91de895 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -312,6 +312,14 @@ def test_success(self): self.assertTrue(self.test.check_performance()) + def test_empty_file(self): + self.output_file.close() + self.test.sanity_patterns = { + self.output_file.name : { '.*' : [] } + } + self.assertFalse(self.test.check_sanity()) + + def test_sanity_failure(self): self.output_file.write('result = failure\n') self.output_file.close() @@ -425,7 +433,7 @@ def test_nostrict_performance_check(self): performance2=2.7, performance3=3.2) self.test.strict_check = False - self.assertTrue(self.test.check_performance()) + self.assertTrue(self.test.check_performance_relaxed()) def test_invalid_threshold(self): @@ -738,19 +746,16 @@ def match_eof(self, **kwargs): self.test.sanity_patterns[self.output_file.name].keys()) - def test_patterns_empty(self): self.test.perf_patterns = {} self.test.sanity_patterns = {} - self.assertFalse(self.test.check_sanity()) - 
self.assertFalse(self.test.check_performance()) - + self.assertTrue(self.test.check_sanity()) + self.assertTrue(self.test.check_performance()) - def test_patterns_empty(self): - self.test.perf_patterns = {} - self.test.sanity_patterns = {} - self.assertFalse(self.test.check_sanity()) - self.assertFalse(self.test.check_performance()) + self.test.sanity_patterns = None + self.test.perf_patterns = None + self.assertTrue(self.test.check_sanity()) + self.assertTrue(self.test.check_performance()) def test_file_not_found(self): diff --git a/unittests/test_utility.py b/unittests/test_utility.py index f1fcc9e57b..8e881ba334 100644 --- a/unittests/test_utility.py +++ b/unittests/test_utility.py @@ -89,8 +89,8 @@ def test_subdirs(self): os.makedirs(os.path.join(prefix, 'loo', 'bar'), exist_ok=True) # Try to fool the algorithm by adding normal files - os.mknod(os.path.join(prefix, 'foo', 'bar', 'file.txt'), stat.S_IFREG) - os.mknod(os.path.join(prefix, 'loo', 'file.txt'), stat.S_IFREG) + open(os.path.join(prefix, 'foo', 'bar', 'file.txt'), 'w').close() + open(os.path.join(prefix, 'loo', 'file.txt'), 'w').close() expected_subdirs = { prefix, os.path.join(prefix, 'foo'), @@ -108,6 +108,98 @@ def test_subdirs(self): shutil.rmtree(prefix) +class TestCopyTree(unittest.TestCase): + def setUp(self): + # Create a test directory structure + # + # prefix/ + # bar/ + # bar.txt + # foo.txt + # foobar.txt + # foo/ + # bar.txt + # bar.txt + # foo.txt + # + self.prefix = os.path.abspath(tempfile.mkdtemp()) + self.target = os.path.abspath(tempfile.mkdtemp()) + os.makedirs(os.path.join(self.prefix, 'bar'), exist_ok=True) + os.makedirs(os.path.join(self.prefix, 'foo'), exist_ok=True) + open(os.path.join(self.prefix, 'bar', 'bar.txt'), 'w').close() + open(os.path.join(self.prefix, 'bar', 'foo.txt'), 'w').close() + open(os.path.join(self.prefix, 'bar', 'foobar.txt'), 'w').close() + open(os.path.join(self.prefix, 'foo', 'bar.txt'), 'w').close() + open(os.path.join(self.prefix, 'bar.txt'), 
'w').close() + open(os.path.join(self.prefix, 'foo.txt'), 'w').close() + + + def verify_target_directory(self, file_links = []): + """Verify the directory structure""" + self.assertTrue( + os.path.exists(os.path.join(self.target, 'bar', 'bar.txt'))) + self.assertTrue( + os.path.exists(os.path.join(self.target, 'bar', 'foo.txt'))) + self.assertTrue( + os.path.exists(os.path.join(self.target, 'bar', 'foobar.txt'))) + self.assertTrue( + os.path.exists(os.path.join(self.target, 'foo', 'bar.txt'))) + self.assertTrue(os.path.exists(os.path.join(self.target, 'bar.txt'))) + self.assertTrue(os.path.exists(os.path.join(self.target, 'foo.txt'))) + + # Verify the symlinks + for lf in file_links: + target_name = os.path.abspath(os.path.join(self.prefix, lf)) + link_name = os.path.abspath(os.path.join(self.target, lf)) + self.assertTrue(os.path.islink(link_name)) + self.assertEqual(target_name, os.readlink(link_name)) + + + def test_virtual_copy_nolinks(self): + os_ext.copytree_virtual(self.prefix, self.target) + self.verify_target_directory() + + + def test_virtual_copy_valid_links(self): + file_links = [ 'bar/', 'foo/bar.txt', 'foo.txt' ] + os_ext.copytree_virtual(self.prefix, self.target, file_links) + self.verify_target_directory(file_links) + + + def test_virtual_copy_inexistent_links(self): + file_links = [ 'foobar/', 'foo/bar.txt', 'foo.txt' ] + self.assertRaises(ReframeError, os_ext.copytree_virtual, + self.prefix, self.target, file_links) + + + def test_virtual_copy_absolute_paths(self): + file_links = [ os.path.join(self.prefix, 'bar'), + 'foo/bar.txt', 'foo.txt' ] + self.assertRaises(ReframeError, os_ext.copytree_virtual, + self.prefix, self.target, file_links) + + + def test_virtual_copy_irrelevenant_paths(self): + file_links = [ '/bin', 'foo/bar.txt', 'foo.txt' ] + self.assertRaises(ReframeError, os_ext.copytree_virtual, + self.prefix, self.target, file_links) + + file_links = [ os.path.dirname(self.prefix), 'foo/bar.txt', 'foo.txt' ] + 
self.assertRaises(ReframeError, os_ext.copytree_virtual, + self.prefix, self.target, file_links) + + + def test_virtual_copy_linkself(self): + file_links = [ '.' ] + self.assertRaises(OSError, os_ext.copytree_virtual, + self.prefix, self.target, file_links) + + + def tearDown(self): + shutil.rmtree(self.prefix) + shutil.rmtree(self.target) + + class TestUtilityFunctions(unittest.TestCase): def test_standard_threshold(self): self.assertTrue(standard_threshold(0.9, (1.0, -0.2, 0.2)))