osu_latency.slurm.template
#!/bin/bash
# Slurm job options (name, compute nodes, job time)
#SBATCH --job-name=[* ENTER YOUR JOB NAME HERE *]
#SBATCH --time=0:10:0
#SBATCH --nodes=2
#SBATCH --tasks-per-node=1
#SBATCH --cpus-per-task=1
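# osu_latency is a two-process point-to-point benchmark, so we request two
# nodes with a single MPI task on each to measure inter-node latency.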
# Replace [budget code] below with your budget code (e.g. t01)
#SBATCH --account=[* ENTER YOUR BUDGET CODE HERE *]
# We use the "standard" partition as we are running on CPU nodes
#SBATCH --partition=standard
# We use the "standard" QoS as our runtime is less than 4 days
#SBATCH --qos=standard
# Setup the job environment (this module needs to be loaded before any other modules)
module load epcc-job-env
# Set the number of OpenMP threads to 1 to stop any threaded system
# libraries from spawning additional threads automatically.
export OMP_NUM_THREADS=1
# Set the LD_LIBRARY_PATH environment variable within the Singularity container
# to ensure that it uses the correct MPI libraries
export SINGULARITYENV_LD_LIBRARY_PATH=/opt/cray/pe/mpich/8.0.16/ofi/gnu/9.1/lib-abi-mpich:/usr/lib/x86_64-linux-gnu/libibverbs:/opt/cray/pe/pmi/6.0.7/lib:/opt/cray/libfabric/1.11.0.0.233/lib64:/usr/lib64/host:/.singularity.d/libs
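# (Singularity strips the SINGULARITYENV_ prefix, so LD_LIBRARY_PATH is set
# to this value inside the container.)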
# Set the options for the Singularity executable: bind mount the host Cray PE,
# system library and Slurm directories so the host MPI stack is visible inside
# the container
singopts="-B /opt/cray,/usr/lib64:/usr/lib64/host,/usr/lib64/tcl,/var/spool/slurmd/mpi_cray_shasta"
# Launch the parallel job. The latency benchmark lives under pt2pt/ in the
# OSU micro-benchmarks tree (collective tests such as osu_gather live under
# collective/).
srun --cpu-bind=cores singularity run $singopts [* YOUR SIF IMAGE NAME *] pt2pt/osu_latency
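# Example usage (a sketch; assumes this template has been saved as
# osu_latency.slurm with the placeholders above filled in):
#   sbatch osu_latency.slurm
# osu_latency prints a table of message size (bytes) against one-way
# latency (us) measured between the two ranks, one on each node.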