-
Notifications
You must be signed in to change notification settings - Fork 18
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Added hybrid MPI/OpenMP job script example.
- Loading branch information
Showing
2 changed files
with
70 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,52 @@ | ||
#!/bin/bash

#######################################
# example for a hybrid MPI OpenMP job #
#######################################

#SBATCH --job-name=example

# we ask for 2 MPI tasks with 20 cores each
#SBATCH --ntasks=2
#SBATCH --cpus-per-task=20

# run for five minutes
# d-hh:mm:ss
#SBATCH --time=0-00:05:00

# short partition should do it
#SBATCH --partition short

# 500MB memory per core
# this is a hard limit
#SBATCH --mem-per-cpu=500MB

# turn on all mail notification
#SBATCH --mail-type=ALL

# you may not place bash commands before the last SBATCH directive

# define and create a unique scratch directory
# (quoted everywhere so paths with spaces cannot break the script)
SCRATCH_DIRECTORY="/global/work/${USER}/example/${SLURM_JOBID}"
mkdir -p "${SCRATCH_DIRECTORY}"
# abort if the cd fails, otherwise we would run in the wrong directory
cd "${SCRATCH_DIRECTORY}" || exit 1

# we copy everything we need to the scratch directory
# ${SLURM_SUBMIT_DIR} points to the path where this script was submitted from
cp "${SLURM_SUBMIT_DIR}/my_binary.x" "${SCRATCH_DIRECTORY}"

# we set OMP_NUM_THREADS to the number of cpu cores per MPI task
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}

# we execute the job and time it
# launch through srun so that all requested MPI tasks (--ntasks=2) are
# started; invoking the binary directly would run only a single rank
time srun ./my_binary.x > my_output

# after the job is done we copy our output back to $SLURM_SUBMIT_DIR
cp "${SCRATCH_DIRECTORY}/my_output" "${SLURM_SUBMIT_DIR}"

# we step out of the scratch directory and remove it
cd "${SLURM_SUBMIT_DIR}" || exit 1
# ':?' aborts instead of expanding to an empty path for rm -rf
rm -rf "${SCRATCH_DIRECTORY:?}"

# happy end
exit 0