Implement HPC execution #192

Merged 13 commits on Dec 10, 2021
66 changes: 66 additions & 0 deletions examples/docking-protein-protein/docking-protein-protein-hpc.cfg
@@ -0,0 +1,66 @@
# ====================================================================
# Rigid-body docking example

# directory in which the docking run will be done
run_dir = "run1"

# ###
mode = 'hpc'
# concatenate models inside each job; with concat = 5, each .job file will produce 5 models
# (for example, sampling = 1000 with concat = 5 yields 200 rigid-body jobs)
concat = 1
# Limit the number of concurrent submissions to the queue
queue_limit = 100
# cns_exec = "path/to/bin/cns" # optional
# ###

# molecules to be docked
molecules = [
"data/e2aP_1F3G.pdb",
"data/hpr_ensemble.pdb"
]

# ====================================================================
# Parameters for each stage are defined below; prefer full paths
#####################################################################
# WARNING: THE PARAMETERS HERE ARE ILLUSTRATIVE
# THE WORKFLOW IS WORK-IN-PROGRESS
#####################################################################
[topoaa]
autohis = false
[topoaa.input.mol1]
nhisd = 0
nhise = 1
hise_1 = 75
[topoaa.input.mol2]
nhisd = 1
hisd_1 = 76
nhise = 1
hise_1 = 15

[rigidbody]
ambig_fname = 'data/e2a-hpr_air.tbl'
sampling = 1000
noecv = true

[caprieval]
reference = 'data/e2a-hpr_1GGR.pdb'

[seletop]
select = 200

[flexref]
ambig_fname = 'data/e2a-hpr_air.tbl'
noecv = true

[caprieval]
reference = 'data/e2a-hpr_1GGR.pdb'

[emref]
ambig_fname = 'data/e2a-hpr_air.tbl'
noecv = true

[caprieval]
reference = 'data/e2a-hpr_1GGR.pdb'

# ====================================================================
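With the HPC options in place, this example is launched like any other HADDOCK3 run. A usage sketch, assuming the haddock3 command-line entry point is installed and the command is issued from examples/docking-protein-protein:

haddock3 docking-protein-protein-hpc.cfg

In 'hpc' mode the individual CNS jobs are submitted to the batch queue instead of being executed locally, honoring the queue_limit and concat settings above.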

94 changes: 3 additions & 91 deletions src/haddock/clis/cli_bm.py
@@ -48,6 +48,7 @@
from pathlib import Path

from haddock import log
from haddock.libs.libhpc import create_job_header_funcs


# first character allowed for benchmark test cases, we use digits and
@@ -312,88 +313,6 @@ def create_job(
return job_header + job_body + job_tail


def create_torque_header(
job_name,
work_dir,
stdout_path,
stderr_path,
queue='medium',
ncores=48,
):
"""
Create HADDOCK3 Alcazar job file.

Parameters
----------
job_name : str
The name of the job.

work_dir : pathlib.Path
The working dir of the example. That is, the directory where
`input`, `jobs`, and `logs` reside. Injected in `create_job_header`.

**job_params
According to `job_setup`.

Return
------
str
Torque-based job file for HADDOCK3 benchmarking.
"""
header = \
f"""#!/usr/bin/env tcsh
#PBS -N {job_name}
#PBS -q {queue}
#PBS -l nodes=1:ppn={str(ncores)}
#PBS -S /bin/tcsh
#PBS -o {stdout_path}
#PBS -e {stderr_path}
#PBS -wd {work_dir}
"""
return header


def create_slurm_header(
job_name,
work_dir,
stdout_path,
stderr_path,
queue='medium',
ncores=48,
):
"""
Create HADDOCK3 Slurm Batch job file.

Parameters
----------
job_name : str
The name of the job.

work_dir : pathlib.Path
The working dir of the example. That is, the directory where
`input`, `jobs`, and `logs` reside. Injected in `create_job_header`.

**job_params
According to `job_setup`.

Return
------
str
Slurm-based job file for HADDOCK3 benchmarking.
"""
header = \
f"""#!/usr/bin/env bash
#SBATCH -J {job_name}
#SBATCH -p {queue}
#SBATCH --nodes=1
#SBATCH --tasks-per-node={str(ncores)}
#SBATCH --output={stdout_path}
#SBATCH --error={stderr_path}
#SBATCH --workdir={work_dir}
"""
return header


def setup_haddock3_job(available_flag, running_flag, conf_f):
"""
Write body for the job script.
@@ -567,8 +486,8 @@ def make_daemon_job(
):
"""Make a daemon-ready job."""
job_header = create_job_func(
job_name,
workdir,
job_name=job_name,
work_dir=workdir,
stdout_path=stdout_path,
stderr_path=stderr_path,
queue=queue,
@@ -589,13 +508,6 @@

# helper dictionaries

# the different job submission queues
create_job_header_funcs = {
'torque': create_torque_header,
'slurm': create_slurm_header,
}


# the different scenarios covered
benchmark_scenarios = {
'true-interface': create_cfg_scn_1,
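For context: create_torque_header, create_slurm_header, and the create_job_header_funcs mapping that dispatches between them were moved out of cli_bm.py into haddock.libs.libhpc, hence the new import at the top of the module. A minimal sketch of the dispatch pattern, assuming the moved mapping keeps the 'torque' and 'slurm' keys and the header functions keep the keyword signature shown in the deleted code (the job name and paths below are purely illustrative):

from haddock.libs.libhpc import create_job_header_funcs

# select the header generator for the target batch system
create_job_header = create_job_header_funcs['slurm']  # or 'torque'

# build the job header with the same keyword arguments make_daemon_job uses
header = create_job_header(
    job_name='bm-test-case',     # illustrative job name
    work_dir='run1',             # directory holding input/, jobs/, logs/
    stdout_path='logs/job.out',  # illustrative path
    stderr_path='logs/job.err',  # illustrative path
    queue='medium',
    ncores=48,
    )
print(header)

Calling with keyword arguments, rather than the previous positional style, keeps the call sites robust to any reordering of the header functions' parameters, which is why make_daemon_job was updated above.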