Merged
12 changes: 12 additions & 0 deletions pympipool/flux/fluxbroker.py
@@ -8,6 +8,18 @@


class PyFluxExecutor(ExecutorBase):
"""
Args:
max_workers (int): defines the number of workers which can execute functions in parallel
cores_per_worker (int): number of MPI cores to be used for each function call
threads_per_core (int): number of OpenMP threads to be used for each function call
gpus_per_worker (int): number of GPUs per worker - defaults to 0
init_function (None): optional function to preset arguments for functions which are submitted later
cwd (str/None): current working directory where the parallel Python task is executed
sleep_interval (float): synchronization interval - defaults to 0.1
executor (flux.job.FluxExecutor): Flux Python interface used to submit the workers to Flux
"""

def __init__(
self,
max_workers,
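A minimal usage sketch of the arguments documented above, assuming PyFluxExecutor follows the standard concurrent.futures submit()/result() interface implied by ExecutorBase; the calc function and the chosen parameter values are illustrative only.

import flux.job
from pympipool.flux.fluxbroker import PyFluxExecutor

def calc(i):
    # hypothetical task executed by a worker
    return i ** 2

# assumes this script runs inside a Flux allocation
flux_exe = flux.job.FluxExecutor()
with PyFluxExecutor(
    max_workers=2,         # two workers execute functions in parallel
    cores_per_worker=1,    # MPI cores per function call
    threads_per_core=1,    # OpenMP threads per core
    gpus_per_worker=0,     # no GPUs requested
    init_function=None,    # no preset arguments
    cwd=None,              # run in the current working directory
    sleep_interval=0.1,    # synchronization interval in seconds
    executor=flux_exe,     # Flux interface the workers are submitted to
) as exe:
    future = exe.submit(calc, 3)
    print(future.result())  # expected: 9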
13 changes: 13 additions & 0 deletions pympipool/mpi/mpibroker.py
@@ -8,6 +8,19 @@


class PyMPIExecutor(ExecutorBase):
"""
Args:
max_workers (int): defines the number of workers which can execute functions in parallel
cores_per_worker (int): number of MPI cores to be used for each function call
threads_per_core (int): number of OpenMP threads to be used for each function call
gpus_per_worker (int): number of GPUs per worker - defaults to 0
oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI only) - defaults to False
init_function (None): optional function to preset arguments for functions which are submitted later
cwd (str/None): current working directory where the parallel Python task is executed
sleep_interval (float): synchronization interval - default 0.1
enable_slurm_backend (bool): enable the SLURM queueing system as backend - defaults to False
"""

def __init__(
self,
max_workers,
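A similar sketch for PyMPIExecutor with the arguments documented above, again assuming the concurrent.futures-style submit()/result() interface of ExecutorBase; calc and the parameter values are illustrative.

from pympipool.mpi.mpibroker import PyMPIExecutor

def calc(i):
    # hypothetical task executed by a worker
    return i ** 2

with PyMPIExecutor(
    max_workers=2,               # two workers execute functions in parallel
    cores_per_worker=1,          # MPI cores per function call
    threads_per_core=1,          # OpenMP threads per core
    gpus_per_worker=0,           # no GPUs requested
    oversubscribe=False,         # keep the OpenMPI --oversubscribe flag off
    init_function=None,          # no preset arguments
    cwd=None,                    # run in the current working directory
    sleep_interval=0.1,          # synchronization interval in seconds
    enable_slurm_backend=False,  # use the default MPI backend rather than SLURM
) as exe:
    future = exe.submit(calc, 3)
    print(future.result())  # expected: 9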