Commit

Add max_workers parameter for backwards compatibility
jan-janssen committed May 7, 2024
1 parent 0e758f1 commit ccd526a
Showing 4 changed files with 27 additions and 7 deletions.
8 changes: 8 additions & 0 deletions pympipool/__init__.py
@@ -19,6 +19,9 @@ class Executor:
an interactive Jupyter notebook.
Args:
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
recommended, as computers have a limited number of compute cores.
max_cores (int): defines the number of cores which can be used in parallel
cores_per_worker (int): number of MPI cores to be used for each function call
threads_per_core (int): number of OpenMP threads to be used for each function call
@@ -64,6 +67,7 @@ class Executor:

def __init__(
self,
max_workers: int = 1,
max_cores: int = 1,
cores_per_worker: int = 1,
threads_per_core: int = 1,
@@ -84,6 +88,7 @@ def __init__(

def __new__(
cls,
max_workers: int = 1,
max_cores: int = 1,
cores_per_worker: int = 1,
threads_per_core: int = 1,
@@ -108,6 +113,9 @@ def __new__(
requires the SLURM workload manager to be installed on the system.
Args:
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
number of cores which can be used in parallel - just like the max_cores parameter. Using
max_cores is recommended, as computers have a limited number of compute cores.
max_cores (int): defines the number of cores which can be used in parallel
cores_per_worker (int): number of MPI cores to be used for each function call
threads_per_core (int): number of OpenMP threads to be used for each function call
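For context, a minimal usage sketch of the new parameter (the helper function sum_two and the local MPI setup are illustrative, not part of this commit): after this change, passing max_workers=2 requests the same two cores that max_cores=2 would.

from pympipool import Executor

def sum_two(i, j):
    return i + j

# max_workers is accepted for compatibility with concurrent.futures and is
# treated as an alias for max_cores when max_cores is left at its default of 1.
with Executor(max_workers=2, backend="mpi", hostname_localhost=True) as exe:
    future = exe.submit(sum_two, 1, 2)
    print(future.result())  # 3

# Equivalent call using the recommended max_cores parameter:
with Executor(max_cores=2, backend="mpi", hostname_localhost=True) as exe:
    print(exe.submit(sum_two, 3, 4).result())  # 7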
17 changes: 11 additions & 6 deletions pympipool/scheduler/__init__.py
@@ -14,6 +14,7 @@
check_executor,
check_backend,
check_init_function,
validate_number_of_cores,
)
from pympipool.scheduler.slurm import (
PySlurmExecutor,
@@ -36,6 +37,7 @@


def create_executor(
max_workers: int = 1,
max_cores: int = 1,
cores_per_worker: int = 1,
threads_per_core: int = 1,
@@ -58,19 +60,21 @@ def create_executor(
requires the SLURM workload manager to be installed on the system.
Args:
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
recommended, as computers have a limited number of compute cores.
max_cores (int): defines the number of cores which can be used in parallel
cores_per_worker (int): number of MPI cores to be used for each function call
threads_per_core (int): number of OpenMP threads to be used for each function call
gpus_per_worker (int): number of GPUs per worker - defaults to 0
oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
cwd (str/None): current working directory where the parallel python task is executed
hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
context of an HPC cluster this is essential to be able to communicate to an Executor
running on a different compute node within the same allocation. In principle
any computer should be able to resolve that its own hostname points to the same
address as localhost. Still, MacOS >= 12 seems to disable this lookup for security
reasons, so on MacOS it is required to set this option to true
backend (str): Switch between the different backends "flux", "mpi" or "slurm". Alternatively, when "auto"
is selected (the default) the available backend is determined automatically.
block_allocation (boolean): To accelerate the submission of a series of python functions with the same
@@ -81,6 +85,7 @@
command_line_argument_lst (list): Additional command line arguments for the srun call (SLURM only)
"""
max_cores = validate_number_of_cores(max_cores=max_cores, max_workers=max_workers)
check_init_function(block_allocation=block_allocation, init_function=init_function)
check_backend(backend=backend)
if backend == "flux" or (backend == "auto" and flux_installed):
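A short sketch of how the factory is typically called (a sketch only, assuming the MPI backend is available locally): create_executor now runs max_workers through validate_number_of_cores before dispatching to a backend-specific executor class.

from pympipool.scheduler import create_executor

# max_workers=2 is translated to max_cores=2 by validate_number_of_cores,
# because max_cores was left at its default of 1.
exe = create_executor(
    max_workers=2,
    cores_per_worker=1,
    backend="mpi",
    hostname_localhost=True,
)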
7 changes: 7 additions & 0 deletions pympipool/shared/inputcheck.py
@@ -78,3 +78,10 @@ def check_backend(backend):
def check_init_function(block_allocation, init_function):
if not block_allocation and init_function is not None:
raise ValueError("")


def validate_number_of_cores(max_cores, max_workers):
# only overwrite max_cores when it is set to 1
if max_workers != 1 and max_cores == 1:
max_cores = max_workers
return max_cores
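To illustrate the precedence the helper implements (the calls below are a sketch, not part of the commit): max_workers only takes effect while max_cores is still at its default of 1.

from pympipool.shared.inputcheck import validate_number_of_cores

# max_cores left at its default, so max_workers takes over
validate_number_of_cores(max_cores=1, max_workers=4)  # returns 4

# an explicitly set max_cores is never overwritten
validate_number_of_cores(max_cores=8, max_workers=4)  # returns 8

# both at their defaults
validate_number_of_cores(max_cores=1, max_workers=1)  # returns 1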
2 changes: 1 addition & 1 deletion tests/test_executor_backend_mpi.py
@@ -43,7 +43,7 @@ def test_meta_executor_single(self):

def test_meta_executor_parallel(self):
with Executor(
max_cores=2,
max_workers=2,
cores_per_worker=2,
hostname_localhost=True,
backend="mpi",
