diff --git a/pympipool/__init__.py b/pympipool/__init__.py
index b78b7d4c..479fcda1 100644
--- a/pympipool/__init__.py
+++ b/pympipool/__init__.py
@@ -32,6 +32,7 @@ class Executor:
         gpus_per_worker (int): number of GPUs per worker - defaults to 0
         oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
         cwd (str/None): current working directory where the parallel python task is executed
+        executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
         hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                       context of an HPC cluster this essential to be able to communicate to an
                                       Executor running on a different compute node within the same allocation. And
@@ -126,6 +127,7 @@
         gpus_per_worker (int): number of GPUs per worker - defaults to 0
         oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
         cwd (str/None): current working directory where the parallel python task is executed
+        executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
         hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
                                       context of an HPC cluster this essential to be able to communicate to an
                                       Executor running on a different compute node within the same allocation. And
diff --git a/pympipool/scheduler/flux.py b/pympipool/scheduler/flux.py
index 32389de4..6980c544 100644
--- a/pympipool/scheduler/flux.py
+++ b/pympipool/scheduler/flux.py
@@ -39,7 +39,7 @@ class PyFluxExecutor(ExecutorBroker):
     Examples:
 
         >>> import numpy as np
-        >>> from pympipool.flux import PyFluxExecutor
+        >>> from pympipool.scheduler.flux import PyFluxExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -104,7 +104,6 @@ class PyFluxStepExecutor(ExecutorSteps):
         cores_per_worker (int): number of MPI cores to be used for each function call
         threads_per_core (int): number of OpenMP threads to be used for each function call
         gpus_per_worker (int): number of GPUs per worker - defaults to 0
-        init_function (None): optional function to preset arguments for functions which are submitted later
         cwd (str/None): current working directory where the parallel python task is executed
         executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
         hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
@@ -118,7 +117,7 @@ class PyFluxStepExecutor(ExecutorSteps):
     Examples:
 
         >>> import numpy as np
-        >>> from pympipool.flux import PyFluxStepExecutor
+        >>> from pympipool.scheduler.flux import PyFluxStepExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
diff --git a/pympipool/scheduler/mpi.py b/pympipool/scheduler/mpi.py
index 0cbec602..44570cac 100644
--- a/pympipool/scheduler/mpi.py
+++ b/pympipool/scheduler/mpi.py
@@ -35,7 +35,7 @@ class PyMPIExecutor(ExecutorBroker):
     Examples:
 
         >>> import numpy as np
-        >>> from pympipool.mpi import PyMPIExecutor
+        >>> from pympipool.scheduler.mpi import PyMPIExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -108,7 +108,7 @@ class PyMPIStepExecutor(ExecutorSteps):
     Examples:
 
         >>> import numpy as np
-        >>> from pympipool.mpi import PyMPIStepExecutor
+        >>> from pympipool.scheduler.mpi import PyMPIStepExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
diff --git a/pympipool/scheduler/slurm.py b/pympipool/scheduler/slurm.py
index 4cddacea..1f3dc79c 100644
--- a/pympipool/scheduler/slurm.py
+++ b/pympipool/scheduler/slurm.py
@@ -36,7 +36,7 @@ class PySlurmExecutor(ExecutorBroker):
     Examples:
 
         >>> import numpy as np
-        >>> from pympipool.slurm import PySlurmExecutor
+        >>> from pympipool.scheduler.slurm import PySlurmExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
@@ -117,7 +117,7 @@ class PySlurmStepExecutor(ExecutorSteps):
     Examples:
 
         >>> import numpy as np
-        >>> from pympipool.slurm import PySlurmStepExecutor
+        >>> from pympipool.scheduler.slurm import PySlurmStepExecutor
         >>>
         >>> def calc(i, j, k):
         >>>     from mpi4py import MPI
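For reference, a minimal usage sketch of the corrected import path (here the MPI backend). The constructor keywords max_workers and cores_per_worker are assumptions based on the argument names listed in the docstrings above and should be checked against the installed pympipool release.

# Usage sketch for the corrected import path shown in the patch above.
# NOTE: max_workers and cores_per_worker are assumed keyword names, not
# verified against a specific pympipool release.
from pympipool.scheduler.mpi import PyMPIExecutor

def calc(i, j, k):
    # Executed on every MPI rank of the worker, mirroring the docstring example.
    from mpi4py import MPI
    return i + j + k, MPI.COMM_WORLD.Get_size(), MPI.COMM_WORLD.Get_rank()

if __name__ == "__main__":
    with PyMPIExecutor(max_workers=1, cores_per_worker=2) as exe:
        future = exe.submit(calc, 1, 2, 3)
        print(future.result())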