Skip to content

Commit

Permalink
Merge pull request #340 from pyiron/docstring_fixes
Browse files Browse the repository at this point in the history
Docstring fixes
  • Loading branch information
jan-janssen authored May 29, 2024
2 parents 213017c + da7a792 commit 62fae9c
Show file tree
Hide file tree
Showing 4 changed files with 8 additions and 7 deletions.
2 changes: 2 additions & 0 deletions pympipool/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ class Executor:
gpus_per_worker (int): number of GPUs per worker - defaults to 0
oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
cwd (str/None): current working directory where the parallel python task is executed
executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
context of an HPC cluster this is essential to be able to communicate with an
Executor running on a different compute node within the same allocation. And
Expand Down Expand Up @@ -126,6 +127,7 @@ def __new__(
gpus_per_worker (int): number of GPUs per worker - defaults to 0
oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
cwd (str/None): current working directory where the parallel python task is executed
executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
context of an HPC cluster this is essential to be able to communicate with an
Executor running on a different compute node within the same allocation. And
Expand Down
5 changes: 2 additions & 3 deletions pympipool/scheduler/flux.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class PyFluxExecutor(ExecutorBroker):
Examples:
>>> import numpy as np
>>> from pympipool.flux import PyFluxExecutor
>>> from pympipool.scheduler.flux import PyFluxExecutor
>>>
>>> def calc(i, j, k):
>>> from mpi4py import MPI
Expand Down Expand Up @@ -104,7 +104,6 @@ class PyFluxStepExecutor(ExecutorSteps):
cores_per_worker (int): number of MPI cores to be used for each function call
threads_per_core (int): number of OpenMP threads to be used for each function call
gpus_per_worker (int): number of GPUs per worker - defaults to 0
init_function (None): optional function to preset arguments for functions which are submitted later
cwd (str/None): current working directory where the parallel python task is executed
executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
Expand All @@ -118,7 +117,7 @@ class PyFluxStepExecutor(ExecutorSteps):
Examples:
>>> import numpy as np
>>> from pympipool.flux import PyFluxStepExecutor
>>> from pympipool.scheduler.flux import PyFluxStepExecutor
>>>
>>> def calc(i, j, k):
>>> from mpi4py import MPI
Expand Down
4 changes: 2 additions & 2 deletions pympipool/scheduler/mpi.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ class PyMPIExecutor(ExecutorBroker):
Examples:
>>> import numpy as np
>>> from pympipool.mpi import PyMPIExecutor
>>> from pympipool.scheduler.mpi import PyMPIExecutor
>>>
>>> def calc(i, j, k):
>>> from mpi4py import MPI
Expand Down Expand Up @@ -108,7 +108,7 @@ class PyMPIStepExecutor(ExecutorSteps):
Examples:
>>> import numpy as np
>>> from pympipool.mpi import PyMPIStepExecutor
>>> from pympipool.scheduler.mpi import PyMPIStepExecutor
>>>
>>> def calc(i, j, k):
>>> from mpi4py import MPI
Expand Down
4 changes: 2 additions & 2 deletions pympipool/scheduler/slurm.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ class PySlurmExecutor(ExecutorBroker):
Examples:
>>> import numpy as np
>>> from pympipool.slurm import PySlurmExecutor
>>> from pympipool.scheduler.slurm import PySlurmExecutor
>>>
>>> def calc(i, j, k):
>>> from mpi4py import MPI
Expand Down Expand Up @@ -117,7 +117,7 @@ class PySlurmStepExecutor(ExecutorSteps):
Examples:
>>> import numpy as np
>>> from pympipool.slurm import PySlurmStepExecutor
>>> from pympipool.scheduler.slurm import PySlurmStepExecutor
>>>
>>> def calc(i, j, k):
>>> from mpi4py import MPI
Expand Down

0 comments on commit 62fae9c

Please sign in to comment.