diff --git a/dask_jobqueue/oar.py b/dask_jobqueue/oar.py
index 1d5defc8..43222cf6 100644
--- a/dask_jobqueue/oar.py
+++ b/dask_jobqueue/oar.py
@@ -71,7 +71,7 @@ def __init__(
 
         logger.debug("Job script: \n %s" % self.job_script())
 
-    def _submit_job(self, fn):
+    async def _submit_job(self, fn):
        # OAR specificity: the submission script needs to exist on the worker
        # when the job starts on the worker. This is different from other
        # schedulers that only need the script on the submission node at
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 38a4a1cc..28b3b956 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -1,8 +1,8 @@
 Changelog
 =========
 
-Devlopment version
-------------------
+Development version
+-------------------
 
 - ``LSFCluster``: add ``use_stdin`` to ``LSFCluster``. This switches between
   ``bsub < job_script`` and ``bsub job_script`` to launch a ``LSF`` job
@@ -13,6 +13,7 @@ Devlopment version
   ``processes ~= sqrt(cores)`` so that the number of processes and the
   number of threads per process is roughly the same. Old default was to use
   one process and only threads, i.e. ``proccesses=1``, ``threads_per_process=cores``.
+- fix bug (forgotten async def) in ``OARCluster._submit_job`` (:pr:`380`).
 
 0.7.0 / 2019-10-09
 ------------------