[draft] Drop Python 3.7 and allow tests to work in isolation #565

Draft · wants to merge 7 commits into main
ci/environment.yml: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ channels:
   - conda-forge
   - defaults
 dependencies:
-  - python=3.7
+  - python=3.8
   - dask
   - distributed
   - flake8
ci/pbs/Dockerfile: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ RUN curl -o miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
     bash miniconda.sh -f -b -p /opt/anaconda && \
     /opt/anaconda/bin/conda clean -tipy && \
     rm -f miniconda.sh
-RUN conda install --yes -c conda-forge python=3.7 dask distributed flake8 pytest pytest-asyncio
+RUN conda install --yes -c conda-forge python=3.8 dask distributed flake8 pytest pytest-asyncio
 
 # Copy entrypoint and other needed scripts
 COPY ./*.sh /
ci/sge/docker-compose.yml: 3 additions & 3 deletions
@@ -8,7 +8,7 @@ services:
       context: .
       target: master
       args:
-        PYTHON_VERSION: 3.7
+        PYTHON_VERSION: 3.8
     container_name: sge_master
     hostname: sge_master
     #network_mode: host
@@ -22,7 +22,7 @@
       context: .
       target: slave
       args:
-        PYTHON_VERSION: 3.7
+        PYTHON_VERSION: 3.8
     container_name: slave_one
     hostname: slave_one
     #network_mode: host
@@ -40,7 +40,7 @@
       context: .
       target: slave
       args:
-        PYTHON_VERSION: 3.7
+        PYTHON_VERSION: 3.8
     container_name: slave_two
     hostname: slave_two
     #network_mode: host
ci/slurm/Dockerfile: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ RUN curl -o miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
     /opt/anaconda/bin/conda clean -tipy && \
     rm -f miniconda.sh
 ENV PATH /opt/anaconda/bin:$PATH
-RUN conda install --yes -c conda-forge python=3.7 dask distributed flake8 pytest pytest-asyncio
+RUN conda install --yes -c conda-forge python=3.8 dask distributed flake8 pytest pytest-asyncio
 
 ENV LC_ALL en_US.UTF-8
 
conftest.py: 19 additions & 7 deletions
@@ -1,10 +1,11 @@
 # content of conftest.py
 
 # Make loop fixture available in all tests
-from distributed.utils_test import loop  # noqa: F401
+from distributed.utils_test import loop, clean  # noqa: F401
 
 import pytest
 
+import dask_jobqueue.config
 import dask_jobqueue.lsf
 import dask
 
@@ -89,21 +90,32 @@ def mock_lsf_version(monkeypatch, request):
 }
 
 
+@pytest.fixture
+def cleanup():
+    with clean():
+        dask_jobqueue.config.reconfigure()
+        yield
+
+
 @pytest.fixture(
     params=[pytest.param(v, marks=[pytest.mark.env(k)]) for (k, v) in all_envs.items()]
 )
-def EnvSpecificCluster(request):
+def EnvSpecificCluster(request, cleanup):
     """Run test only with the specific cluster class set by the environment"""
     if request.param == HTCondorCluster:
         # HTCondor requires explicitly specifying requested disk space
-        dask.config.set({"jobqueue.htcondor.disk": "1GB"})
-    return request.param
+        with dask.config.set({"jobqueue.htcondor.disk": "1GB"}):
+            yield request.param
+    else:
+        yield request.param
 
 
 @pytest.fixture(params=list(all_envs.values()))
-def Cluster(request):
+def Cluster(request, cleanup):
     """Run test for each cluster class when no environment is set (test should not require the actual scheduler)"""
     if request.param == HTCondorCluster:
         # HTCondor requires explicitly specifying requested disk space
-        dask.config.set({"jobqueue.htcondor.disk": "1GB"})
-    return request.param
+        with dask.config.set({"jobqueue.htcondor.disk": "1GB"}):
+            yield request.param
+    else:
+        yield request.param
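For context, a minimal sketch of how a test would consume these fixtures (hypothetical test, not part of this diff): requesting cleanup, directly or via Cluster/EnvSpecificCluster, wraps the test in distributed's clean() check and re-applies the packaged jobqueue defaults, which is what lets a test file pass when run in isolation.

import dask


def test_defaults_available_in_isolation(cleanup):
    # reconfigure() has re-applied jobqueue.yaml, so packaged defaults such
    # as jobqueue.pbs.walltime resolve even when no other test ran first.
    assert dask.config.get("jobqueue.pbs.walltime") is not None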
dask_jobqueue/config.py: 10 additions & 5 deletions
@@ -3,10 +3,15 @@
 import dask
 import yaml
 
-fn = os.path.join(os.path.dirname(__file__), "jobqueue.yaml")
-dask.config.ensure_file(source=fn)
-
-with open(fn) as f:
-    defaults = yaml.safe_load(f)
-
-dask.config.update(dask.config.config, defaults, priority="old")
+def reconfigure():
+    fn = os.path.join(os.path.dirname(__file__), "jobqueue.yaml")
+    dask.config.ensure_file(source=fn)
+
+    with open(fn) as f:
+        defaults = yaml.safe_load(f)
+
+    dask.config.update(dask.config.config, defaults, priority="old")
+
+
+reconfigure()
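The behavioral point here is priority="old": re-running reconfigure() fills in any missing jobqueue keys without clobbering values a user or test has already set. A small self-contained sketch of that semantics (illustrative values, not the packaged defaults):

import dask

# A value already present in the live config...
dask.config.set({"jobqueue.pbs.queue": "debug"})

# ...survives an update with priority="old"; only missing keys are filled in.
defaults = {"jobqueue": {"pbs": {"queue": "regular", "walltime": "00:30:00"}}}
dask.config.update(dask.config.config, defaults, priority="old")

assert dask.config.get("jobqueue.pbs.queue") == "debug"
assert dask.config.get("jobqueue.pbs.walltime") == "00:30:00"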
dask_jobqueue/core.py: 22 additions & 2 deletions
@@ -1,4 +1,5 @@
-from contextlib import contextmanager, suppress
+from contextlib import closing, contextmanager, suppress
+import asyncio
 import logging
 import math
 import os
@@ -22,6 +23,18 @@
 from distributed.scheduler import Scheduler
 from distributed.security import Security
 
+
+def _maybe_get_running_loop():
+    try:
+        return asyncio.get_running_loop()
+    except RuntimeError:
+        pass
+
+
+async def _acall(fn):
+    return fn()
+
+
 logger = logging.getLogger(__name__)
 
 job_parameters = """
@@ -557,7 +570,14 @@ def __init__(
             "-" + str(i) for i in range(self._job_kwargs["processes"])
         ]
 
-        self._dummy_job  # trigger property to ensure that the job is valid
+        def _trigger_property():
+            self._dummy_job  # trigger property to ensure that the job is valid
+
+        if _maybe_get_running_loop() is not None:
+            _trigger_property()
+        else:
+            with closing(asyncio.new_event_loop()) as loop:
+                loop.run_until_complete(_acall(_trigger_property))
 
         super().__init__(
             scheduler=scheduler,
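The helper pair handles constructors called outside any event loop: evaluating _dummy_job presumably needs a running loop in some configurations, so when none exists a throwaway loop is created just for the validation. A standalone sketch of the same pattern, with hypothetical names:

# Run a synchronous callable, reusing the running event loop when there is
# one, otherwise running it inside a short-lived private loop.
import asyncio
from contextlib import closing


def maybe_get_running_loop():
    try:
        return asyncio.get_running_loop()
    except RuntimeError:
        return None  # no loop is running in this thread


async def acall(fn):
    # Minimal async shim: lets a plain callable run under run_until_complete.
    return fn()


def call_with_loop(fn):
    if maybe_get_running_loop() is not None:
        return fn()
    with closing(asyncio.new_event_loop()) as loop:
        return loop.run_until_complete(acall(fn))


print(call_with_loop(lambda: "job script validated"))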
docs/environment.yml: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ name: dask-jobqueue-docs
 channels:
   - conda-forge
 dependencies:
-  - python=3.7
+  - python=3.8
   - distributed
   - numpydoc
   - ipython
docs/source/debug.rst: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ the right module just before:
     distributed.nanny - INFO - Worker closed
 
 This happens when you created the cluster using a different python than the one
-you want to use for your workers (here ``module load python/3.7.5``), giving
+you want to use for your workers (here ``module load python/3.8.5``), giving
 the following job script (pay attention to the last line which will show which
 ``python`` is used):
setup.py: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
     cmdclass=versioneer.get_cmdclass(),
     description="Deploy Dask on job queuing systems like PBS, Slurm, SGE or LSF",
     url="https://jobqueue.dask.org",
-    python_requires=">=3.7",
+    python_requires=">=3.8",
     license="BSD 3-Clause",
     packages=["dask_jobqueue"],
     include_package_data=True,