Skip to content

Commit

Permalink
Add deploy-mode to configuration (#61)
Browse files Browse the repository at this point in the history
* Add deploy-mode to configuration

Allow setting the default deploy mode (remote or local) via the configuration file.

* Update travis config

* Bump timeouts for tests
  • Loading branch information
jcrist committed Apr 22, 2019
1 parent 68858c3 commit 56801c9
Show file tree
Hide file tree
Showing 4 changed files with 17 additions and 11 deletions.
8 changes: 4 additions & 4 deletions .travis.yml
Expand Up @@ -20,10 +20,10 @@ jobs:

include:
- env:
- CLUSTER_TYPE=kerberos
- CLUSTER_CONFIG=kerberos
- PYTHON=3.6
- env:
- CLUSTER_TYPE=base
- CLUSTER_CONFIG=simple
- PYTHON=2.7

before_install:
Expand All @@ -35,7 +35,7 @@ before_install:
# Install the test cluster
- pip install git+https://github.com/jcrist/hadoop-test-cluster.git
# Start the test cluster
- htcluster startup --image $CLUSTER_TYPE:latest --mount .:dask-yarn
- htcluster startup --mount .:dask-yarn --config $CLUSTER_CONFIG

install:
- htcluster exec -- ./dask-yarn/continuous_integration/travis/install.sh $PYTHON
Expand All @@ -44,7 +44,7 @@ script:
- set -e
- export CONDA_ENV=/home/testuser/miniconda/envs/test-environment
- |
if [[ "$CLUSTER_TYPE" == "kerberos" ]]; then
if [[ "$CLUSTER_CONFIG" == "kerberos" ]]; then
htcluster exec -- kinit testuser -kt testuser.keytab
fi
- htcluster exec -- ./dask-yarn/continuous_integration/travis/run-tests.sh
Expand Down
2 changes: 1 addition & 1 deletion dask_yarn/core.py
Expand Up @@ -94,7 +94,7 @@ def _make_specification(**kwargs):
return skein.ApplicationSpec.from_dict(spec)
return skein.ApplicationSpec.from_file(spec)

deploy_mode = kwargs.get('deploy_mode') or 'remote'
deploy_mode = lookup(kwargs, 'deploy_mode', 'yarn.deploy-mode')
if deploy_mode not in {'remote', 'local'}:
raise ValueError("`deploy_mode` must be one of {'remote', 'local'}, "
"got %r" % deploy_mode)
Expand Down
17 changes: 11 additions & 6 deletions dask_yarn/tests/test_core.py
Expand Up @@ -46,15 +46,15 @@ def test_basic(deploy_mode, skein_client, conda_env):
start = time.time()
while len(cluster.workers()) != 2:
time.sleep(0.1)
assert time.time() < start + 5, "timeout cluster.scale(2)"
assert time.time() < start + 30, "timeout cluster.scale(2)"

# Scale down
cluster.scale(1)

start = time.time()
while len(cluster.workers()) != 1:
time.sleep(0.1)
assert time.time() < start + 5, "timeout cluster.scale(1)"
assert time.time() < start + 30, "timeout cluster.scale(1)"

check_is_shutdown(skein_client, cluster.app_id)

Expand Down Expand Up @@ -107,7 +107,7 @@ def test_from_application_id(skein_client, conda_env):
start = time.time()
while len(cluster2.workers()) != 1:
time.sleep(0.1)
assert time.time() < start + 5, "timeout cluster.scale(1)"
assert time.time() < start + 30, "timeout cluster.scale(1)"

del cluster2

Expand Down Expand Up @@ -156,7 +156,7 @@ def test_from_current(skein_client, conda_env, monkeypatch, tmpdir):
start = time.time()
while len(cluster2.workers()) != 1:
time.sleep(0.1)
assert time.time() < start + 5, "timeout cluster.scale(1)"
assert time.time() < start + 30, "timeout cluster.scale(1)"

del cluster2

Expand All @@ -166,14 +166,16 @@ def test_from_current(skein_client, conda_env, monkeypatch, tmpdir):
check_is_shutdown(skein_client, cluster.app_id)


def test_configuration():
@pytest.mark.parametrize('deploy_mode', ['remote', 'local'])
def test_configuration(deploy_mode):
config = {'yarn': {
'environment': 'myenv.tar.gz',
'queue': 'myqueue',
'name': 'dask-yarn-tests',
'user': 'alice',
'tags': ['a', 'b', 'c'],
'specification': None,
'deploy-mode': deploy_mode,
'worker': {'memory': '1234 MiB', 'count': 1, 'vcores': 1, 'restarts': -1,
'env': {'foo': 'bar'}},
'scheduler': {'memory': '1234 MiB', 'vcores': 1}}
Expand All @@ -187,7 +189,10 @@ def test_configuration():
assert spec.tags == {'a', 'b', 'c'}
assert spec.services['dask.worker'].resources.memory == 1234
assert spec.services['dask.worker'].env == {'foo': 'bar'}
assert spec.services['dask.scheduler'].resources.memory == 1234
if deploy_mode == 'remote':
assert spec.services['dask.scheduler'].resources.memory == 1234
else:
assert 'dask.scheduler' not in spec.services


def test_configuration_full_specification(conda_env, tmpdir):
Expand Down
1 change: 1 addition & 0 deletions dask_yarn/yarn.yaml
Expand Up @@ -4,6 +4,7 @@ yarn:

name: dask # Application name
queue: default # Yarn queue to deploy to
deploy-mode: remote # The deploy mode to use (either remote or local)
environment: null # Path to conda packed environment
tags: [] # List of strings to tag applications
user: '' # The user to submit the application on behalf of,
Expand Down

0 comments on commit 56801c9

Please sign in to comment.