Merge pull request #156 from mrocklin/check-dask-master
Test against dask master
TomAugspurger authored Jun 20, 2019
2 parents 5038808 + e677562 commit 038d2bb
Showing 8 changed files with 133 additions and 57 deletions.
.circleci/config.yml: 14 changes (10 additions, 4 deletions)
@@ -7,7 +7,7 @@ jobs:
       MINIKUBE_WANTUPDATENOTIFICATION: false
       MINIKUBE_WANTREPORTERRORPROMPT: false
       CHANGE_MINIKUBE_NONE_USER: true
-      PYTHON: "3.6"
+      PYTHON: "3.7"
       ENV_NAME: "dask-kubernetes-test"
 
     steps:
@@ -41,7 +41,7 @@ jobs:
             sudo kubectl get deployment
       - restore_cache:
           keys:
-            - miniconda-v1-{{ checksum "ci/environment-3.6.yml" }}
+            - miniconda-v1-{{ checksum "ci/environment-3.7.yml" }}
       - run:
           name: install miniconda
           command: |
@@ -65,12 +65,18 @@ jobs:
             conda env list
             conda list ${ENV_NAME}
       - save_cache:
-          key: miniconda-v1-{{ checksum "ci/environment-3.6.yml" }}
+          key: miniconda-v1-{{ checksum "ci/environment-3.7.yml" }}
           paths:
             - "/home/circleci/miniconda"
       - run:
+          name: build docker
           command: |
-            /home/circleci/miniconda/envs/dask-kubernetes-test/bin/py.test dask_kubernetes -s --verbose --worker-image daskdev/dask:0.19.4
+            # eval $(minikube docker-env)
+            docker build -t daskdev/dask:dev docker/
+      - run:
+          command: |
+            # eval $(minikube docker-env)
+            /home/circleci/miniconda/envs/dask-kubernetes-test/bin/py.test dask_kubernetes -s --verbose --worker-image daskdev/dask:dev
       - run:
           command: |
             /home/circleci/miniconda/envs/dask-kubernetes-test/bin/flake8 dask-kubernetes
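The net effect of this job: CI now builds a daskdev/dask:dev image from the new docker/ directory (which installs dask and distributed from git master) and runs the test suite against it, instead of testing against the released daskdev/dask:0.19.4 image. The same image can also be handed to a cluster directly; a minimal sketch, assuming daskdev/dask:dev has been built as in the step above:

    from dask_kubernetes import KubeCluster, make_pod_spec

    # Run worker pods on the locally built dev image instead of a release.
    pod_spec = make_pod_spec(image="daskdev/dask:dev")
    cluster = KubeCluster(pod_spec, n_workers=2)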
ci/environment-3.6.yml: 16 changes (0 additions, 16 deletions)

This file was deleted.
ci/environment-3.7.yml: 33 changes (33 additions, 0 deletions)
@@ -0,0 +1,33 @@
+name: dask-kubernetes-test
+channels:
+  - defaults
+  - conda-forge
+dependencies:
+  - python=3.7
+  - nomkl
+  - pip
+  # testing / CI
+  - flake8
+  - ipywidgets
+  - pytest
+  - black
+  - pyyaml
+  # dask dependencies
+  - cloudpickle
+  - toolz
+  - cytoolz
+  - numpy
+  - partd
+  # distributed dependencies
+  - click >=6.6
+  - msgpack-python
+  - psutil >=5.0
+  - six
+  - sortedcontainers !=2.0.0,!=2.0.1
+  - tblib
+  - tornado >=5
+  - zict >=0.1.3
+  - pip:
+      - kubernetes==9
+      - git+https://github.com/dask/dask
+      - git+https://github.com/dask/distributed
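The two git+ entries under pip are what actually put this environment on the development branches; everything else tracks released packages. A quick sketch for confirming, from inside the environment, that dask and distributed resolved from git master rather than from released wheels:

    import dask
    import distributed

    # Versioneer-style dev builds report a local suffix such as
    # "2.0.0+27.g1a2b3c4" (illustrative); a bare release number would
    # mean a published wheel was installed instead.
    print("dask:", dask.__version__)
    print("distributed:", distributed.__version__)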
dask_kubernetes/core.py: 26 changes (20 additions, 6 deletions)
@@ -190,10 +190,7 @@ def __init__(
             )
             raise ValueError(msg)
 
-        self.cluster = LocalCluster(
-            ip=host or socket.gethostname(), scheduler_port=port, n_workers=0, **kwargs
-        )
-
         pod_template = clean_pod_template(pod_template)
         ClusterAuth.load_first(auth)
 
         self.core_api = kubernetes.client.CoreV1Api()
@@ -205,15 +205,24 @@ def __init__(
             user=getpass.getuser(), uuid=str(uuid.uuid4())[:10], **os.environ
         )
         name = escape(name)
-        self.pod_template = pod_template
 
+        self.pod_template = clean_pod_template(pod_template)
         # Default labels that can't be overwritten
         self.pod_template.metadata.labels["dask.org/cluster-name"] = name
         self.pod_template.metadata.labels["user"] = escape(getpass.getuser())
         self.pod_template.metadata.labels["app"] = "dask"
         self.pod_template.metadata.labels["component"] = "dask-worker"
         self.pod_template.metadata.namespace = namespace
 
+        self.cluster = LocalCluster(
+            host=host or socket.gethostname(),
+            scheduler_port=port,
+            n_workers=0,
+            **kwargs
+        )
+
+        # TODO: handle any exceptions here, ensure self.cluster is properly
+        # cleaned up.
         self.pod_template.spec.containers[0].env.append(
             kubernetes.client.V1EnvVar(
                 name="DASK_SCHEDULER_ADDRESS", value=self.scheduler_address
@@ -231,7 +237,11 @@ def __init__(
         finalize(self, _cleanup_pods, self.namespace, self.pod_template.metadata.labels)
 
         if n_workers:
-            self.scale(n_workers)
+            try:
+                self.scale(n_workers)
+            except Exception:
+                self.cluster.close()
+                raise
 
     @classmethod
     def from_dict(cls, pod_spec, **kwargs):
@@ -333,6 +343,10 @@ def pods(self):
             label_selector=format_labels(self.pod_template.metadata.labels),
         ).items
 
+    @property
+    def workers(self):
+        return self.pods()
+
     def logs(self, pod=None):
         """ Logs from a worker pod
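Two details ride along with this reordering. First, LocalCluster is now created after the pod template is prepared, with the newer host= keyword in place of the older ip= spelling, and initial scaling is wrapped in try/except so a failure closes the cluster instead of leaking it. Second, the new workers property simply aliases pods(), presumably so code written against the generic cluster interface can ask for cluster.workers. A usage sketch of the alias, assuming a locally built daskdev/dask:dev image:

    from dask_kubernetes import KubeCluster, make_pod_spec

    with KubeCluster(make_pod_spec(image="daskdev/dask:dev"), n_workers=2) as cluster:
        # Both return the same list of V1Pod objects (modulo a race between
        # the two API calls), so either spelling works.
        assert cluster.workers == cluster.pods()
        for pod in cluster.workers:
            print(pod.metadata.name, pod.status.phase)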
dask_kubernetes/tests/test_core.py: 2 changes (1 addition, 1 deletion)
@@ -135,7 +135,7 @@ def test_diagnostics_link_env_variable(pod_spec, loop, ns):
     pytest.importorskip("ipywidgets")
     with dask.config.set({"distributed.dashboard.link": "foo-{USER}-{port}"}):
         with KubeCluster(pod_spec, loop=loop, namespace=ns) as cluster:
-            port = cluster.scheduler.services["bokeh"].port
+            port = cluster.scheduler.services["dashboard"].port
             cluster._ipython_display_()
             box = cluster._cached_widget
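The lookup key changes because newer distributed registers the diagnostics server under "dashboard" where older releases used "bokeh". A hedged sketch of expanding a link template the way this test configures one; the dashboard_link helper is ours, and the format call assumes a template that uses only {USER} and {port}:

    import getpass

    import dask

    def dashboard_link(cluster):
        # "dashboard" replaces the "bokeh" service key on newer distributed.
        port = cluster.scheduler.services["dashboard"].port
        template = dask.config.get("distributed.dashboard.link")  # e.g. "foo-{USER}-{port}"
        return template.format(USER=getpass.getuser(), port=port)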
dask_kubernetes/tests/test_objects.py: 60 changes (30 additions, 30 deletions)
@@ -7,24 +7,24 @@ def test_extra_pod_config(image_name, loop):
     """
     Test that our pod config merging process works fine
     """
-    cluster = KubeCluster(
+    with KubeCluster(
         make_pod_spec(
             image_name, extra_pod_config={"automountServiceAccountToken": False}
         ),
         loop=loop,
         n_workers=0,
-    )
+    ) as cluster:
 
-    pod = cluster.pod_template
+        pod = cluster.pod_template
 
-    assert pod.spec.automount_service_account_token is False
+        assert pod.spec.automount_service_account_token is False
 
 
 def test_extra_container_config(image_name, loop):
     """
     Test that our container config merging process works fine
     """
-    cluster = KubeCluster(
+    with KubeCluster(
         make_pod_spec(
             image_name,
             extra_container_config={
@@ -34,39 +34,39 @@ def test_extra_container_config(image_name, loop):
         ),
         loop=loop,
         n_workers=0,
-    )
+    ) as cluster:
 
-    pod = cluster.pod_template
+        pod = cluster.pod_template
 
-    assert pod.spec.containers[0].image_pull_policy == "IfNotPresent"
-    assert pod.spec.containers[0].security_context == {"runAsUser": 0}
+        assert pod.spec.containers[0].image_pull_policy == "IfNotPresent"
+        assert pod.spec.containers[0].security_context == {"runAsUser": 0}
 
 
 def test_container_resources_config(image_name, loop):
     """
     Test container resource requests / limits being set properly
     """
-    cluster = KubeCluster(
+    with KubeCluster(
         make_pod_spec(
             image_name, memory_request="1G", memory_limit="2G", cpu_limit="2"
         ),
         loop=loop,
         n_workers=0,
-    )
+    ) as cluster:
 
-    pod = cluster.pod_template
+        pod = cluster.pod_template
 
-    assert pod.spec.containers[0].resources.requests["memory"] == "1G"
-    assert pod.spec.containers[0].resources.limits["memory"] == "2G"
-    assert pod.spec.containers[0].resources.limits["cpu"] == "2"
-    assert "cpu" not in pod.spec.containers[0].resources.requests
+        assert pod.spec.containers[0].resources.requests["memory"] == "1G"
+        assert pod.spec.containers[0].resources.limits["memory"] == "2G"
+        assert pod.spec.containers[0].resources.limits["cpu"] == "2"
+        assert "cpu" not in pod.spec.containers[0].resources.requests
 
 
 def test_extra_container_config_merge(image_name, loop):
     """
     Test that our container config merging process works recursively fine
     """
-    cluster = KubeCluster(
+    with KubeCluster(
         make_pod_spec(
             image_name,
             extra_container_config={
@@ -77,23 +77,23 @@ def test_extra_container_config_merge(image_name, loop):
         ),
         loop=loop,
         n_workers=0,
         env={"TEST": "HI"},
-    )
+    ) as cluster:
 
-    pod = cluster.pod_template
+        pod = cluster.pod_template
 
-    assert pod.spec.containers[0].env == [
-        {"name": "TEST", "value": "HI"},
-        {"name": "BOO", "value": "FOO"},
-    ]
+        assert pod.spec.containers[0].env == [
+            {"name": "TEST", "value": "HI"},
+            {"name": "BOO", "value": "FOO"},
+        ]
 
-    assert pod.spec.containers[0].args[-1] == "last-item"
+        assert pod.spec.containers[0].args[-1] == "last-item"
 
 
 def test_extra_container_config_merge(image_name, loop):
     """
     Test that our container config merging process works recursively fine
     """
-    cluster = KubeCluster(
+    with KubeCluster(
         make_pod_spec(
             image_name,
             env={"TEST": "HI"},
@@ -104,14 +104,14 @@ def test_extra_container_config_merge(image_name, loop):
         ),
         loop=loop,
         n_workers=0,
-    )
+    ) as cluster:
 
-    pod = cluster.pod_template
+        pod = cluster.pod_template
 
-    for e in [{"name": "TEST", "value": "HI"}, {"name": "BOO", "value": "FOO"}]:
-        assert e in pod.spec.containers[0].env
+        for e in [{"name": "TEST", "value": "HI"}, {"name": "BOO", "value": "FOO"}]:
+            assert e in pod.spec.containers[0].env
 
-    assert pod.spec.containers[0].args[-1] == "last-item"
+        assert pod.spec.containers[0].args[-1] == "last-item"
 
 
 def test_make_pod_from_dict():
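Every test in this file now enters the cluster through a with block, so worker pods and the local scheduler are torn down even when an assertion fails mid-test; the old bare form leaked clusters whenever an assert fired before any manual cleanup. A minimal sketch of the pattern, with an assumed worker image:

    from dask_kubernetes import KubeCluster, make_pod_spec

    with KubeCluster(make_pod_spec(image="daskdev/dask:dev"), n_workers=0) as cluster:
        # The "app" label is one of the defaults core.py stamps onto every pod.
        assert cluster.pod_template.metadata.labels["app"] == "dask"
    # __exit__ has cleaned up here, on success or failure.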
docker/Dockerfile: 9 changes (9 additions, 0 deletions)
@@ -0,0 +1,9 @@
+FROM daskdev/dask:latest
+
+RUN conda uninstall -y --force dask-core distributed \
+    && conda clean -afy
+RUN pip install --no-deps --no-cache-dir \
+    git+https://github.com/dask/dask \
+    git+https://github.com/dask/distributed
+
+ENTRYPOINT ["tini", "-g", "--", "/usr/bin/prepare.sh"]
docker/prepare.sh: 30 changes (30 additions, 0 deletions)
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -x
+
+# We start by adding extra apt packages, since pip modules may require system libraries
+if [ "$EXTRA_APT_PACKAGES" ]; then
+    echo "EXTRA_APT_PACKAGES environment variable found. Installing."
+    apt update -y
+    apt install -y $EXTRA_APT_PACKAGES
+fi
+
+if [ -e "/opt/app/environment.yml" ]; then
+    echo "environment.yml found. Installing packages"
+    /opt/conda/bin/conda env update -f /opt/app/environment.yml
+else
+    echo "no environment.yml"
+fi
+
+if [ "$EXTRA_CONDA_PACKAGES" ]; then
+    echo "EXTRA_CONDA_PACKAGES environment variable found. Installing."
+    /opt/conda/bin/conda install -y $EXTRA_CONDA_PACKAGES
+fi
+
+if [ "$EXTRA_PIP_PACKAGES" ]; then
+    echo "EXTRA_PIP_PACKAGES environment variable found. Installing."
+    /opt/conda/bin/pip install $EXTRA_PIP_PACKAGES
+fi
+
+# Run extra commands
+exec "$@"
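Because the image's entrypoint routes through this script before exec-ing the worker command, extra dependencies can be injected per pod with environment variables rather than image rebuilds. A hedged sketch using make_pod_spec's env= argument; the s3fs and numba package names are only illustrative:

    from dask_kubernetes import KubeCluster, make_pod_spec

    # prepare.sh installs these at container start, before the worker launches
    # (EXTRA_APT_PACKAGES works the same way for system libraries).
    pod_spec = make_pod_spec(
        image="daskdev/dask:dev",
        env={"EXTRA_PIP_PACKAGES": "s3fs", "EXTRA_CONDA_PACKAGES": "numba"},
    )
    cluster = KubeCluster(pod_spec, n_workers=2)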
