Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix hard-coded component label for services_enabled=True #654

Merged
merged 2 commits into from Nov 10, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
26 changes: 12 additions & 14 deletions kubespawner/objects.py
Expand Up @@ -6,7 +6,7 @@
import json
import operator
import re
from typing import Optional
from typing import List, Optional
from urllib.parse import urlparse

from kubernetes_asyncio.client.models import (
Expand Down Expand Up @@ -938,12 +938,12 @@ def make_secret(


def make_service(
name,
port,
servername,
owner_references,
labels=None,
annotations=None,
name: str,
port: int,
selector: dict,
owner_references: List[V1OwnerReference],
labels: Optional[dict] = None,
annotations: Optional[dict] = None,
):
"""
Make a k8s service specification for using dns to communicate with the notebook.
Expand All @@ -953,8 +953,10 @@ def make_service(
name:
Name of the service. Must be unique within the namespace the object is
going to be created in.
env:
Dictionary of environment variables.
selector:
Labels of server pod to be used in spec.selector
owner_references:
        Pod's owner references used to automatically remove the service after pod removal
labels:
Labels to add to the service.
annotations:
Expand All @@ -975,11 +977,7 @@ def make_service(
spec=V1ServiceSpec(
type='ClusterIP',
ports=[V1ServicePort(name='http', port=port, target_port=port)],
selector={
'component': 'singleuser-server',
'hub.jupyter.org/servername': servername,
'hub.jupyter.org/username': metadata.labels['hub.jupyter.org/username'],
},
selector=selector,
),
)

Expand Down
9 changes: 6 additions & 3 deletions kubespawner/spawner.py
Expand Up @@ -118,6 +118,9 @@ class KubeSpawner(Spawner):
spawned by a user will have its own KubeSpawner instance.
"""

# Reflectors keeping track of the k8s api-server's state for various k8s
# resources are singletons as that state can be tracked and shared by all
# KubeSpawner objects.
reflectors = {
"pods": None,
"events": None,
Expand Down Expand Up @@ -2117,12 +2120,13 @@ def get_service_manifest(self, owner_reference):
annotations = self._build_common_annotations(
self._expand_all(self.extra_annotations)
)
selector = self._build_pod_labels(self._expand_all(self.extra_labels))

# TODO: validate that the service name
return make_service(
name=self.pod_name,
port=self.port,
servername=self.name,
selector=selector,
owner_references=[owner_reference],
labels=labels,
annotations=annotations,
Expand Down Expand Up @@ -2383,7 +2387,6 @@ def _start_reflector(
):
"""Start a shared reflector on the KubeSpawner class


kind: key for the reflector (e.g. 'pod' or 'events')
reflector_class: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass
Expand Down Expand Up @@ -2418,7 +2421,7 @@ def on_reflector_failure():
async def catch_reflector_start():
try:
await f
except Exception as e:
except Exception:
self.log.exception(f"Reflector for {kind} failed to start.")
sys.exit(1)

Expand Down
20 changes: 20 additions & 0 deletions tests/conftest.py
Expand Up @@ -31,6 +31,7 @@
from kubernetes_asyncio.watch import Watch
from traitlets.config import Config

from kubespawner import KubeSpawner
from kubespawner.clients import shared_client

here = os.path.abspath(os.path.dirname(__file__))
Expand Down Expand Up @@ -623,3 +624,22 @@ async def exec_python(kube_client, kube_ns):
pod = await create_resource(kube_client, kube_ns, "pod", pod_manifest)

yield partial(_exec_python_in_pod, kube_client, kube_ns, pod_name)


@pytest.fixture(scope="function")
def reset_pod_reflector():
    """
    Clear the shared KubeSpawner.reflectors["pods"] singleton both before and
    after the wrapped test runs. This lets a test that exercises configuration
    influencing the pod reflector start from a clean slate.
    """

    def _clear():
        # Detach the current reflector (if any) and schedule its shutdown.
        old_reflector = KubeSpawner.reflectors["pods"]
        KubeSpawner.reflectors["pods"] = None
        if old_reflector:
            asyncio.ensure_future(old_reflector.stop())

    _clear()
    yield
    _clear()
109 changes: 107 additions & 2 deletions tests/test_spawner.py
Expand Up @@ -206,6 +206,40 @@ async def test_spawn_start(
assert isinstance(status, int)


async def test_spawn_component_label(
    kube_ns,
    kube_client,
    config,
    hub,
    reset_pod_reflector,
):
    """A custom ``component_label`` should be applied to the spawned pod."""
    spawner = KubeSpawner(
        hub=hub,
        user=MockUser(name="start"),
        config=config,
        api_token="abc123",
        oauth_client_id="unused",
        component_label="something",
    )

    pod_name = spawner.pod_name

    # launch the server pod
    await spawner.start()

    # the pod should now exist in the test namespace
    all_pods = (await kube_client.list_namespaced_pod(kube_ns)).items
    matching = [p for p in all_pods if p.metadata.name == pod_name]
    assert matching

    # the configured component label must be applied verbatim
    assert matching[0].metadata.labels["component"] == "something"

    # tear the pod down again
    await spawner.stop()


async def test_spawn_internal_ssl(
kube_ns,
kube_client,
Expand Down Expand Up @@ -301,6 +335,76 @@ async def test_spawn_internal_ssl(
assert service_name not in service_names


async def test_spawn_services_enabled(
    kube_ns,
    kube_client,
    hub,
    config,
    reset_pod_reflector,
):
    """With ``services_enabled``, a service is created whose selector carries
    the configured component_label, common_labels, extra_labels, and the
    user/server name labels; both pod and service disappear after stop()."""
    spawner = KubeSpawner(
        config=config,
        hub=hub,
        user=MockUser(name="services"),
        api_token="abc123",
        oauth_client_id="unused",
        services_enabled=True,
        component_label="something",
        common_labels={
            "some/label": "value1",
        },
        extra_labels={
            "extra/label": "value2",
        },
    )

    # launch the server pod
    await spawner.start()
    pod_name = "jupyter-%s" % spawner.user.name

    # the pod should now exist
    pod_names = [
        p.metadata.name
        for p in (await kube_client.list_namespaced_pod(kube_ns)).items
    ]
    assert pod_name in pod_names

    # poll() reports a running server as None
    assert await spawner.poll() is None

    # a service named after the pod should have been created
    service_name = pod_name
    matching_services = [
        s
        for s in (await kube_client.list_namespaced_service(kube_ns)).items
        if s.metadata.name == service_name
    ]
    assert matching_services

    # the selector must include component_label, common_labels and
    # extra_labels as well as the user and server name labels
    selector = matching_services[0].spec.selector
    assert selector["component"] == "something"
    assert selector["some/label"] == "value1"
    assert selector["extra/label"] == "value2"
    assert selector["hub.jupyter.org/servername"] == ""
    assert selector["hub.jupyter.org/username"] == "services"

    # tear the pod down again
    await spawner.stop()

    # the pod should be gone
    pod_names = [
        p.metadata.name
        for p in (await kube_client.list_namespaced_pod(kube_ns)).items
    ]
    assert "jupyter-%s" % spawner.user.name not in pod_names

    # the service may take a little while to be cleaned up, so retry briefly
    for _ in range(5):
        service_names = {
            s.metadata.name
            for s in (await kube_client.list_namespaced_service(kube_ns)).items
        }
        if service_name in service_names:
            await asyncio.sleep(1)
        else:
            break

    assert service_name not in service_names


async def test_spawn_after_pod_created_hook(
kube_ns,
kube_client,
Expand All @@ -314,11 +418,12 @@ async def after_pod_created_hook(spawner: KubeSpawner, pod: dict):
annotations = spawner._build_common_annotations(
spawner._expand_all(spawner.extra_annotations)
)
selector = spawner._build_pod_labels(spawner._expand_all(spawner.extra_labels))

service_manifest = make_service(
name=spawner.pod_name + "-hook",
port=spawner.port,
servername=spawner.name,
selector=selector,
owner_references=[owner_reference],
labels=labels,
annotations=annotations,
Expand All @@ -340,7 +445,7 @@ async def after_pod_created_hook(spawner: KubeSpawner, pod: dict):
spawner = KubeSpawner(
config=config,
hub=hub,
user=MockUser(name="ssl"),
user=MockUser(name="hook"),
api_token="abc123",
oauth_client_id="unused",
after_pod_created_hook=after_pod_created_hook,
Expand Down