Skip to content

Commit

Permalink
Fix configmaps that need to be manually set when running the user dep…
Browse files Browse the repository at this point in the history
…loyments helm chart in a different namespace (#7660)

Summary:
- We were requiring the dagster-instance configmap, even though for a long time now we have been passing through the dagster.yaml file as an instance ref instead. Instead, just don't pass through the dagster-instance configmap anymore. I tried to think it through and I don't actually think there's a back-compat issue here: as long as the pod with the run launcher is using the new version of the helm chart, it'll be passing through the instance ref (and we've been checking for a passed-in instance ref in the entry point for execute_run for a very long time now).

- We were using the wrong configmap name for DAGSTER_K8S_PIPELINE_RUN_ENV_CONFIGMAP for the user code deployments - it's intended to refer to the configmap in which it is defined. Since dagsterUserDeployments.sharedEnv is defined in the -user-deployments-shared-env configmap, that's the one that it should use here.

These two fixes allow us to remove the requirement in the integration tests that the configmaps need to be copied over from one namespace to the other.

Test Plan: BK

Co-authored-by: Sean Mackesey <s.mackesey@gmail.com>
  • Loading branch information
gibsondan and smackesey committed May 2, 2022
1 parent f46fc50 commit 9941f61
Show file tree
Hide file tree
Showing 5 changed files with 16 additions and 74 deletions.
2 changes: 1 addition & 1 deletion helm/dagster/schema/schema_tests/test_instance.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ def test_k8s_run_launcher_config(template: HelmTemplate):
assert run_launcher_config["config"]["job_namespace"] == job_namespace
assert run_launcher_config["config"]["load_incluster_config"] == load_incluster_config
assert run_launcher_config["config"]["image_pull_policy"] == image_pull_policy
assert run_launcher_config["config"]["env_config_maps"][1:] == [
assert run_launcher_config["config"]["env_config_maps"] == [
configmap["name"] for configmap in env_config_maps
]
assert run_launcher_config["config"]["env_secrets"] == [
Expand Down
21 changes: 8 additions & 13 deletions helm/dagster/templates/helpers/instance/_run-launcher.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,9 @@
module: dagster_celery_k8s
class: CeleryK8sRunLauncher
config:
dagster_home:
env: DAGSTER_HOME
instance_config_map:
env: DAGSTER_K8S_INSTANCE_CONFIG_MAP
postgres_password_secret:
env: DAGSTER_K8S_PG_PASSWORD_SECRET
dagster_home: {{ .Values.global.dagsterHome | quote }}
instance_config_map: "{{ template "dagster.fullname" .}}-instance"
postgres_password_secret: {{ include "dagster.postgresql.secretName" . | quote }}
broker:
env: DAGSTER_CELERY_BROKER_URL
backend:
Expand Down Expand Up @@ -81,19 +78,17 @@ config:
{{- if (hasKey $k8sRunLauncherConfig "image") }}
job_image: {{ include "dagster.externalImage.name" (list $ $k8sRunLauncherConfig.image) | quote }}
{{- end }}
dagster_home:
env: DAGSTER_HOME
instance_config_map:
env: DAGSTER_K8S_INSTANCE_CONFIG_MAP
postgres_password_secret:
env: DAGSTER_K8S_PG_PASSWORD_SECRET
dagster_home: {{ .Values.global.dagsterHome | quote }}
instance_config_map: "{{ template "dagster.fullname" .}}-instance"
postgres_password_secret: {{ include "dagster.postgresql.secretName" . | quote }}
{{- if $k8sRunLauncherConfig.envConfigMaps }}
env_config_maps:
- env: DAGSTER_K8S_PIPELINE_RUN_ENV_CONFIGMAP
{{- range $envConfigMap := $k8sRunLauncherConfig.envConfigMaps }}
{{- if hasKey $envConfigMap "name" }}
- {{ $envConfigMap.name }}
{{- end }}
{{- end }}
{{- end }}

{{- if $k8sRunLauncherConfig.envSecrets }}
env_secrets:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -388,33 +388,6 @@ def create_postgres_secret(namespace, should_cleanup):
kube_api.delete_namespaced_secret(name="dagster-postgresql-secret", namespace=namespace)


@contextmanager
def copy_configmaps(system_namespace, user_code_namespace, should_cleanup, configmap_names):
    """Copy the named configmaps from system_namespace into user_code_namespace.

    Yields after the copies are created; on exit, removes the copied configmaps
    from user_code_namespace when should_cleanup is set.

    Args:
        system_namespace (str): Namespace to read the source configmaps from.
        user_code_namespace (str): Namespace to create the copies in.
        should_cleanup (bool): Whether to delete the copies on exit.
        configmap_names (Iterable[str]): Names of the configmaps to copy.
    """
    kube_api = kubernetes.client.CoreV1Api()

    for configmap_name in configmap_names:
        system_configmap = kube_api.read_namespaced_config_map(
            name=configmap_name, namespace=system_namespace
        )

        # Build a fresh V1ConfigMap carrying only the data: server-populated metadata
        # (resourceVersion, uid, namespace, etc.) from the source object cannot be
        # reused on create in another namespace.
        new_configmap = kubernetes.client.V1ConfigMap(
            api_version="v1",
            kind="ConfigMap",
            data=system_configmap.data,
            metadata=kubernetes.client.V1ObjectMeta(name=configmap_name),
        )
        kube_api.create_namespaced_config_map(namespace=user_code_namespace, body=new_configmap)

    try:
        yield
    finally:
        if should_cleanup:
            for configmap_name in configmap_names:
                # Bug fix: cleanup must *delete* the copied configmaps, not call
                # create_namespaced_config_map again (which would both be a no-op
                # cleanup and fail, since create takes `body`, not `name`).
                kube_api.delete_namespaced_config_map(
                    name=configmap_name, namespace=user_code_namespace
                )


@pytest.fixture(
scope="session",
params=[
Expand Down Expand Up @@ -474,14 +447,6 @@ def helm_namespaces_for_k8s_run_launcher(
run_monitoring=True,
)
)
stack.enter_context(
copy_configmaps(
system_namespace,
namespace,
should_cleanup,
configmap_names=["dagster-instance", "dagster-pipeline-env"],
)
)
yield (namespace, system_namespace)
else:
with helm_chart_for_k8s_run_launcher(
Expand Down
20 changes: 2 additions & 18 deletions python_modules/libraries/dagster-k8s/dagster_k8s/job.py
Original file line number Diff line number Diff line change
Expand Up @@ -624,19 +624,7 @@ def construct_dagster_k8s_job(

user_defined_resources = container_config.pop("resources", {})

volume_mounts = (
[
{
"name": "dagster-instance",
"mount_path": "{dagster_home}/dagster.yaml".format(
dagster_home=job_config.dagster_home
),
"sub_path": "dagster.yaml",
}
]
+ job_config.volume_mounts
+ user_defined_k8s_volume_mounts
)
volume_mounts = job_config.volume_mounts + user_defined_k8s_volume_mounts

resources = user_defined_resources if user_defined_resources else job_config.resources

Expand All @@ -658,11 +646,7 @@ def construct_dagster_k8s_job(

user_defined_volumes = pod_spec_config.pop("volumes", [])

volumes = (
[{"name": "dagster-instance", "config_map": {"name": job_config.instance_config_map}}]
+ job_config.volumes
+ user_defined_volumes
)
volumes = job_config.volumes + user_defined_volumes

# If the user has defined custom labels, remove them from the pod_template_spec_metadata
# key and merge them with the dagster labels
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,14 +109,14 @@ def test_construct_dagster_k8s_job_with_mounts():
)
job = construct_dagster_k8s_job(cfg, ["foo", "bar"], "job123").to_dict()

assert len(job["spec"]["template"]["spec"]["volumes"]) == 2
assert len(job["spec"]["template"]["spec"]["volumes"]) == 1
foo_volumes = [
volume for volume in job["spec"]["template"]["spec"]["volumes"] if volume["name"] == "foo"
]
assert len(foo_volumes) == 1
assert foo_volumes[0]["config_map"]["name"] == "settings-cm"

assert len(job["spec"]["template"]["spec"]["containers"][0]["volume_mounts"]) == 2
assert len(job["spec"]["template"]["spec"]["containers"][0]["volume_mounts"]) == 1
foo_volumes_mounts = [
volume
for volume in job["spec"]["template"]["spec"]["containers"][0]["volume_mounts"]
Expand All @@ -140,7 +140,7 @@ def test_construct_dagster_k8s_job_with_mounts():
],
)
job = construct_dagster_k8s_job(cfg, ["foo", "bar"], "job123").to_dict()
assert len(job["spec"]["template"]["spec"]["volumes"]) == 2
assert len(job["spec"]["template"]["spec"]["volumes"]) == 1
foo_volumes = [
volume for volume in job["spec"]["template"]["spec"]["volumes"] if volume["name"] == "foo"
]
Expand Down Expand Up @@ -395,8 +395,7 @@ def user_defined_k8s_volume_mounts_tags_graph():
volume_mounts = job["spec"]["template"]["spec"]["containers"][0]["volume_mounts"]
volume_mounts_mapping = {volume_mount["name"]: volume_mount for volume_mount in volume_mounts}

assert len(volume_mounts_mapping) == 3
assert volume_mounts_mapping["dagster-instance"]
assert len(volume_mounts_mapping) == 2
assert volume_mounts_mapping["a_volume_mount_one"]
assert volume_mounts_mapping["a_volume_mount_two"]

Expand Down Expand Up @@ -446,8 +445,7 @@ def user_defined_k8s_volume_mounts_tags_graph():
volume_mounts = job["spec"]["template"]["spec"]["containers"][0]["volume_mounts"]
volume_mounts_mapping = {volume_mount["name"]: volume_mount for volume_mount in volume_mounts}

assert len(volume_mounts_mapping) == 3
assert volume_mounts_mapping["dagster-instance"]
assert len(volume_mounts_mapping) == 2
assert volume_mounts_mapping["a_volume_mount_one"]
assert volume_mounts_mapping["a_volume_mount_two"]

Expand Down

0 comments on commit 9941f61

Please sign in to comment.