From db72aed10106f81f8190b183a7b5ba8d6c8ac0c9 Mon Sep 17 00:00:00 2001
From: Diego Rodriguez
Date: Thu, 28 Jun 2018 17:02:41 +0200
Subject: [PATCH] k8s: fix timeout problem

* Leaves `watch` operation open without any timeout.

* Uses the `Background` property for Kubernetes DeleteOptions object so
  orphan deletion happens asynchronously. DeleteOptions is the recommended
  way of handling deletions for Kubernetes objects see more here
  https://v1-9.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.9/#deleteoptions-v1-meta.
---
NOTE(review): this patch was recovered from a whitespace-collapsed copy.
Line breaks and the indentation of diff context lines are reconstructed
(approximate) — verify against the target file before `git am`/`git apply`.
The author e-mail in the From: header was lost in the mangling.

 reana_job_controller/k8s.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/reana_job_controller/k8s.py b/reana_job_controller/k8s.py
index 6603afce..02d09d45 100644
--- a/reana_job_controller/k8s.py
+++ b/reana_job_controller/k8s.py
@@ -174,8 +174,7 @@ def watch_jobs(job_db):
         try:
             w = watch.Watch()
             for event in w.stream(
-                    batchv1_api_client.list_job_for_all_namespaces,
-                    _request_timeout=60):
+                    batchv1_api_client.list_job_for_all_namespaces):
                 logging.info(
                     'New Job event received: {0}'.format(event['type']))
                 job = event['object']
@@ -233,7 +232,7 @@ def watch_jobs(job_db):
                             job.metadata.name))
                 # Delete all depending pods.
                 delete_options = V1DeleteOptions(
-                    propagation_policy='Foreground')
+                    propagation_policy='Background')
                 batchv1_api_client.delete_namespaced_job(
                     job.metadata.name, job.metadata.namespace, delete_options)
                 job_db[job.metadata.name]['deleted'] = True