From b72c88c03c5bc2b6731aafbb8c4225a5c6cf2a97 Mon Sep 17 00:00:00 2001 From: Will Tran Date: Tue, 2 Jan 2018 18:53:54 -0500 Subject: [PATCH] [stable/concourse] Upgrade to Concourse 3.8.0. Make necessary improvements to Concourse worker lifecycle management. Add additional fatal errors emitted as of Concourse 3.8.0 that should trigger a restart, and remove "unknown handle" as one such error as this will happen normally when running multiple concourse-web pods. Try to start workers with a clean slate by cleaning up previous incarnations of a worker. Call retire-worker before starting. Also clear the concourse-work-dir before starting. Call retire-worker in a loop and don't exit that loop until the old worker is gone. This allows us to remove the fixed worker.postStopDelaySeconds duration. Add a note about persistent volumes being necessary. --- stable/concourse/Chart.yaml | 4 ++-- stable/concourse/README.md | 13 ++++++------ .../templates/worker-statefulset.yaml | 14 +++++++++---- stable/concourse/values.yaml | 20 +++++++------------ 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/stable/concourse/Chart.yaml b/stable/concourse/Chart.yaml index 776d27b74a47..d680876112b5 100644 --- a/stable/concourse/Chart.yaml +++ b/stable/concourse/Chart.yaml @@ -1,6 +1,6 @@ name: concourse -version: 0.10.7 -appVersion: 3.6.0 +version: 0.11.0 +appVersion: 3.8.0 description: Concourse is a simple and scalable CI system. icon: https://avatars1.githubusercontent.com/u/7809479 keywords: diff --git a/stable/concourse/README.md b/stable/concourse/README.md index 357a35ea1816..b3e5c3e238ec 100644 --- a/stable/concourse/README.md +++ b/stable/concourse/README.md @@ -55,7 +55,7 @@ $ kubectl scale statefulset my-release-worker --replicas=3 ### Restarting workers -If a worker isn't taking on work, you can restart the worker with `kubectl delete pod`. 
This will initiate a graceful shutdown by "retiring" the worker, with some waiting time before the worker starts up again to ensure concourse doesn't try looking for old volumes on the new worker. The values `worker.postStopDelaySeconds` and `worker.terminationGracePeriodSeconds` can be used to tune this. +If a worker isn't taking on work, you can restart the worker with `kubectl delete pod`. This will initiate a graceful shutdown by "retiring" the worker, to ensure Concourse doesn't try looking for old volumes on the new worker. The value `worker.terminationGracePeriodSeconds` can be used to provide an upper limit on graceful shutdown time before forcefully terminating the container. ### Worker Liveness Probe @@ -68,7 +68,7 @@ The following tables lists the configurable parameters of the Concourse chart an | Parameter | Description | Default | | ----------------------- | ---------------------------------- | ---------------------------------------------------------- | | `image` | Concourse image | `concourse/concourse` | -| `imageTag` | Concourse image version | `3.3.2` | +| `imageTag` | Concourse image version | `3.8.0` | | `imagePullPolicy` |Concourse image pull policy | `Always` if `imageTag` is `latest`, else `IfNotPresent` | | `concourse.username` | Concourse Basic Authentication Username | `concourse` | | `concourse.password` | Concourse Basic Authentication Password | `concourse` | @@ -124,8 +124,7 @@ The following tables lists the configurable parameters of the Concourse chart an | `worker.minAvailable` | Minimum number of workers available after an eviction | `1` | | `worker.resources` | Concourse Worker resource requests and limits | `{requests: {cpu: "100m", memory: "512Mi"}}` | | `worker.additionalAffinities` | Additional affinities to apply to worker pods. 
E.g: node affinity | `nil` | -| `worker.postStopDelaySeconds` | Time to wait after graceful shutdown of worker before starting up again | `60` | -| `worker.terminationGracePeriodSeconds` | Upper bound for graceful shutdown, including `worker.postStopDelaySeconds` | `120` | +| `worker.terminationGracePeriodSeconds` | Upper bound for graceful shutdown to allow the worker to drain its tasks | `60` | | `worker.fatalErrors` | Newline delimited strings which, when logged, should trigger a restart of the worker | *See [values.yaml](values.yaml)* | | `worker.updateStrategy` | `OnDelete` or `RollingUpdate` (requires Kubernetes >= 1.7) | `RollingUpdate` | | `worker.podManagementPolicy` | `OrderedReady` or `Parallel` (requires Kubernetes >= 1.7) | `Parallel` | @@ -203,7 +202,7 @@ concourse: < Insert the contents of your concourse-keys/worker_key.pub file > ``` -Alternativelly, you can provide those keys to `helm install` via parameters: +Alternatively, you can provide those keys to `helm install` via parameters: ```console @@ -241,6 +240,8 @@ persistence: size: "20Gi" ``` +It is highly recommended to use Persistent Volumes for Concourse Workers; otherwise container images managed by the Worker are stored in an `emptyDir` volume on the node's disk. This will interfere with k8s ImageGC and the node's disk will fill up as a result. This will be fixed in a future release of k8s: https://github.com/kubernetes/kubernetes/pull/57020 + ### Ingress TLS If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism. 
@@ -326,5 +327,5 @@ credentialManager: ## initial periodic token issued for concourse ## ref: https://www.vaultproject.io/docs/concepts/tokens.html#periodic-tokens ## - clientToken: PERIODIC_VAULT_TOKEN + clientToken: PERIODIC_VAULT_TOKEN ``` diff --git a/stable/concourse/templates/worker-statefulset.yaml b/stable/concourse/templates/worker-statefulset.yaml index 2cf2ec63bbbb..19cebb1d114e 100644 --- a/stable/concourse/templates/worker-statefulset.yaml +++ b/stable/concourse/templates/worker-statefulset.yaml @@ -34,8 +34,11 @@ spec: - -c - |- cp /dev/null /concourse-work-dir/.liveness_probe + rm -rf /concourse-work-dir/* + while ! concourse retire-worker --name=${HOSTNAME} | grep -q worker-not-found; do + sleep 5 + done concourse worker --name=${HOSTNAME} | tee -a /concourse-work-dir/.liveness_probe - sleep ${POST_STOP_DELAY_SECONDS} livenessProbe: exec: command: @@ -56,9 +59,12 @@ spec: preStop: exec: command: - - "/bin/sh" - - "-c" - - "concourse retire-worker --name=${HOSTNAME}" + - /bin/sh + - -c + - |- + while ! concourse retire-worker --name=${HOSTNAME} | grep -q worker-not-found; do + sleep 5 + done env: - name: CONCOURSE_TSA_HOST valueFrom: diff --git a/stable/concourse/values.yaml b/stable/concourse/values.yaml index 7f0434c1fca7..b2d1cc285ba9 100644 --- a/stable/concourse/values.yaml +++ b/stable/concourse/values.yaml @@ -13,7 +13,7 @@ image: concourse/concourse ## Concourse image version. ## ref: https://hub.docker.com/r/concourse/concourse/tags/ ## -imageTag: "3.5.0" +imageTag: "3.8.0" ## Specify a imagePullPolicy: 'Always' if imageTag is 'latest', else set to 'IfNotPresent'. ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -449,24 +449,18 @@ worker: # value: "value" # effect: "NoSchedule" - ## Time to delay after the worker process shuts down. This inserts time between shutdown and startup - ## to avoid errors caused by a worker restart. 
- postStopDelaySeconds: 60 - - ## Time to allow the pod to terminate before being forcefully terminated. This should include - ## postStopDelaySeconds, and should additionally provide time for the worker to retire, e.g. - ## = postStopDelaySeconds + max time to allow the worker to drain its tasks. See - ## https://concourse.ci/worker-internals.html for worker lifecycle semantics. - terminationGracePeriodSeconds: 120 + ## Time to allow the pod to terminate before being forcefully terminated. This should provide time for + ## the worker to retire, i.e. drain its tasks. See https://concourse.ci/worker-internals.html for worker + ## lifecycle semantics. + terminationGracePeriodSeconds: 60 ## If any of the strings are found in logs, the worker's livenessProbe will fail and trigger a pod restart. ## Specify one string per line, exact matching is used. ## - ## "guardian.api.garden-server.create.failed" appears when the worker's filesystem has issues. - ## "unknown handle" appears if a worker didn't cleanly restart. fatalErrors: |- guardian.api.garden-server.create.failed - unknown handle + guardian.api.garden-server.run.failed + baggageclaim.api.volume-server.create-volume-async.failed-to-create ## Strategy for StatefulSet updates (requires Kubernetes 1.6+) ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset