diff --git a/.travis.yml b/.travis.yml index 37bdb5d4..4be27ea8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,6 +23,9 @@ matrix: if: type = push AND branch = master AND env(GITHUB_TOKEN) IS present script: test/license-test/run-license-test.sh env: LICENSE_TEST=true + - stage: Test + script: make helm-sync-test + env: HELM_SYNC_TEST=true - stage: Deploy if: type = push AND tag =~ /^v\d+\.\d+(\.\d+)?(-\S*)?$/ AND env(DOCKER_USERNAME) IS present script: make docker-build && make docker-push diff --git a/Makefile b/Makefile index eaa69dcf..c463ea04 100644 --- a/Makefile +++ b/Makefile @@ -41,4 +41,7 @@ license-test: go-report-card-test: test/go-report-card-test/run-report-card-test.sh -test: e2e-test compatibility-test license-test go-report-card-test +helm-sync-test: + test/helm-sync-test/run-helm-sync-test + +test: e2e-test compatibility-test license-test go-report-card-test helm-sync-test diff --git a/config/helm/aws-node-termination-handler/.helmignore b/config/helm/aws-node-termination-handler/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/config/helm/aws-node-termination-handler/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/config/helm/aws-node-termination-handler/Chart.yaml b/config/helm/aws-node-termination-handler/Chart.yaml new file mode 100644 index 00000000..be61500e --- /dev/null +++ b/config/helm/aws-node-termination-handler/Chart.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +name: aws-node-termination-handler +description: A Helm chart for the AWS Node Termination Handler +version: 0.4.0 +appVersion: 1.1.0 +home: https://github.com/aws/eks-charts +icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png +sources: + - https://github.com/aws/eks-charts +maintainers: + - name: Nicholas Turner + url: https://github.com/nckturner + email: nckturner@users.noreply.github.com + - name: Stefan Prodan + url: https://github.com/stefanprodan + email: stefanprodan@users.noreply.github.com + - name: Jillian Montalvo + url: https://github.com/jillmon + email: jillmon@users.noreply.github.com + - name: Matthew Becker + url: https://github.com/mattrandallbecker + email: mattrandallbecker@users.noreply.github.com +keywords: + - eks + - ec2 + - node-termination + - spot diff --git a/config/helm/aws-node-termination-handler/README.md b/config/helm/aws-node-termination-handler/README.md new file mode 100644 index 00000000..a9be8838 --- /dev/null +++ b/config/helm/aws-node-termination-handler/README.md @@ -0,0 +1,73 @@ +# AWS Node Termination Handler + +AWS Node Termination Handler Helm chart for Kubernetes. For more information on this project see the project repo at https://github.com/aws/aws-node-termination-handler. 
+ +## Prerequisites + +* Kubernetes >= 1.11 + +## Installing the Chart + +Add the EKS repository to Helm: +```sh +helm repo add eks https://aws.github.io/eks-charts +``` +Install AWS Node Termination Handler: +To install the chart with the release name aws-node-termination-handler and default configuration: + +```sh +helm install --name aws-node-termination-handler \ + --namespace kube-system eks/aws-node-termination-handler +``` + +To install into an EKS cluster where the Node Termination Handler is already installed, you can run: + +```sh +helm upgrade --install --recreate-pods --force \ + aws-node-termination-handler --namespace kube-system eks/aws-node-termination-handler +``` + +If you receive an error similar to `Error: release aws-node-termination-handler +failed: "aws-node-termination-handler" already exists`, simply rerun +the above command. + +The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `aws-node-termination-handler` deployment: + +```sh +helm delete --purge aws-node-termination-handler +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`image.repository` | image repository | `amazon/aws-node-termination-handler` +`image.tag` | image tag | `` +`image.pullPolicy` | image pull policy | `IfNotPresent` +`deleteLocalData` | Tells kubectl to continue even if there are pods using emptyDir (local data that will be deleted when the node is drained). | `false` +`gracePeriod` | (DEPRECATED: Renamed to podTerminationGracePeriod) The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. 
| `30` +`podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used. | `30` +`nodeTerminationGracePeriod` | Period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` +`ignoreDaemonSets` | Causes kubectl to skip daemon set managed pods | `true` +`instanceMetadataURL` | The URL of EC2 instance metadata. This shouldn't need to be changed unless you are testing. | `http://169.254.169.254:80` +`affinity` | node/pod affinities | None +`podSecurityContext` | Pod Security Context | `{}` +`podAnnotations` | annotations to add to each pod | `{}` +`priorityClassName` | Name of the priorityClass | `system-node-critical` +`resources` | Resources for the pods | `requests.cpu: 50m, requests.memory: 64Mi, limits.cpu: 100m, limits.memory: 128Mi` +`securityContext` | Container Security context | `privileged: true` +`nodeSelector` | Tells the daemon set where to place the node-termination-handler pods. For example: `lifecycle: "Ec2Spot"`, `on-demand: "false"`, `aws.amazon.com/purchaseType: "spot"`, etc. Value must be a valid yaml expression. 
| `{}` +`tolerations` | list of node taints to tolerate | `[]` +`rbac.create` | if `true`, create and use RBAC resources | `true` +`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` +`serviceAccount.create` | If `true`, create a new service account | `true` +`serviceAccount.name` | Service account to be used | None +`serviceAccount.annotations` | Specifies the annotations for ServiceAccount | `{}` diff --git a/config/helm/aws-node-termination-handler/templates/NOTES.txt b/config/helm/aws-node-termination-handler/templates/NOTES.txt new file mode 100644 index 00000000..f2dd1cec --- /dev/null +++ b/config/helm/aws-node-termination-handler/templates/NOTES.txt @@ -0,0 +1,3 @@ +{{ .Release.Name }} has been installed or updated. To check the status of pods, run: + +kubectl get pods --namespace {{ .Release.Namespace }} diff --git a/config/helm/aws-node-termination-handler/templates/_helpers.tpl b/config/helm/aws-node-termination-handler/templates/_helpers.tpl new file mode 100644 index 00000000..902844a7 --- /dev/null +++ b/config/helm/aws-node-termination-handler/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* vim: set filetype=mustache: */}} +{{/*
Expand the name of the chart.
*/}} +{{- define "aws-node-termination-handler.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "aws-node-termination-handler.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-node-termination-handler.labels" -}} +app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} +helm.sh/chart: {{ include "aws-node-termination-handler.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +k8s-app: aws-node-termination-handler +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-node-termination-handler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-node-termination-handler.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "aws-node-termination-handler.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/config/helm/aws-node-termination-handler/templates/clusterrole.yaml b/config/helm/aws-node-termination-handler/templates/clusterrole.yaml new file mode 100644 index 00000000..3cc2ec77 --- /dev/null +++ b/config/helm/aws-node-termination-handler/templates/clusterrole.yaml @@ -0,0 +1,39 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "aws-node-termination-handler.fullname" . 
}} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - extensions + resources: + - replicasets + - daemonsets + verbs: + - get +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - delete diff --git a/config/helm/aws-node-termination-handler/templates/clusterrolebinding.yaml b/config/helm/aws-node-termination-handler/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..b5c25327 --- /dev/null +++ b/config/helm/aws-node-termination-handler/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "aws-node-termination-handler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "aws-node-termination-handler.fullname" . }} + apiGroup: rbac.authorization.k8s.io diff --git a/config/helm/aws-node-termination-handler/templates/daemonset.yaml b/config/helm/aws-node-termination-handler/templates/daemonset.yaml new file mode 100644 index 00000000..97e5a0d3 --- /dev/null +++ b/config/helm/aws-node-termination-handler/templates/daemonset.yaml @@ -0,0 +1,92 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "aws-node-termination-handler.fullname" . }} + labels: +{{ include "aws-node-termination-handler.labels" . | indent 4 }} +spec: + updateStrategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "aws-node-termination-handler.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + k8s-app: aws-node-termination-handler + spec: + priorityClassName: "{{ .Values.priorityClassName }}" + affinity: + nodeAffinity: + # NOTE(jaypipes): Change when we complete + # https://github.com/aws/aws-node-termination-handler/issues/8 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "beta.kubernetes.io/os" + operator: In + values: + - linux + - key: "beta.kubernetes.io/arch" + operator: In + values: + - amd64 + serviceAccountName: {{ template "aws-node-termination-handler.serviceAccountName" . }} + containers: + - name: {{ include "aws-node-termination-handler.name" . 
}} + image: {{ .Values.image.repository}}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SPOT_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DELETE_LOCAL_DATA + value: {{ .Values.deleteLocalData | quote }} + - name: IGNORE_DAEMON_SETS + value: {{ .Values.ignoreDaemonSets | quote }} + - name: GRACE_PERIOD + value: {{ .Values.gracePeriod | quote }} + - name: POD_TERMINATION_GRACE_PERIOD + value: {{ .Values.podTerminationGracePeriod | quote }} + - name: INSTANCE_METADATA_URL + value: {{ .Values.instanceMetadataURL | quote }} + - name: NODE_TERMINATION_GRACE_PERIOD + value: {{ .Values.nodeTerminationGracePeriod | quote }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/config/helm/aws-node-termination-handler/templates/psp.yaml b/config/helm/aws-node-termination-handler/templates/psp.yaml new file mode 100644 index 00000000..2db36502 --- /dev/null +++ b/config/helm/aws-node-termination-handler/templates/psp.yaml @@ -0,0 +1,57 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }} + labels: +{{ include "aws-node-termination-handler.labels" . 
| indent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + privileged: false + hostIPC: false + hostNetwork: false + hostPID: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + allowedCapabilities: + - '*' + fsGroup: + rule: RunAsAny + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }}-psp + labels: +{{ include "aws-node-termination-handler.labels" . | indent 4 }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "aws-node-termination-handler.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "aws-node-termination-handler.fullname" . }}-psp + labels: +{{ include "aws-node-termination-handler.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "aws-node-termination-handler.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: {{ template "aws-node-termination-handler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/config/helm/aws-node-termination-handler/templates/serviceaccount.yaml b/config/helm/aws-node-termination-handler/templates/serviceaccount.yaml new file mode 100644 index 00000000..8bf9f0f7 --- /dev/null +++ b/config/helm/aws-node-termination-handler/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "aws-node-termination-handler.serviceAccountName" . }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: +{{ include "aws-node-termination-handler.labels" . 
| indent 4 }} +{{- end -}} diff --git a/config/helm/aws-node-termination-handler/values.yaml b/config/helm/aws-node-termination-handler/values.yaml new file mode 100644 index 00000000..25454015 --- /dev/null +++ b/config/helm/aws-node-termination-handler/values.yaml @@ -0,0 +1,67 @@ +# Default values for aws-node-termination-handler. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + repository: amazon/aws-node-termination-handler + tag: v1.1.0 + pullPolicy: IfNotPresent + +nameOverride: "" +fullnameOverride: "" + +priorityClassName: system-node-critical + +podSecurityContext: {} + +podAnnotations: {} + +securityContext: + privileged: true + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +# deleteLocalData tells kubectl to continue even if there are pods using +# emptyDir (local data that will be deleted when the node is drained). +deleteLocalData: false + +# ignoreDaemonSets causes kubectl to skip Daemon Set managed pods. +ignoreDaemonSets: true + +# gracePeriod (DEPRECATED - use podTerminationGracePeriod instead) is time in seconds given to each pod to terminate gracefully. +# If negative, the default value specified in the pod will be used. +gracePeriod: "" +podTerminationGracePeriod: "" + +# nodeTerminationGracePeriod specifies the period of time in seconds given to each NODE to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. +nodeTerminationGracePeriod: "" + +# instanceMetadataURL is used to override the default metadata URL (default: http://169.254.169.254:80) +instanceMetadataURL: "" + +# nodeSelector tells the daemonset where to place the node-termination-handler +# pods. By default, this value is empty and every node will receive a pod. 
+nodeSelector: {} + +tolerations: [] + +affinity: {} + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. If name is not set and create is true, + # a name is generated using fullname template + name: + annotations: {} + # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +rbac: + # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created + pspEnabled: false diff --git a/test/helm-sync-test/run-helm-sync-test b/test/helm-sync-test/run-helm-sync-test new file mode 100755 index 00000000..59676f78 --- /dev/null +++ b/test/helm-sync-test/run-helm-sync-test @@ -0,0 +1,27 @@ +#!/bin/bash + +SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" +TEST_ID=$(uuidgen | cut -d'-' -f1 | tr '[:upper:]' '[:lower:]') +TMP_DIR=$SCRIPTPATH/../../build/helm-sync-$TEST_ID + +mkdir -p $TMP_DIR +cd $TMP_DIR +EXIT_CODE=0 + +git clone https://github.com/aws/eks-charts.git + +DIFF_OUTPUT=$(diff -r eks-charts/stable/aws-node-termination-handler $SCRIPTPATH/../../config/helm/aws-node-termination-handler) +DIFF_ECODE=$? +echo "$DIFF_OUTPUT" > $TMP_DIR/chart-diff.txt + +if [[ $DIFF_ECODE -eq 0 ]]; then + echo "✅ AWS Node Termination Handler helm chart is in-sync with the eks-charts repo!" +else + EXIT_CODE=1 + echo "===================================== DIFF ========================================================" + echo "$DIFF_OUTPUT" + echo "===================================== END DIFF ====================================================" + echo "❌ The Helm chart is NOT in-sync with the eks-charts repo. The diff is printed above. Please make a PR to eks-charts before merging this code." +fi + +exit $EXIT_CODE