diff --git a/incubator/elasticsearch/Chart.yaml b/incubator/elasticsearch/Chart.yaml
index 9600a4e21b33..29ab88017a72 100755
--- a/incubator/elasticsearch/Chart.yaml
+++ b/incubator/elasticsearch/Chart.yaml
@@ -1,8 +1,10 @@
+# elasticsearch has been promoted to stable
+deprecated: true
 name: elasticsearch
 home: https://www.elastic.co/products/elasticsearch
-version: 1.10.1
+version: 1.10.2
 appVersion: 6.4.2
-description: Flexible and powerful open source, distributed real-time search and analytics
+description: DEPRECATED Flexible and powerful open source, distributed real-time search and analytics
   engine.
 icon: https://static-www.elastic.co/assets/blteb1c97719574938d/logo-elastic-elasticsearch-lt.svg
 sources:
 - https://www.elastic.co/products/elasticsearch
@@ -12,10 +14,3 @@ sources:
 - https://github.com/GoogleCloudPlatform/elasticsearch-docker
 - https://github.com/clockworksoul/helm-elasticsearch
 - https://github.com/pires/kubernetes-elasticsearch-cluster
-maintainers:
-- name: simonswine
-  email: christian@jetstack.io
-- name: icereval
-  email: michael.haselton@gmail.com
-- name: rendhalver
-  email: pete.brown@powerhrg.com
diff --git a/incubator/elasticsearch/README.md b/incubator/elasticsearch/README.md
index 07b44e144089..8526a73b9285 100644
--- a/incubator/elasticsearch/README.md
+++ b/incubator/elasticsearch/README.md
@@ -1,5 +1,7 @@
 # Elasticsearch Helm Chart
 
+**Note - this chart has been deprecated and [moved to stable](../../stable/elasticsearch)**.
+
 This chart uses a standard Docker image of Elasticsearch (docker.elastic.co/elasticsearch/elasticsearch-oss) and uses a service pointing to the master's transport port for service discovery.
 Elasticsearch does not communicate with the Kubernetes API, hence no need for RBAC permissions.
diff --git a/incubator/elasticsearch/templates/NOTES.txt b/incubator/elasticsearch/templates/NOTES.txt
index c38cbd6ba75a..384ff9a305fb 100644
--- a/incubator/elasticsearch/templates/NOTES.txt
+++ b/incubator/elasticsearch/templates/NOTES.txt
@@ -1,5 +1,10 @@
 The elasticsearch cluster has been installed.
 
+***
+Please note that this chart has been deprecated and moved to stable.
+Going forward please use the stable version of this chart.
+***
+
 Elasticsearch can be accessed:
 
   * Within your cluster, at the following DNS name at port 9200:
diff --git a/stable/elasticsearch/.helmignore b/stable/elasticsearch/.helmignore
new file mode 100644
index 000000000000..f2256510cd7d
--- /dev/null
+++ b/stable/elasticsearch/.helmignore
@@ -0,0 +1,3 @@
+.git
+# OWNERS file for Kubernetes
+OWNERS
\ No newline at end of file
diff --git a/stable/elasticsearch/Chart.yaml b/stable/elasticsearch/Chart.yaml
new file mode 100755
index 000000000000..0396aaab0762
--- /dev/null
+++ b/stable/elasticsearch/Chart.yaml
@@ -0,0 +1,21 @@
+name: elasticsearch
+home: https://www.elastic.co/products/elasticsearch
+version: 1.11.0
+appVersion: 6.4.2
+description: Flexible and powerful open source, distributed real-time search and analytics
+  engine.
+icon: https://static-www.elastic.co/assets/blteb1c97719574938d/logo-elastic-elasticsearch-lt.svg
+sources:
+- https://www.elastic.co/products/elasticsearch
+- https://github.com/jetstack/elasticsearch-pet
+- https://github.com/giantswarm/kubernetes-elastic-stack
+- https://github.com/GoogleCloudPlatform/elasticsearch-docker
+- https://github.com/clockworksoul/helm-elasticsearch
+- https://github.com/pires/kubernetes-elasticsearch-cluster
+maintainers:
+- name: simonswine
+  email: christian@jetstack.io
+- name: icereval
+  email: michael.haselton@gmail.com
+- name: rendhalver
+  email: pete.brown@powerhrg.com
diff --git a/stable/elasticsearch/OWNERS b/stable/elasticsearch/OWNERS
new file mode 100644
index 000000000000..89705d69a6b3
--- /dev/null
+++ b/stable/elasticsearch/OWNERS
@@ -0,0 +1,8 @@
+approvers:
+- simonswine
+- icereval
+- rendhalver
+reviewers:
+- simonswine
+- icereval
+- rendhalver
diff --git a/stable/elasticsearch/README.md b/stable/elasticsearch/README.md
new file mode 100644
index 000000000000..7d7345fc7e3e
--- /dev/null
+++ b/stable/elasticsearch/README.md
@@ -0,0 +1,220 @@
+# Elasticsearch Helm Chart
+
+This chart uses a standard Docker image of Elasticsearch (docker.elastic.co/elasticsearch/elasticsearch-oss) and uses a service pointing to the master's transport port for service discovery.
+Elasticsearch does not communicate with the Kubernetes API, hence no need for RBAC permissions.
+
+## Warning for previous users
+If you are currently using an earlier version of this chart, you will need to redeploy your Elasticsearch clusters. The discovery method used here is incompatible with using RBAC.
+If you are upgrading to Elasticsearch 6 from the 5.5 version previously used in this chart, please note that your cluster needs to do a full cluster restart.
+The simplest way to do that is to delete the installation (keep the PVs) and install this chart again with the new version.
+If you want to avoid a full cluster restart, upgrade to Elasticsearch 5.6 first before moving on to Elasticsearch 6.0.
+
+## Prerequisites Details
+
+* Kubernetes 1.6+
+* PV dynamic provisioning support on the underlying infrastructure
+
+## StatefulSets Details
+* https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
+
+## StatefulSets Caveats
+* https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
+
+## Todo
+
+* Implement TLS/Auth/Security
+* Smarter upscaling/downscaling
+* Solution for memory locking
+
+## Chart Details
+This chart will do the following:
+
+* Implement a dynamically scalable Elasticsearch cluster using Kubernetes StatefulSets/Deployments
+* Multi-role deployment: master, client (coordinating) and data nodes
+* Support scaling down the data StatefulSet without degrading the cluster
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ helm install --name my-release stable/elasticsearch
+```
+
+## Deleting the Chart
+
+Delete the Helm deployment as normal:
+
+```
+$ helm delete my-release
+```
+
+Deletion of the StatefulSet doesn't cascade to deleting associated PVCs. To delete them:
+
+```
+$ kubectl delete pvc -l release=my-release,component=data
+```
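+
+The master StatefulSet keeps persistent volume claims as well; assuming the same label convention as above, they can be removed with:
+
+```
+$ kubectl delete pvc -l release=my-release,component=master
+```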
+
+## Configuration
+
+The following table lists the configurable parameters of the elasticsearch chart and their default values.
+
+| Parameter | Description | Default |
+| ------------------------------------ | ------------------------------------------------------------------- | --------------------------------------------------- |
+| `appVersion` | Application Version (Elasticsearch) | `6.4.2` |
+| `image.repository` | Container image name | `docker.elastic.co/elasticsearch/elasticsearch-oss` |
+| `image.tag` | Container image tag | `6.4.2` |
+| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
+| `initImage.repository` | Init container image name | `busybox` |
+| `initImage.tag` | Init container image tag | `latest` |
+| `initImage.pullPolicy` | Init container pull policy | `Always` |
+| `cluster.name` | Cluster name | `elasticsearch` |
+| `cluster.xpackEnable` | Writes the X-Pack configuration options to the configuration file | `false` |
+| `cluster.config` | Additional cluster config appended | `{}` |
+| `cluster.keystoreSecret` | Name of secret holding secure config options in an es keystore | `nil` |
+| `cluster.env` | Cluster environment variables | `{MINIMUM_MASTER_NODES: "2"}` |
+| `cluster.additionalJavaOpts` | Cluster parameters to be added to `ES_JAVA_OPTS` environment variable | `""` |
+| `client.name` | Client component name | `client` |
+| `client.replicas` | Client node replicas (deployment) | `2` |
+| `client.resources` | Client node resources requests & limits | `{} - cpu limit must be an integer` |
+| `client.priorityClassName` | Client priorityClass | `""` |
+| `client.heapSize` | Client node heap size | `512m` |
+| `client.podAnnotations` | Client Deployment annotations | `{}` |
+| `client.nodeSelector` | Node labels for client pod assignment | `{}` |
+| `client.tolerations` | Client tolerations | `[]` |
+| `client.serviceAnnotations` | Client Service annotations | `{}` |
+| `client.serviceType` | Client service type | `ClusterIP` |
+| `client.loadBalancerIP` | Client loadBalancerIP | `{}` |
+| `client.loadBalancerSourceRanges` | Client loadBalancerSourceRanges | `{}` |
+| `client.antiAffinity` | Client anti-affinity policy | `soft` |
+| `client.nodeAffinity` | Client node affinity policy | `{}` |
+| `client.podDisruptionBudget.enabled` | Client pod disruption budget enabled | `false` |
+| `master.exposeHttp` | Expose http port 9200 on master Pods for monitoring, etc | `false` |
+| `master.name` | Master component name | `master` |
+| `master.replicas` | Master node replicas (statefulset) | `3` |
+| `master.resources` | Master node resources requests & limits | `{} - cpu limit must be an integer` |
+| `master.priorityClassName` | Master priorityClass | `""` |
+| `master.podAnnotations` | Master StatefulSet annotations | `{}` |
+| `master.nodeSelector` | Node labels for master pod assignment | `{}` |
+| `master.tolerations` | Master tolerations | `[]` |
+| `master.heapSize` | Master node heap size | `512m` |
+| `master.persistence.enabled` | Master persistent enabled/disabled | `true` |
+| `master.persistence.name` | Master statefulset PVC template name | `data` |
+| `master.persistence.size` | Master persistent volume size | `4Gi` |
+| `master.persistence.storageClass` | Master persistent volume Class | `nil` |
+| `master.persistence.accessMode` | Master persistent Access Mode | `ReadWriteOnce` |
+| `master.antiAffinity` | Master anti-affinity policy | `soft` |
+| `master.nodeAffinity` | Master node affinity policy | `{}` |
+| `master.podDisruptionBudget.enabled` | Master pod disruption budget enabled | `false` |
+| `master.updateStrategy.type` | Master statefulset update strategy type | `OnDelete` |
+| `data.exposeHttp` | Expose http port 9200 on data Pods for monitoring, etc | `false` |
+| `data.replicas` | Data node replicas (statefulset) | `2` |
+| `data.resources` | Data node resources requests & limits | `{} - cpu limit must be an integer` |
+| `data.priorityClassName` | Data priorityClass | `""` |
+| `data.heapSize` | Data node heap size | `1536m` |
+| `data.persistence.enabled` | Data persistent enabled/disabled | `true` |
+| `data.persistence.name` | Data statefulset PVC template name | `data` |
+| `data.persistence.size` | Data persistent volume size | `30Gi` |
+| `data.persistence.storageClass` | Data persistent volume Class | `nil` |
+| `data.persistence.accessMode` | Data persistent Access Mode | `ReadWriteOnce` |
+| `data.podAnnotations` | Data StatefulSet annotations | `{}` |
+| `data.nodeSelector` | Node labels for data pod assignment | `{}` |
+| `data.tolerations` | Data tolerations | `[]` |
+| `data.terminationGracePeriodSeconds` | Data termination grace period (seconds) | `3600` |
+| `data.antiAffinity` | Data anti-affinity policy | `soft` |
+| `data.nodeAffinity` | Data node affinity policy | `{}` |
+| `data.podDisruptionBudget.enabled` | Data pod disruption budget enabled | `false` |
+| `data.updateStrategy.type` | Data statefulset update strategy type | `OnDelete` |
+| `extraInitContainers` | Additional init containers passed through the tpl | `` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
+For memory resources, make sure that the following inequality holds for each role:
+
+- `${role}HeapSize < ${role}MemoryRequests < ${role}MemoryLimits`
+
+The YAML value of `cluster.config` is appended to the `elasticsearch.yml` file for additional customization (for example `script.inline: on` to allow inline scripting).
+
+# Deep dive
+
+## Application Version
+
+This chart aims to support Elasticsearch v2 to v6 deployments by specifying the `values.yaml` parameter `appVersion`.
+
+### Version Specific Features
+
+* Memory Locking *(variable renamed)*
+* Ingest Node *(v5)*
+* X-Pack Plugin *(v5)*
+
+Upgrade paths & more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html
+
+## Mlocking
+
+This is a limitation in Kubernetes right now. There is no way to raise the
+limits of lockable memory, so that these memory areas won't be swapped. This
+would degrade performance heavily. The issue is tracked in
+[kubernetes/#3595](https://github.com/kubernetes/kubernetes/issues/3595).
+
+```
+[WARN ][bootstrap] Unable to lock JVM Memory: error=12,reason=Cannot allocate memory
+[WARN ][bootstrap] This can result in part of the JVM being swapped out.
+[WARN ][bootstrap] Increase RLIMIT_MEMLOCK, soft limit: 65536, hard limit: 65536
+```
+
+## Minimum Master Nodes
+> The minimum_master_nodes setting is extremely important to the stability of your cluster. This setting helps prevent split brains, the existence of two masters in a single cluster.
+
+> When you have a split brain, your cluster is at danger of losing data. Because the master is considered the supreme ruler of the cluster, it decides when new indices can be created, how shards are moved, and so forth. If you have two masters, data integrity becomes perilous, since you have two nodes that think they are in charge.
+
+> This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes available. Only then will an election take place.
+
+> This setting should always be configured to a quorum (majority) of your master-eligible nodes. A quorum is (number of master-eligible nodes / 2) + 1.
+
+More info: https://www.elastic.co/guide/en/elasticsearch/guide/1.x/_important_configuration_changes.html#_minimum_master_nodes
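+
+For example, with the chart default of `master.replicas: 3`, the quorum is (3 / 2) + 1 = 2, which matches the chart's default `cluster.env.MINIMUM_MASTER_NODES: "2"`. If you scale up to 5 master-eligible nodes, raise the setting to (5 / 2) + 1 = 3, for instance with an override like this (illustrative):
+
+```
+$ helm install --name my-release stable/elasticsearch \
+    --set master.replicas=5,cluster.env.MINIMUM_MASTER_NODES=3
+```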
+
+## Client and Coordinating Nodes
+
+Elasticsearch v5 terminology changed: what used to be called a `Client Node` is now referred to as a `Coordinating Node`.
+
+More info: https://www.elastic.co/guide/en/elasticsearch/reference/5.5/modules-node.html#coordinating-node
+
+## Enabling Elasticsearch internal monitoring
+Requires version 6.3+ and the standard, non-`oss`, image repository. Starting with 6.3, X-Pack is partially free and enabled by default. You need to set a new config option to enable the collection of these internal metrics. (https://www.elastic.co/guide/en/elasticsearch/reference/6.3/monitoring-settings.html)
+
+To do this through this Helm chart, override the following three values:
+```
+image.repository: docker.elastic.co/elasticsearch/elasticsearch
+cluster.xpackEnable: true
+cluster.env.XPACK_MONITORING_ENABLED: true
+```
+
+Note: to see these metrics you will also need to update your Kibana image to `image.repository: docker.elastic.co/kibana/kibana` instead of the `oss` version.
+
+## Select the right storage class for SSD volumes
+
+### GCE + Kubernetes 1.5
+
+Create a StorageClass for SSD persistent disks:
+
+```
+$ kubectl create -f - <<EOF
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: ssd
+provisioner: kubernetes.io/gce-pd
+parameters:
+  type: pd-ssd
+EOF
+```
diff --git a/stable/elasticsearch/templates/configmap.yaml b/stable/elasticsearch/templates/configmap.yaml
new file mode 100644
--- /dev/null
+++ b/stable/elasticsearch/templates/configmap.yaml
+  pre-stop-hook.sh: |-
+    #!/bin/bash
+    exec &> >(tee -a "/var/log/elasticsearch-hooks.log")
+    NODE_NAME=${HOSTNAME}
+    echo "Prepare to migrate data of the node ${NODE_NAME}"
+    echo "Move all data from node ${NODE_NAME}"
+    curl -s -XPUT -H 'Content-Type: application/json' '{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings' -d "{
+      \"transient\" :{
+          \"cluster.routing.allocation.exclude._name\" : \"${NODE_NAME}\"
+      }
+    }"
+    echo ""
+
+    while true ; do
+      echo -e "Wait for node ${NODE_NAME} to become empty"
+      SHARDS_ALLOCATION=$(curl -s -XGET 'http://{{ template "elasticsearch.client.fullname" . }}:9200/_cat/shards')
+      if ! echo "${SHARDS_ALLOCATION}" | grep -E "${NODE_NAME}"; then
+        break
+      fi
+      sleep 1
+    done
+    echo "Node ${NODE_NAME} is ready to shutdown"
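+    # At this point every shard has been relocated off this node, so the pod
+    # can stop without losing data; post-start-hook.sh below removes the
+    # allocation exclusion again once the node comes back.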
+  post-start-hook.sh: |-
+    #!/bin/bash
+    exec &> >(tee -a "/var/log/elasticsearch-hooks.log")
+    NODE_NAME=${HOSTNAME}
+    CLUSTER_SETTINGS=$(curl -s -XGET "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings")
+    if echo "${CLUSTER_SETTINGS}" | grep -E "${NODE_NAME}"; then
+      echo "Activate node ${NODE_NAME}"
+      curl -s -XPUT -H 'Content-Type: application/json' "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings" -d "{
+        \"transient\" :{
+          \"cluster.routing.allocation.exclude._name\" : null
+        }
+      }"
+    fi
+    echo "Node ${NODE_NAME} is ready to be used"
diff --git a/stable/elasticsearch/templates/data-pdb.yaml b/stable/elasticsearch/templates/data-pdb.yaml
new file mode 100644
index 000000000000..54e91c75c102
--- /dev/null
+++ b/stable/elasticsearch/templates/data-pdb.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.data.podDisruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  labels:
+    app: {{ template "elasticsearch.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    component: "{{ .Values.data.name }}"
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  name: {{ template "elasticsearch.data.fullname" . }}
+spec:
+{{- if .Values.data.podDisruptionBudget.minAvailable }}
+  minAvailable: {{ .Values.data.podDisruptionBudget.minAvailable }}
+{{- end }}
+{{- if .Values.data.podDisruptionBudget.maxUnavailable }}
+  maxUnavailable: {{ .Values.data.podDisruptionBudget.maxUnavailable }}
+{{- end }}
+  selector:
+    matchLabels:
+      app: {{ template "elasticsearch.name" . }}
+      component: "{{ .Values.data.name }}"
+      release: {{ .Release.Name }}
+{{- end }}
diff --git a/stable/elasticsearch/templates/data-statefulset.yaml b/stable/elasticsearch/templates/data-statefulset.yaml
new file mode 100644
index 000000000000..d7ae76d6d753
--- /dev/null
+++ b/stable/elasticsearch/templates/data-statefulset.yaml
@@ -0,0 +1,198 @@
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  labels:
+    app: {{ template "elasticsearch.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    component: "{{ .Values.data.name }}"
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  name: {{ template "elasticsearch.data.fullname" . }}
+spec:
+  serviceName: {{ template "elasticsearch.data.fullname" . }}
+  replicas: {{ .Values.data.replicas }}
+  template:
+    metadata:
+      labels:
+        app: {{ template "elasticsearch.name" . }}
+        component: "{{ .Values.data.name }}"
+        release: {{ .Release.Name }}
+      {{- if .Values.data.podAnnotations }}
+      annotations:
+{{ toYaml .Values.data.podAnnotations | indent 8 }}
+      {{- end }}
+    spec:
+{{- if .Values.data.priorityClassName }}
+      priorityClassName: "{{ .Values.data.priorityClassName }}"
+{{- end }}
+      securityContext:
+        fsGroup: 1000
+      {{- if or .Values.data.antiAffinity .Values.data.nodeAffinity }}
+      affinity:
+      {{- end }}
+      {{- if eq .Values.data.antiAffinity "hard" }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - topologyKey: "kubernetes.io/hostname"
+            labelSelector:
+              matchLabels:
+                app: "{{ template "elasticsearch.name" . }}"
+                release: "{{ .Release.Name }}"
+                component: "{{ .Values.data.name }}"
+      {{- else if eq .Values.data.antiAffinity "soft" }}
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 1
+            podAffinityTerm:
+              topologyKey: kubernetes.io/hostname
+              labelSelector:
+                matchLabels:
+                  app: "{{ template "elasticsearch.name" . }}"
+                  release: "{{ .Release.Name }}"
+                  component: "{{ .Values.data.name }}"
+      {{- end }}
+      {{- with .Values.data.nodeAffinity }}
+        nodeAffinity:
+{{ toYaml . | indent 10 }}
+      {{- end }}
+{{- if .Values.data.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.data.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.data.tolerations }}
+      tolerations:
+{{ toYaml .Values.data.tolerations | indent 8 }}
+{{- end }}
+      initContainers:
+      # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
+      # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall
+      - name: "sysctl"
+        image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+        imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }}
+        command: ["sysctl", "-w", "vm.max_map_count=262144"]
+        securityContext:
+          privileged: true
+      - name: "chown"
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+        command:
+        - /bin/bash
+        - -c
+        - chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/data &&
+          chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/logs
+        securityContext:
+          runAsUser: 0
+        volumeMounts:
+        - mountPath: /usr/share/elasticsearch/data
+          name: data
+{{- if .Values.extraInitContainers }}
+{{ tpl .Values.extraInitContainers . | indent 6 }}
+{{- end }}
+      containers:
+      - name: elasticsearch
+        env:
+        - name: DISCOVERY_SERVICE
+          value: {{ template "elasticsearch.fullname" . }}-discovery
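+        # DISCOVERY_SERVICE above resolves to the headless service defined in
+        # master-svc.yaml, which exposes the master-eligible pods' transport
+        # port (9300) for cluster discovery.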
+        - name: NODE_MASTER
+          value: "false"
+        - name: PROCESSORS
+          valueFrom:
+            resourceFieldRef:
+              resource: limits.cpu
+        - name: ES_JAVA_OPTS
+          value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.data.heapSize }} -Xmx{{ .Values.data.heapSize }} {{ .Values.cluster.additionalJavaOpts }}"
+        {{- range $key, $value := .Values.cluster.env }}
+        - name: {{ $key }}
+          value: {{ $value | quote }}
+        {{- end }}
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+        ports:
+        - containerPort: 9300
+          name: transport
+{{ if .Values.data.exposeHttp }}
+        - containerPort: 9200
+          name: http
+{{ end }}
+        resources:
+{{ toYaml .Values.data.resources | indent 12 }}
+        readinessProbe:
+          httpGet:
+            path: /_cluster/health?local=true
+            port: 9200
+          initialDelaySeconds: 5
+        volumeMounts:
+        - mountPath: /usr/share/elasticsearch/data
+          name: data
+        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
+          name: config
+          subPath: elasticsearch.yml
+{{- if hasPrefix "2." .Values.image.tag }}
+        - mountPath: /usr/share/elasticsearch/config/logging.yml
+          name: config
+          subPath: logging.yml
+{{- end }}
+{{- if hasPrefix "5." .Values.image.tag }}
+        - mountPath: /usr/share/elasticsearch/config/log4j2.properties
+          name: config
+          subPath: log4j2.properties
+{{- end }}
+        - name: config
+          mountPath: /pre-stop-hook.sh
+          subPath: pre-stop-hook.sh
+        - name: config
+          mountPath: /post-start-hook.sh
+          subPath: post-start-hook.sh
+{{- if .Values.cluster.keystoreSecret }}
+        - name: keystore
+          mountPath: "/usr/share/elasticsearch/config/elasticsearch.keystore"
+          subPath: elasticsearch.keystore
+          readOnly: true
+{{- end }}
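+        # Drain shards off the node before the container stops and re-enable
+        # allocation when it comes back; both scripts are mounted from the
+        # chart's ConfigMap (see configmap.yaml).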
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/bin/bash","/pre-stop-hook.sh"]
+          postStart:
+            exec:
+              command: ["/bin/bash","/post-start-hook.sh"]
+      terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }}
+{{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range $pullSecret := .Values.image.pullSecrets }}
+        - name: {{ $pullSecret }}
+      {{- end }}
+{{- end }}
+      volumes:
+      - name: config
+        configMap:
+          name: {{ template "elasticsearch.fullname" . }}
+{{- if .Values.cluster.keystoreSecret }}
+      - name: keystore
+        secret:
+          secretName: {{ .Values.cluster.keystoreSecret }}
+{{- end }}
+      {{- if not .Values.data.persistence.enabled }}
+      - name: data
+        emptyDir: {}
+      {{- end }}
+  updateStrategy:
+    type: {{ .Values.data.updateStrategy.type }}
+  {{- if .Values.data.persistence.enabled }}
+  volumeClaimTemplates:
+  - metadata:
+      name: {{ .Values.data.persistence.name }}
+    spec:
+      accessModes:
+        - {{ .Values.data.persistence.accessMode | quote }}
+    {{- if .Values.data.persistence.storageClass }}
+    {{- if (eq "-" .Values.data.persistence.storageClass) }}
+      storageClassName: ""
+    {{- else }}
+      storageClassName: "{{ .Values.data.persistence.storageClass }}"
+    {{- end }}
+    {{- end }}
+      resources:
+        requests:
+          storage: "{{ .Values.data.persistence.size }}"
+  {{- end }}
diff --git a/stable/elasticsearch/templates/master-pdb.yaml b/stable/elasticsearch/templates/master-pdb.yaml
new file mode 100644
index 000000000000..c3efe8350476
--- /dev/null
+++ b/stable/elasticsearch/templates/master-pdb.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.master.podDisruptionBudget.enabled }}
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  labels:
+    app: {{ template "elasticsearch.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    component: "{{ .Values.master.name }}"
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  name: {{ template "elasticsearch.master.fullname" . }}
+spec:
+{{- if .Values.master.podDisruptionBudget.minAvailable }}
+  minAvailable: {{ .Values.master.podDisruptionBudget.minAvailable }}
+{{- end }}
+{{- if .Values.master.podDisruptionBudget.maxUnavailable }}
+  maxUnavailable: {{ .Values.master.podDisruptionBudget.maxUnavailable }}
+{{- end }}
+  selector:
+    matchLabels:
+      app: {{ template "elasticsearch.name" . }}
+      component: "{{ .Values.master.name }}"
+      release: {{ .Release.Name }}
+{{- end }}
diff --git a/stable/elasticsearch/templates/master-statefulset.yaml b/stable/elasticsearch/templates/master-statefulset.yaml
new file mode 100644
index 000000000000..6530b00611ed
--- /dev/null
+++ b/stable/elasticsearch/templates/master-statefulset.yaml
@@ -0,0 +1,188 @@
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  labels:
+    app: {{ template "elasticsearch.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    component: "{{ .Values.master.name }}"
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  name: {{ template "elasticsearch.master.fullname" . }}
+spec:
+  serviceName: {{ template "elasticsearch.master.fullname" . }}
+  replicas: {{ .Values.master.replicas }}
+  template:
+    metadata:
+      labels:
+        app: {{ template "elasticsearch.name" . }}
+        component: "{{ .Values.master.name }}"
+        release: {{ .Release.Name }}
+      {{- if .Values.master.podAnnotations }}
+      annotations:
+{{ toYaml .Values.master.podAnnotations | indent 8 }}
+      {{- end }}
+    spec:
+{{- if .Values.master.priorityClassName }}
+      priorityClassName: "{{ .Values.master.priorityClassName }}"
+{{- end }}
+      securityContext:
+        fsGroup: 1000
+      {{- if or .Values.master.antiAffinity .Values.master.nodeAffinity }}
+      affinity:
+      {{- end }}
+      {{- if eq .Values.master.antiAffinity "hard" }}
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - topologyKey: "kubernetes.io/hostname"
+            labelSelector:
+              matchLabels:
+                app: "{{ template "elasticsearch.name" . }}"
+                release: "{{ .Release.Name }}"
+                component: "{{ .Values.master.name }}"
+      {{- else if eq .Values.master.antiAffinity "soft" }}
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 1
+            podAffinityTerm:
+              topologyKey: kubernetes.io/hostname
+              labelSelector:
+                matchLabels:
+                  app: "{{ template "elasticsearch.name" . }}"
+                  release: "{{ .Release.Name }}"
+                  component: "{{ .Values.master.name }}"
+      {{- end }}
+      {{- with .Values.master.nodeAffinity }}
+        nodeAffinity:
+{{ toYaml . | indent 10 }}
+      {{- end }}
+{{- if .Values.master.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.master.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.master.tolerations }}
+      tolerations:
+{{ toYaml .Values.master.tolerations | indent 8 }}
+{{- end }}
+      initContainers:
+      # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
+      # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall
+      - name: "sysctl"
+        image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+        imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }}
+        command: ["sysctl", "-w", "vm.max_map_count=262144"]
+        securityContext:
+          privileged: true
+      - name: "chown"
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+        command:
+        - /bin/bash
+        - -c
+        - chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/data &&
+          chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/logs
+        securityContext:
+          runAsUser: 0
+        volumeMounts:
+        - mountPath: /usr/share/elasticsearch/data
+          name: data
+{{- if .Values.extraInitContainers }}
+{{ tpl .Values.extraInitContainers . | indent 6 }}
+{{- end }}
+      containers:
+      - name: elasticsearch
+        env:
+        - name: NODE_DATA
+          value: "false"
+{{- if hasPrefix "5." .Values.appVersion }}
+        - name: NODE_INGEST
+          value: "false"
+{{- end }}
+        - name: DISCOVERY_SERVICE
+          value: {{ template "elasticsearch.fullname" . }}-discovery
+        - name: PROCESSORS
+          valueFrom:
+            resourceFieldRef:
+              resource: limits.cpu
+        - name: ES_JAVA_OPTS
+          value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.master.heapSize }} -Xmx{{ .Values.master.heapSize }} {{ .Values.cluster.additionalJavaOpts }}"
+        {{- range $key, $value := .Values.cluster.env }}
+        - name: {{ $key }}
+          value: {{ $value | quote }}
+        {{- end }}
+        resources:
+{{ toYaml .Values.master.resources | indent 12 }}
+        readinessProbe:
+          httpGet:
+            path: /_cluster/health?local=true
+            port: 9200
+          initialDelaySeconds: 5
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+        ports:
+        - containerPort: 9300
+          name: transport
+{{ if .Values.master.exposeHttp }}
+        - containerPort: 9200
+          name: http
+{{ end }}
+        volumeMounts:
+        - mountPath: /usr/share/elasticsearch/data
+          name: data
+        - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
+          name: config
+          subPath: elasticsearch.yml
+{{- if hasPrefix "2." .Values.image.tag }}
+        - mountPath: /usr/share/elasticsearch/config/logging.yml
+          name: config
+          subPath: logging.yml
+{{- end }}
+{{- if hasPrefix "5." .Values.image.tag }}
+        - mountPath: /usr/share/elasticsearch/config/log4j2.properties
+          name: config
+          subPath: log4j2.properties
+{{- end }}
+{{- if .Values.cluster.keystoreSecret }}
+        - name: keystore
+          mountPath: "/usr/share/elasticsearch/config/elasticsearch.keystore"
+          subPath: elasticsearch.keystore
+          readOnly: true
+{{- end }}
+{{- if .Values.image.pullSecrets }}
+      imagePullSecrets:
+      {{- range $pullSecret := .Values.image.pullSecrets }}
+        - name: {{ $pullSecret }}
+      {{- end }}
+{{- end }}
+      volumes:
+      - name: config
+        configMap:
+          name: {{ template "elasticsearch.fullname" . }}
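+      # If cluster.keystoreSecret is set, the keystore volume below mounts
+      # secure settings from that pre-created secret.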
+{{- if .Values.cluster.keystoreSecret }}
+      - name: keystore
+        secret:
+          secretName: {{ .Values.cluster.keystoreSecret }}
+{{- end }}
+      {{- if not .Values.master.persistence.enabled }}
+      - name: data
+        emptyDir: {}
+      {{- end }}
+  updateStrategy:
+    type: {{ .Values.master.updateStrategy.type }}
+  {{- if .Values.master.persistence.enabled }}
+  volumeClaimTemplates:
+  - metadata:
+      name: {{ .Values.master.persistence.name }}
+    spec:
+      accessModes:
+        - {{ .Values.master.persistence.accessMode | quote }}
+    {{- if .Values.master.persistence.storageClass }}
+    {{- if (eq "-" .Values.master.persistence.storageClass) }}
+      storageClassName: ""
+    {{- else }}
+      storageClassName: "{{ .Values.master.persistence.storageClass }}"
+    {{- end }}
+    {{- end }}
+      resources:
+        requests:
+          storage: "{{ .Values.master.persistence.size }}"
+  {{ end }}
diff --git a/stable/elasticsearch/templates/master-svc.yaml b/stable/elasticsearch/templates/master-svc.yaml
new file mode 100644
index 000000000000..5db28b7f874d
--- /dev/null
+++ b/stable/elasticsearch/templates/master-svc.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: {{ template "elasticsearch.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    component: "{{ .Values.master.name }}"
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+  name: {{ template "elasticsearch.fullname" . }}-discovery
+spec:
+  clusterIP: None
+  ports:
+    - port: 9300
+      targetPort: transport
+  selector:
+    app: {{ template "elasticsearch.name" . }}
+    component: "{{ .Values.master.name }}"
+    release: {{ .Release.Name }}
diff --git a/stable/elasticsearch/values.yaml b/stable/elasticsearch/values.yaml
new file mode 100644
index 000000000000..fb8d0e21a93d
--- /dev/null
+++ b/stable/elasticsearch/values.yaml
@@ -0,0 +1,134 @@
+# Default values for elasticsearch.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+appVersion: "6.4.2"
+
+image:
+  repository: "docker.elastic.co/elasticsearch/elasticsearch-oss"
+  tag: "6.4.2"
+  pullPolicy: "IfNotPresent"
+  # If specified, use these secrets to access the image
+  # pullSecrets:
+  #   - registry-secret
+
+initImage:
+  repository: "busybox"
+  tag: "latest"
+  pullPolicy: "Always"
+
+cluster:
+  name: "elasticsearch"
+  # If you want X-Pack installed, switch to an image that includes it, enable this option and toggle the features you want
+  # enabled in the environment variables outlined in the README
+  xpackEnable: false
+  # Some settings must be placed in a keystore, so they need to be mounted in from a secret.
+  # Use this setting to specify the name of the secret
+  # keystoreSecret: eskeystore
+  config: {}
+  # Custom parameters, as string, to be added to ES_JAVA_OPTS environment variable
+  additionalJavaOpts: ""
+  env:
+    # IMPORTANT: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#minimum_master_nodes
+    # To prevent data loss, it is vital to configure the discovery.zen.minimum_master_nodes setting so that each master-eligible
+    # node knows the minimum number of master-eligible nodes that must be visible in order to form a cluster.
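+    # With the default of 3 master-eligible nodes (master.replicas), the quorum
+    # is (3 / 2) + 1 = 2, hence the value below.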
+    MINIMUM_MASTER_NODES: "2"
+
+client:
+  name: client
+  replicas: 2
+  serviceType: ClusterIP
+  loadBalancerIP: {}
+  loadBalancerSourceRanges: {}
+## (dict) If specified, apply these annotations to the client service
+#  serviceAnnotations:
+#    example: client-svc-foo
+  heapSize: "512m"
+  antiAffinity: "soft"
+  nodeAffinity: {}
+  nodeSelector: {}
+  tolerations: []
+  resources:
+    limits:
+      cpu: "1"
+      # memory: "1024Mi"
+    requests:
+      cpu: "25m"
+      memory: "512Mi"
+  priorityClassName: ""
+  ## (dict) If specified, apply these annotations to each client Pod
+  # podAnnotations:
+  #   example: client-foo
+  podDisruptionBudget:
+    enabled: false
+    minAvailable: 1
+    # maxUnavailable: 1
+
+master:
+  name: master
+  exposeHttp: false
+  replicas: 3
+  heapSize: "512m"
+  persistence:
+    enabled: true
+    accessMode: ReadWriteOnce
+    name: data
+    size: "4Gi"
+    # storageClass: "ssd"
+  antiAffinity: "soft"
+  nodeAffinity: {}
+  nodeSelector: {}
+  tolerations: []
+  resources:
+    limits:
+      cpu: "1"
+      # memory: "1024Mi"
+    requests:
+      cpu: "25m"
+      memory: "512Mi"
+  priorityClassName: ""
+  ## (dict) If specified, apply these annotations to each master Pod
+  # podAnnotations:
+  #   example: master-foo
+  podDisruptionBudget:
+    enabled: false
+    minAvailable: 2  # Same as `cluster.env.MINIMUM_MASTER_NODES`
+    # maxUnavailable: 1
+  updateStrategy:
+    type: OnDelete
+
+data:
+  name: data
+  exposeHttp: false
+  replicas: 2
+  heapSize: "1536m"
+  persistence:
+    enabled: true
+    accessMode: ReadWriteOnce
+    name: data
+    size: "30Gi"
+    # storageClass: "ssd"
+  terminationGracePeriodSeconds: 3600
+  antiAffinity: "soft"
+  nodeAffinity: {}
+  nodeSelector: {}
+  tolerations: []
+  resources:
+    limits:
+      cpu: "1"
+      # memory: "2048Mi"
+    requests:
+      cpu: "25m"
+      memory: "1536Mi"
+  priorityClassName: ""
+  ## (dict) If specified, apply these annotations to each data Pod
+  # podAnnotations:
+  #   example: data-foo
+  podDisruptionBudget:
+    enabled: false
+    # minAvailable: 1
+    maxUnavailable: 1
+  updateStrategy:
+    type: OnDelete
+
+## Additional init containers
+extraInitContainers: |
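+# For example (illustrative only), extra init containers are templated in like so;
+# the name, image and command below are placeholders:
+# extraInitContainers: |
+#   - name: "wait-for-dependency"
+#     image: "busybox"
+#     command: ["sh", "-c", "until nslookup my-dependency; do sleep 2; done"]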