diff --git a/applications/elasticsearch/README.md b/applications/elasticsearch/README.md new file mode 100644 index 000000000..ebc7d9929 --- /dev/null +++ b/applications/elasticsearch/README.md @@ -0,0 +1,3 @@ +# ElasticSearch deployment + +Based on https://github.com/elastic/helm-charts/tree/main/elasticsearch \ No newline at end of file diff --git a/applications/elasticsearch/deploy/charts/.helmignore b/applications/elasticsearch/deploy/charts/.helmignore new file mode 100644 index 000000000..e12c0b4b9 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/applications/elasticsearch/deploy/charts/Chart.yaml b/applications/elasticsearch/deploy/charts/Chart.yaml new file mode 100644 index 000000000..713c6de81 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +maintainers: + - email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +version: 8.5.1 +appVersion: 8.5.1 +sources: + - https://github.com/elastic/elasticsearch +icon: https://helm.elastic.co/icons/elasticsearch.png diff --git a/applications/elasticsearch/deploy/charts/Makefile b/applications/elasticsearch/deploy/charts/Makefile new file mode 100644 index 000000000..22218a1f6 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/Makefile @@ -0,0 +1 @@ +include ../helpers/common.mk diff --git a/applications/elasticsearch/deploy/charts/README.md b/applications/elasticsearch/deploy/charts/README.md new file mode 100644 index 000000000..a4948bda4 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/README.md @@ -0,0 +1,490 @@ +# Elasticsearch Helm Chart + +[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+helm-charts+main.svg)](https://devops-ci.elastic.co/job/elastic+helm-charts+main/) [![Artifact 
HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/elastic)](https://artifacthub.io/packages/search?repo=elastic) + +This Helm chart is a lightweight way to configure and run our official +[Elasticsearch Docker image][]. + +> **Warning** +> When it comes to running the Elastic Stack on Kubernetes infrastructure, we +> recommend [Elastic Cloud on Kubernetes][] (ECK) as the best way to run and manage +> the Elastic Stack. +> +> ECK offers many operational benefits for both our basic-tier and our +> enterprise-tier customers, such as spinning up cluster nodes that were lost on +> failed infrastructure, seamless upgrades, rolling cluster changes, and much +> much more. +> +> With the release of the Elastic Stack Helm charts for Elastic version 8.5.1, +> we are handing over the ongoing maintenance of our Elastic Stack Helm charts +> to the community and contributors. This repository will finally be archived +> after 6 months' time. Elastic Stacks deployed on Kubernetes through Helm charts +> will still be fully supported under EOL limitations. +> +> Since we want to provide an even better experience for our customers by +> running the Elastic Stack on Kubernetes, we will continue maintaining the +> Helm charts applicable to ECK Custom Resources. These charts can be found in +> the [ECK repository][eck-charts]. +> +> Helm charts will currently be maintained for ECK Enterprise-tier customers, +> however, we encourage the community to engage with the existing Helm charts +> for the Elastic Stack and continue supporting their ongoing maintenance. +> +> See the [ECK repository][eck-charts] for more details. 
+ + + + + +- [Requirements](#requirements) +- [Installing](#installing) + - [Install a released version using the Helm repository](#install-a-released-version-using-the-helm-repository) + - [Install a development version using the main branch](#install-a-development-version-using-the-main-branch) +- [Upgrading](#upgrading) +- [Usage notes](#usage-notes) +- [Configuration](#configuration) +- [FAQ](#faq) + - [How to deploy this chart on a specific K8S distribution?](#how-to-deploy-this-chart-on-a-specific-k8s-distribution) + - [How to deploy dedicated nodes types?](#how-to-deploy-dedicated-nodes-types) + - [Coordinating nodes](#coordinating-nodes) + - [Clustering and Node Discovery](#clustering-and-node-discovery) + - [How to deploy clusters with security (authentication and TLS) enabled?](#how-to-deploy-clusters-with-security-authentication-and-tls-enabled) + - [How to migrate from helm/charts stable chart?](#how-to-migrate-from-helmcharts-stable-chart) + - [How to install plugins?](#how-to-install-plugins) + - [How to use the keystore?](#how-to-use-the-keystore) + - [Basic example](#basic-example) + - [Multiple keys](#multiple-keys) + - [Custom paths and keys](#custom-paths-and-keys) + - [How to enable snapshotting?](#how-to-enable-snapshotting) + - [How to configure templates post-deployment?](#how-to-configure-templates-post-deployment) +- [Contributing](#contributing) + + + + + + +## Requirements + +* Minimum cluster requirements include the following to run this chart with +default settings. All of these settings are configurable. + * Three Kubernetes nodes to respect the default "hard" affinity settings + * 1GB of RAM for the JVM heap + +See [supported configurations][] for more details. 
+ + +## Installing + +### Install a released version using the Helm repository + +* Add the Elastic Helm charts repo: +`helm repo add elastic https://helm.elastic.co` + +* Install it: `helm install elasticsearch elastic/elasticsearch` + +### Install a development version using the main branch + +* Clone the git repo: `git clone git@github.com:elastic/helm-charts.git` + +* Install it: `helm install elasticsearch ./helm-charts/elasticsearch --set imageTag=8.5.1` + +## Upgrading + +Please always check [CHANGELOG.md][] and [BREAKING_CHANGES.md][] before +upgrading to a new chart version. + + +## Usage notes + +* This repo includes several [examples][] of configurations that can be used +as a reference. They are also used in the automated testing of this chart. +* Automated testing of this chart is currently only run against GKE (Google +Kubernetes Engine). +* The chart deploys a StatefulSet and by default will do an automated rolling +update of your cluster. It does this by waiting for the cluster health to become +green after each instance is updated. If you prefer to update manually you can +set `OnDelete` [updateStrategy][]. +* It is important to verify that the JVM heap size in `esJavaOpts` and to set +the CPU/Memory `resources` to something suitable for your cluster. +* To simplify chart and maintenance each set of node groups is deployed as a +separate Helm release. Take a look at the [multi][] example to get an idea for +how this works. Without doing this it isn't possible to resize persistent +volumes in a StatefulSet. By setting it up this way it makes it possible to add +more nodes with a new storage size then drain the old ones. It also solves the +problem of allowing the user to determine which node groups to update first when +doing upgrades or changes. +* We have designed this chart to be very un-opinionated about how to configure +Elasticsearch. It exposes ways to set environment variables and mount secrets +inside of the container. 
Doing this makes it much easier for this chart to +support multiple versions with minimal changes. + + +## Configuration + +| Parameter | Description | Default | +|------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------| +| `antiAffinityTopologyKey` | The [anti-affinity][] topology key. By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` | +| `antiAffinity` | Setting this to hard enforces the [anti-affinity][] rules. If it is set to soft it will be done "best effort". Other values will be ignored | `hard` | +| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params][] that will be used by readiness [probe][] command | `wait_for_status=green&timeout=1s` | +| `clusterName` | This will be used as the Elasticsearch [cluster.name][] and should be unique per cluster in the namespace | `elasticsearch` | +| `createCert` | This will automatically create the SSL certificates | `true` | +| `enableServiceLinks` | Set to false to disabling service links, which can cause slow pod startup times when there are many services in the current namespace. | `true` | +| `envFrom` | Templatable string to be passed to the [environment from variables][] which will be appended to the `envFrom:` definition for the container | `[]` | +| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml][] for an example of the formatting | `{}` | +| `esJavaOpts` | [Java options][] for Elasticsearch. 
This is where you could configure the [jvm heap size][] | `""` | +| `esJvmOptions` | [Java options][] for Elasticsearch. Override the default JVM options by adding custom options files . See [values.yaml][] for an example of the formatting | `{}` | +| `esMajorVersion` | Deprecated. Instead, use the version of the chart corresponding to your ES minor version. Used to set major version specific configuration. If you are using a custom image and not running the default Elasticsearch version you will need to set this to the version you are running (e.g. `esMajorVersion: 6`) | `""` | +| `extraContainers` | Templatable string of additional `containers` to be passed to the `tpl` function | `""` | +| `extraEnvs` | Extra [environment variables][] which will be appended to the `env:` definition for the container | `[]` | +| `extraInitContainers` | Templatable string of additional `initContainers` to be passed to the `tpl` function | `""` | +| `extraVolumeMounts` | Templatable string of additional `volumeMounts` to be passed to the `tpl` function | `""` | +| `extraVolumes` | Templatable string of additional `volumes` to be passed to the `tpl` function | `""` | +| `fullnameOverride` | Overrides the `clusterName` and `nodeGroup` when used in the naming of resources. This should only be used when using a single `nodeGroup`, otherwise you will have name conflicts | `""` | +| `healthNameOverride` | Overrides `test-elasticsearch-health` pod name | `""` | +| `hostAliases` | Configurable [hostAliases][] | `[]` | +| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. 
If you change this you will also need to set [http.port][] in `extraEnvs` | `9200` | +| `imagePullPolicy` | The Kubernetes [imagePullPolicy][] value | `IfNotPresent` | +| `imagePullSecrets` | Configuration for [imagePullSecrets][] so that you can use a private registry for your image | `[]` | +| `imageTag` | The Elasticsearch Docker image tag | `8.5.1` | +| `image` | The Elasticsearch Docker image | `docker.elastic.co/elasticsearch/elasticsearch` | +| `ingress` | Configurable [ingress][] to expose the Elasticsearch service. See [values.yaml][] for an example | see [values.yaml][] | +| `initResources` | Allows you to set the [resources][] for the `initContainer` in the StatefulSet | `{}` | +| `keystore` | Allows you map Kubernetes secrets into the keystore. See the [config example][] and [how to use the keystore][] | `[]` | +| `labels` | Configurable [labels][] applied to all Elasticsearch pods | `{}` | +| `lifecycle` | Allows you to add [lifecycle hooks][]. See [values.yaml][] for an example of the formatting | `{}` | +| `masterService` | The service name used to connect to the masters. You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery][] for more information | `""` | +| `maxUnavailable` | The [maxUnavailable][] value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` | +| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes][]. Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7 | `2` | +| `nameOverride` | Overrides the `clusterName` when used in the naming of resources | `""` | +| `networkHost` | Value for the [network.host Elasticsearch setting][] | `0.0.0.0` | +| `networkPolicy` | The [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to set. 
See [`values.yaml`](./values.yaml) for an example | `{http.enabled: false,transport.enabled: false}` | +| `nodeAffinity` | Value for the [node affinity settings][] | `{}` | +| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. The name will be `clusterName-nodeGroup-X` , `nameOverride-nodeGroup-X` if a `nameOverride` is specified, and `fullnameOverride-X` if a `fullnameOverride` is specified | `master` | +| `nodeSelector` | Configurable [nodeSelector][] so that you can target specific nodes for your Elasticsearch cluster | `{}` | +| `persistence` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles][] which don't require persistent data | see [values.yaml][] | +| `podAnnotations` | Configurable [annotations][] applied to all Elasticsearch pods | `{}` | +| `podManagementPolicy` | By default Kubernetes [deploys StatefulSets serially][]. This deploys them in parallel so that they can discover each other | `Parallel` | +| `podSecurityContext` | Allows you to set the [securityContext][] for the pod | see [values.yaml][] | +| `podSecurityPolicy` | Configuration for create a pod security policy with minimal permissions to run this Helm chart with `create: true`. Also can be used to reference an external pod security policy with `name: "externalPodSecurityPolicy"` | see [values.yaml][] | +| `priorityClassName` | The name of the [PriorityClass][]. No default is supplied as the PriorityClass must be created first | `""` | +| `protocol` | The protocol that will be used for the readiness [probe][]. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` | +| `rbac` | Configuration for creating a role, role binding and ServiceAccount as part of this Helm chart with `create: true`. 
Also can be used to reference an external ServiceAccount with `serviceAccountName: "externalServiceAccountName"`, or automount the service account token | see [values.yaml][] | +| `readinessProbe` | Configuration fields for the readiness [probe][] | see [values.yaml][] | +| `replicas` | Kubernetes replica count for the StatefulSet (i.e. how many pods) | `3` | +| `resources` | Allows you to set the [resources][] for the StatefulSet | see [values.yaml][] | +| `roles` | A list with the specific [roles][] for the `nodeGroup` | see [values.yaml][] | +| `schedulerName` | Name of the [alternate scheduler][] | `""` | +| `secret.enabled` | Enable Secret creation for Elasticsearch credentials | `true` | +| `secret.password` | Initial password for the elastic user | `""` (generated randomly) | +| `secretMounts` | Allows you easily mount a secret as a file inside the StatefulSet. Useful for mounting certificates and other secrets. See [values.yaml][] for an example | `[]` | +| `securityContext` | Allows you to set the [securityContext][] for the container | see [values.yaml][] | +| `service.annotations` | [LoadBalancer annotations][] that Kubernetes will use for the service. This will configure load balancer if `service.type` is `LoadBalancer` | `{}` | +| `service.enabled` | Enable non-headless service | `true` | +| `service.externalTrafficPolicy` | Some cloud providers allow you to specify the [LoadBalancer externalTrafficPolicy][]. Kubernetes will use this to preserve the client source IP. This will configure load balancer if `service.type` is `LoadBalancer` | `""` | +| `service.httpPortName` | The name of the http port within the service | `http` | +| `service.labelsHeadless` | Labels to be added to headless service | `{}` | +| `service.labels` | Labels to be added to non-headless service | `{}` | +| `service.loadBalancerIP` | Some cloud providers allow you to specify the [loadBalancer][] IP. If the `loadBalancerIP` field is not specified, the IP is dynamically assigned. 
If you specify a `loadBalancerIP` but your cloud provider does not support the feature, it is ignored. | `""` | +| `service.loadBalancerSourceRanges` | The IP ranges that are allowed to access | `[]` | +| `service.nodePort` | Custom [nodePort][] port that can be set if you are using `service.type: nodePort` | `""` | +| `service.transportPortName` | The name of the transport port within the service | `transport` | +| `service.publishNotReadyAddresses` | Consider that all endpoints are considered "ready" even if the Pods themselves are not | `false` | +| `service.type` | Elasticsearch [Service Types][] | `ClusterIP` | +| `sysctlInitContainer` | Allows you to disable the `sysctlInitContainer` if you are setting [sysctl vm.max_map_count][] with another method | `enabled: true` | +| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count][] needed for Elasticsearch | `262144` | +| `terminationGracePeriod` | The [terminationGracePeriod][] in seconds used when trying to stop the pod | `120` | +| `tests.enabled` | Enable creating test related resources when running `helm template` or `helm test` | `true` | +| `tolerations` | Configurable [tolerations][] | `[]` | +| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration][] in `extraEnvs` | `9300` | +| `updateStrategy` | The [updateStrategy][] for the StatefulSet. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` | +| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for StatefulSets][]. You will want to adjust the storage (default `30Gi` ) and the `storageClassName` if you are using a different storage class | see [values.yaml][] | + + +## FAQ + +### How to deploy this chart on a specific K8S distribution? 
+ +This chart is designed to run on production scale Kubernetes clusters with +multiple nodes, lots of memory and persistent storage. For that reason it can be +a bit tricky to run it against local Kubernetes environments such as +[Minikube][]. + +This chart is highly tested with [GKE][], but some K8S distributions also +require specific configurations. + +We provide examples of configuration for the following K8S providers: + +- [Docker for Mac][] +- [KIND][] +- [Minikube][] +- [MicroK8S][] +- [OpenShift][] + +### How to deploy dedicated nodes types? + +All the Elasticsearch pods deployed share the same configuration. If you need to +deploy dedicated [nodes types][] (for example dedicated master and data nodes), +you can deploy multiple releases of this chart with different configurations +while they share the same `clusterName` value. + +For each Helm release, the nodes types can then be defined using `roles` value. + +An example of Elasticsearch cluster using 2 different Helm releases for master, +data and coordinating nodes can be found in [examples/multi][]. + +#### Coordinating nodes + +Every node is implicitly a coordinating node. This means that a node that has an +explicit empty list of roles will only act as a coordinating node. + +When deploying coordinating-only node with Elasticsearch chart, it is required +to define the empty list of roles in both `roles` value and `node.roles` +settings: + +```yaml +roles: [] + +esConfig: + elasticsearch.yml: | + node.roles: [] +``` + +More details in [#1186 (comment)][] + +#### Clustering and Node Discovery + +This chart facilitates Elasticsearch node discovery and services by creating two +`Service` definitions in Kubernetes, one with the name `$clusterName-$nodeGroup` +and another named `$clusterName-$nodeGroup-headless`. +Only `Ready` pods are a part of the `$clusterName-$nodeGroup` service, while all +pods ( `Ready` or not) are a part of `$clusterName-$nodeGroup-headless`. 
+ +If your group of master nodes has the default `nodeGroup: master` then you can +just add new groups of nodes with a different `nodeGroup` and they will +automatically discover the correct master. If your master nodes have a different +`nodeGroup` name then you will need to set `masterService` to +`$clusterName-$masterNodeGroup`. + +The chart value for `masterService` is used to populate +`discovery.zen.ping.unicast.hosts` , which Elasticsearch nodes will use to +contact master nodes and form a cluster. +Therefore, to add a group of nodes to an existing cluster, setting +`masterService` to the desired `Service` name of the related cluster is +sufficient. + +### How to deploy clusters with security (authentication and TLS) enabled? + +This Helm chart can generate a [Kubernetes Secret][] or use an existing one to +setup Elastic credentials. + +This Helm chart can use existing [Kubernetes Secret][] to setup Elastic +certificates for example. These secrets should be created outside of this chart +and accessed using [environment variables][] and volumes. + +This chart is setting TLS and creating a certificate by default, but you can also provide your own certs as a K8S secret. An example of configuration for providing existing certificates can be found in [examples/security][]. + +### How to migrate from helm/charts stable chart? + +If you currently have a cluster deployed with the [helm/charts stable][] chart +you can follow the [migration guide][]. + +### How to install plugins? + +The recommended way to install plugins into our Docker images is to create a +[custom Docker image][]. + +The Dockerfile would look something like: + +``` +ARG elasticsearch_version +FROM docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version} + +RUN bin/elasticsearch-plugin install --batch repository-gcs +``` + +And then updating the `image` in values to point to your custom image. + +There are a couple reasons we recommend this. + +1. 
Tying the availability of Elasticsearch to the download service to install +plugins is not a great idea or something that we recommend. Especially in +Kubernetes where it is normal and expected for a container to be moved to +another host at random times. +2. Mutating the state of a running Docker image (by installing plugins) goes +against best practices of containers and immutable infrastructure. + +### How to use the keystore? + +#### Basic example + +Create the secret, the key name needs to be the keystore key path. In this +example we will create a secret from a file and from a literal string. + +``` +kubectl create secret generic encryption-key --from-file=xpack.watcher.encryption_key=./watcher_encryption_key +kubectl create secret generic slack-hook --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +To add these secrets to the keystore: + +``` +keystore: + - secretName: encryption-key + - secretName: slack-hook +``` + +#### Multiple keys + +All keys in the secret will be added to the keystore. To create the previous +example in one secret you could also do: + +``` +kubectl create secret generic keystore-secrets --from-file=xpack.watcher.encryption_key=./watcher_encryption_key --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +``` +keystore: + - secretName: keystore-secrets +``` + +#### Custom paths and keys + +If you are using these secrets for other applications (besides the Elasticsearch +keystore) then it is also possible to specify the keystore path and which keys +you want to add. Everything specified under each `keystore` item will be passed +through to the `volumeMounts` section for mounting the [secret][]. In this +example we will only add the `slack_hook` key from a secret that also has other +keys. 
Our secret looks like this: + +``` +kubectl create secret generic slack-secrets --from-literal=slack_channel='#general' --from-literal=slack_hook='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +We only want to add the `slack_hook` key to the keystore at path +`xpack.notification.slack.account.monitoring.secure_url`: + +``` +keystore: + - secretName: slack-secrets + items: + - key: slack_hook + path: xpack.notification.slack.account.monitoring.secure_url +``` + +You can also take a look at the [config example][] which is used as part of the +automated testing pipeline. + +### How to enable snapshotting? + +1. Install your [snapshot plugin][] into a custom Docker image following the +[how to install plugins guide][]. +2. Add any required secrets or credentials into an Elasticsearch keystore +following the [how to use the keystore][] guide. +3. Configure the [snapshot repository][] as you normally would. +4. To automate snapshots you can use [Snapshot Lifecycle Management][] or a tool +like [curator][]. + +### How to configure templates post-deployment? + +You can use `postStart` [lifecycle hooks][] to run code triggered after a +container is created. + +Here is an example of `postStart` hook to configure templates: + +```yaml +lifecycle: + postStart: + exec: + command: + - bash + - -c + - | + #!/bin/bash + # Add a template to adjust number of shards/replicas + TEMPLATE_NAME=my_template + INDEX_PATTERN="logstash-*" + SHARD_COUNT=8 + REPLICA_COUNT=1 + ES_URL=http://localhost:9200 + while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done + curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' +``` + + +## Contributing + +Please check [CONTRIBUTING.md][] before any contribution or for any questions +about our development and testing process. 
+ +[#1186 (comment)]: https://github.com/elastic/helm-charts/pull/1186#discussion_r631166442 +[alternate scheduler]: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/#specify-schedulers-for-pods +[annotations]: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +[anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +[BREAKING_CHANGES.md]: https://github.com/elastic/helm-charts/blob/main/BREAKING_CHANGES.md +[CHANGELOG.md]: https://github.com/elastic/helm-charts/blob/main/CHANGELOG.md +[cluster.name]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.name.html +[clustering and node discovery]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/README.md#clustering-and-node-discovery +[config example]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/config/values.yaml +[CONTRIBUTING.md]: https://github.com/elastic/helm-charts/blob/main/CONTRIBUTING.md +[curator]: https://www.elastic.co/guide/en/elasticsearch/client/curator/current/snapshot.html +[custom docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#_c_customized_image +[deploys statefulsets serially]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies +[discovery.zen.minimum_master_nodes]: https://www.elastic.co/guide/en/elasticsearch/reference/current/discovery-settings.html#minimum_master_nodes +[docker for mac]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/docker-for-mac +[eck-charts]: https://github.com/elastic/cloud-on-k8s/tree/master/deploy +[elastic cloud on kubernetes]: https://github.com/elastic/cloud-on-k8s +[elasticsearch cluster health status params]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params +[elasticsearch docker image]: 
https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html +[environment from variables]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables +[environment variables]: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config +[examples]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/ +[examples/multi]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi +[examples/security]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security +[gke]: https://cloud.google.com/kubernetes-engine +[helm]: https://helm.sh +[helm/charts stable]: https://github.com/helm/charts/tree/master/stable/elasticsearch/ +[hostAliases]: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +[how to install plugins guide]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/README.md#how-to-install-plugins +[how to use the keystore]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/README.md#how-to-use-the-keystore +[http.port]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#_settings +[imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images +[imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret +[ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ +[java options]: https://www.elastic.co/guide/en/elasticsearch/reference/current/jvm-options.html +[jvm heap size]: https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html +[kind]: https://github.com/elastic/helm-charts/tree/main//elasticsearch/examples/kubernetes-kind +[labels]: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +[lifecycle hooks]: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ +[loadBalancer annotations]: https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws +[loadBalancer externalTrafficPolicy]: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip +[loadBalancer]: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer +[maxUnavailable]: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +[microk8s]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/microk8s +[migration guide]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/migration/README.md +[minikube]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/minikube +[multi]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/ +[network.host elasticsearch setting]: https://www.elastic.co/guide/en/elasticsearch/reference/current/network.host.html +[node affinity settings]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +[nodePort]: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport +[nodes types]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html +[nodeSelector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +[openshift]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift +[priorityClass]: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +[probe]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +[resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +[roles]: 
https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html +[secret]: https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets +[securityContext]: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +[service types]: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +[snapshot lifecycle management]: https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-lifecycle-management.html +[snapshot plugin]: https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository.html +[snapshot repository]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html +[supported configurations]: https://github.com/elastic/helm-charts/blob/main/README.md#supported-configurations +[sysctl vm.max_map_count]: https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html#vm-max-map-count +[terminationGracePeriod]: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +[tolerations]: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +[transport port configuration]: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#_transport_settings +[updateStrategy]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ +[values.yaml]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/values.yaml +[volumeClaimTemplate for statefulsets]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage diff --git a/applications/elasticsearch/deploy/charts/templates/NOTES.txt b/applications/elasticsearch/deploy/charts/templates/NOTES.txt new file mode 100755 index 000000000..752526f31 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/NOTES.txt @@ -0,0 +1,8 @@ +1. Watch all cluster members come up. 
+ $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "elasticsearch.uname" . }} -w +2. Retrieve elastic user's password. + $ kubectl get secrets --namespace={{ .Release.Namespace }} {{ template "elasticsearch.uname" . }}-credentials -ojsonpath='{.data.password}' | base64 -d +{{- if .Values.tests.enabled }} +3. Test cluster health using Helm test. + $ helm --namespace={{ .Release.Namespace }} test {{ .Release.Name }} +{{- end -}} diff --git a/applications/elasticsearch/deploy/charts/templates/_helpers.tpl b/applications/elasticsearch/deploy/charts/templates/_helpers.tpl new file mode 100644 index 000000000..b47e2fed8 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/_helpers.tpl @@ -0,0 +1,97 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "elasticsearch.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "elasticsearch.uname" -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-{{ .Values.nodeGroup }} +{{- else -}} +{{ .Values.nameOverride }}-{{ .Values.nodeGroup }} +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- end -}} + +{{/* +Generate certificates when the secret doesn't exist +*/}} +{{- define "elasticsearch.gen-certs" -}} +{{- $certs := lookup "v1" "Secret" .Release.Namespace ( printf "%s-certs" (include "elasticsearch.uname" . 
) ) -}} +{{- if $certs -}} +tls.crt: {{ index $certs.data "tls.crt" }} +tls.key: {{ index $certs.data "tls.key" }} +ca.crt: {{ index $certs.data "ca.crt" }} +{{- else -}} +{{- $altNames := list ( include "elasticsearch.masterService" . ) ( printf "%s.%s" (include "elasticsearch.masterService" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "elasticsearch.masterService" .) .Release.Namespace ) -}} +{{- $ca := genCA "elasticsearch-ca" 365 -}} +{{- $cert := genSignedCert ( include "elasticsearch.masterService" . ) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | toString | b64enc }} +tls.key: {{ $cert.Key | toString | b64enc }} +ca.crt: {{ $ca.Cert | toString | b64enc }} +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.masterService" -}} +{{- if empty .Values.masterService -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-master +{{- else -}} +{{ .Values.nameOverride }}-master +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- else -}} +{{ .Values.masterService }} +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.endpoints" -}} +{{- $replicas := int (toString (.Values.replicas)) }} +{{- $uname := (include "elasticsearch.uname" .) }} + {{- range $i, $e := untilStep 0 $replicas 1 -}} +{{ $uname }}-{{ $i }}, + {{- end -}} +{{- end -}} + +{{- define "elasticsearch.roles" -}} +{{- range $.Values.roles -}} +{{ . 
}}, +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.esMajorVersion" -}} +{{- if .Values.esMajorVersion -}} +{{ .Values.esMajorVersion }} +{{- else -}} +{{- $version := int (index (.Values.imageTag | splitList ".") 0) -}} + {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image) (not (eq $version 0)) -}} +{{ $version }} + {{- else -}} +8 + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Use the fullname if the serviceAccount value is not set +*/}} +{{- define "elasticsearch.serviceAccount" -}} +{{- .Values.rbac.serviceAccountName | default (include "elasticsearch.uname" .) -}} +{{- end -}} diff --git a/applications/elasticsearch/deploy/charts/templates/configmap.yaml b/applications/elasticsearch/deploy/charts/templates/configmap.yaml new file mode 100644 index 000000000..fd1ad3063 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/configmap.yaml @@ -0,0 +1,34 @@ +{{- if .Values.esConfig }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.uname" . }}-config + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +data: +{{- range $path, $config := .Values.esConfig }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} +{{- if .Values.esJvmOptions }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.uname" . }}-jvm-options + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . 
}}" +data: +{{- range $path, $config := .Values.esJvmOptions }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/applications/elasticsearch/deploy/charts/templates/ingress.yaml b/applications/elasticsearch/deploy/charts/templates/ingress.yaml new file mode 100644 index 000000000..e60cebf18 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/ingress.yaml @@ -0,0 +1,64 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "elasticsearch.uname" . -}} +{{- $httpPort := .Values.httpPort -}} +{{- $pathtype := .Values.ingress.pathtype -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className | quote }} + {{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- if .ingressPath }} + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- else }} +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end }} +{{- end}} + rules: + {{- range .Values.ingress.hosts }} + {{- if $ingressPath }} + - host: {{ . 
}} + http: + paths: + - path: {{ $ingressPath }} + pathType: {{ $pathtype }} + backend: + service: + name: {{ $fullName }} + port: + number: {{ $httpPort }} + {{- else }} + - host: {{ .host }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ $pathtype }} + backend: + service: + name: {{ $fullName }} + port: + number: {{ .servicePort | default $httpPort }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} diff --git a/applications/elasticsearch/deploy/charts/templates/networkpolicy.yaml b/applications/elasticsearch/deploy/charts/templates/networkpolicy.yaml new file mode 100644 index 000000000..62bb1bd7f --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/networkpolicy.yaml @@ -0,0 +1,61 @@ +{{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "elasticsearch.uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +spec: + podSelector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" + ingress: # Allow inbound connections + +{{- if .Values.networkPolicy.http.enabled }} + # For HTTP access + - ports: + - port: {{ .Values.httpPort }} + from: + # From authorized Pods (having the correct label) + - podSelector: + matchLabels: + {{ template "elasticsearch.uname" . }}-http-client: "true" +{{- with .Values.networkPolicy.http.explicitNamespacesSelector }} + # From authorized namespaces + namespaceSelector: +{{ toYaml . | indent 12 }} +{{- end }} +{{- with .Values.networkPolicy.http.additionalRules }} + # Or from custom additional rules +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- end }} + +{{- if .Values.networkPolicy.transport.enabled }} + # For transport access + - ports: + - port: {{ .Values.transportPort }} + from: + # From authorized Pods (having the correct label) + - podSelector: + matchLabels: + {{ template "elasticsearch.uname" . }}-transport-client: "true" +{{- with .Values.networkPolicy.transport.explicitNamespacesSelector }} + # From authorized namespaces + namespaceSelector: +{{ toYaml . | indent 12 }} +{{- end }} +{{- with .Values.networkPolicy.transport.additionalRules }} + # Or from custom additional rules +{{ toYaml . | indent 8 }} +{{- end }} + # Or from other ElasticSearch Pods + - podSelector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" +{{- end }} + +{{- end }} diff --git a/applications/elasticsearch/deploy/charts/templates/poddisruptionbudget.yaml b/applications/elasticsearch/deploy/charts/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..6d0bdf3fd --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/poddisruptionbudget.yaml @@ -0,0 +1,15 @@ +{{- if .Values.maxUnavailable }} +{{- if .Capabilities.APIVersions.Has "policy/v1" -}} +apiVersion: policy/v1 +{{- else}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: "{{ template "elasticsearch.uname" . }}-pdb" +spec: + maxUnavailable: {{ .Values.maxUnavailable }} + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" +{{- end }} diff --git a/applications/elasticsearch/deploy/charts/templates/podsecuritypolicy.yaml b/applications/elasticsearch/deploy/charts/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..d8b35457a --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/podsecuritypolicy.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podSecurityPolicy.create -}} +{{- $fullName := include "elasticsearch.uname" . 
-}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ default $fullName .Values.podSecurityPolicy.name | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +spec: +{{ toYaml .Values.podSecurityPolicy.spec | indent 2 }} +{{- end -}} diff --git a/applications/elasticsearch/deploy/charts/templates/role.yaml b/applications/elasticsearch/deploy/charts/templates/role.yaml new file mode 100644 index 000000000..d3a7ee302 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/role.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +rules: + - apiGroups: + - extensions + resources: + - podsecuritypolicies + resourceNames: + {{- if eq .Values.podSecurityPolicy.name "" }} + - {{ $fullName | quote }} + {{- else }} + - {{ .Values.podSecurityPolicy.name | quote }} + {{- end }} + verbs: + - use +{{- end -}} diff --git a/applications/elasticsearch/deploy/charts/templates/rolebinding.yaml b/applications/elasticsearch/deploy/charts/templates/rolebinding.yaml new file mode 100644 index 000000000..e0ecced8f --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . 
-}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +subjects: + - kind: ServiceAccount + name: "{{ template "elasticsearch.serviceAccount" . }}" + namespace: {{ .Release.Namespace | quote }} +roleRef: + kind: Role + name: {{ $fullName | quote }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/applications/elasticsearch/deploy/charts/templates/secret-cert.yaml b/applications/elasticsearch/deploy/charts/templates/secret-cert.yaml new file mode 100644 index 000000000..97d8dec5f --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/secret-cert.yaml @@ -0,0 +1,14 @@ +{{- if .Values.createCert }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "elasticsearch.uname" . }}-certs + labels: + app: {{ template "elasticsearch.uname" . }} + chart: "{{ .Chart.Name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "elasticsearch.gen-certs" . ) | indent 2 }} +{{- end }} diff --git a/applications/elasticsearch/deploy/charts/templates/secret.yaml b/applications/elasticsearch/deploy/charts/templates/secret.yaml new file mode 100644 index 000000000..cbdcbbaf1 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/secret.yaml @@ -0,0 +1,23 @@ +{{- if .Values.secret.enabled -}} +{{- $passwordValue := (randAlphaNum 16) | b64enc | quote }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "elasticsearch.uname" . }}-credentials + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . 
}}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} +type: Opaque +data: + username: {{ "elastic" | b64enc }} + {{- if .Values.secret.password }} + password: {{ .Values.secret.password | b64enc }} + {{- else }} + password: {{ $passwordValue }} + {{- end }} +{{- end }} diff --git a/applications/elasticsearch/deploy/charts/templates/service.yaml b/applications/elasticsearch/deploy/charts/templates/service.yaml new file mode 100644 index 000000000..5fe52eb78 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/service.yaml @@ -0,0 +1,78 @@ +{{- if .Values.service.enabled -}} +--- +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }} +{{- else }} + name: {{ template "elasticsearch.uname" . }} +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4}} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + type: {{ .Values.service.type }} + selector: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }} + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + protocol: TCP + port: {{ .Values.httpPort }} +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + - name: {{ .Values.service.transportPortName | default "transport" }} + protocol: TCP + port: {{ .Values.transportPort }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} +{{- end }} +{{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml . 
| indent 4 }} +{{- end }} +{{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} +{{- end }} +{{- end }} +--- +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }}-headless +{{- else }} + name: {{ template "elasticsearch.uname" . }}-headless +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labelsHeadless }} +{{ toYaml .Values.service.labelsHeadless | indent 4 }} +{{- end }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve + # Create endpoints also if the related pod isn't ready + publishNotReadyAddresses: true + selector: + app: "{{ template "elasticsearch.uname" . }}" + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + port: {{ .Values.httpPort }} + - name: {{ .Values.service.transportPortName | default "transport" }} + port: {{ .Values.transportPort }} diff --git a/applications/elasticsearch/deploy/charts/templates/serviceaccount.yaml b/applications/elasticsearch/deploy/charts/templates/serviceaccount.yaml new file mode 100644 index 000000000..a7ef847ea --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ template "elasticsearch.serviceAccount" . }}" + annotations: + {{- with .Values.rbac.serviceAccountAnnotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +{{- end -}} diff --git a/applications/elasticsearch/deploy/charts/templates/statefulset.yaml b/applications/elasticsearch/deploy/charts/templates/statefulset.yaml new file mode 100644 index 000000000..b5dcd74e6 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/statefulset.yaml @@ -0,0 +1,376 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "elasticsearch.uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}" +spec: + serviceName: {{ template "elasticsearch.uname" . }}-headless + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" + replicas: {{ .Values.replicas }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ template "elasticsearch.uname" . }} + {{- if .Values.persistence.labels.enabled }} + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: +{{ toYaml .Values.volumeClaimTemplate | indent 6 }} + {{- end }} + template: + metadata: + name: "{{ template "elasticsearch.uname" . 
}}" + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{/* This forces a restart if the configmap has changed */}} + {{- if or .Values.esConfig .Values.esJvmOptions }} + configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- if .Values.fsGroup }} + fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup + {{- end }} + {{- if or .Values.rbac.create .Values.rbac.serviceAccountName }} + serviceAccountName: "{{ template "elasticsearch.serviceAccount" . }}" + {{- end }} + automountServiceAccountToken: {{ .Values.rbac.automountToken }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + affinity: + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" .}}" + topologyKey: {{ .Values.antiAffinityTopologyKey }} + {{- else if eq .Values.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: {{ .Values.antiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" . }}" + {{- end }} + {{- with .Values.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + volumes: + {{- range .Values.secretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- if .defaultMode }} + defaultMode: {{ .defaultMode }} + {{- end }} + {{- end }} + {{- if .Values.esConfig }} + - name: esconfig + configMap: + name: {{ template "elasticsearch.uname" . }}-config + {{- end }} + {{- if .Values.esJvmOptions }} + - name: esjvmoptions + configMap: + name: {{ template "elasticsearch.uname" . }}-jvm-options + {{- end }} + {{- if .Values.createCert }} + - name: elasticsearch-certs + secret: + secretName: {{ template "elasticsearch.uname" . }}-certs + {{- end }} +{{- if .Values.keystore }} + - name: keystore + emptyDir: {} + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + secret: {{ toYaml . 
| nindent 12 }} + {{- end }} +{{ end }} + {{- if .Values.extraVolumes }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumes) }} +{{ tpl .Values.extraVolumes . | indent 8 }} + {{- else }} +{{ toYaml .Values.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + {{- if .Values.hostAliases }} + hostAliases: {{ toYaml .Values.hostAliases | nindent 8 }} + {{- end }} + {{- if or (.Values.extraInitContainers) (.Values.sysctlInitContainer.enabled) (.Values.keystore) }} + initContainers: + {{- if .Values.sysctlInitContainer.enabled }} + - name: configure-sysctl + securityContext: + runAsUser: 0 + privileged: true + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"] + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- end }} +{{ if .Values.keystore }} + - name: keystore + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - bash + - -c + - | + set -euo pipefail + + elasticsearch-keystore create + + for i in /tmp/keystoreSecrets/*/*; do + key=$(basename $i) + echo "Adding file $i to keystore key $key" + elasticsearch-keystore add-file "$key" "$i" + done + + # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup + if [ ! 
-z ${ELASTIC_PASSWORD+x} ]; then + echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password' + echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password + fi + + cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/ + env: {{ toYaml .Values.extraEnvs | nindent 10 }} + envFrom: {{ toYaml .Values.envFrom | nindent 10 }} + resources: {{ toYaml .Values.initResources | nindent 10 }} + volumeMounts: + - name: keystore + mountPath: /tmp/keystore + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + mountPath: /tmp/keystoreSecrets/{{ .secretName }} + {{- end }} +{{ end }} + {{- if .Values.extraInitContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraInitContainers) }} +{{ tpl .Values.extraInitContainers . | indent 6 }} + {{- else }} +{{ toYaml .Values.extraInitContainers | indent 6 }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: "{{ template "elasticsearch.name" . }}" + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + readinessProbe: + httpGet: + path: / + port: 9200 + scheme: HTTP +{{ toYaml .Values.readinessProbe | indent 10 }} + ports: + - name: http + containerPort: {{ .Values.httpPort }} + - name: transport + containerPort: {{ .Values.transportPort }} + resources: +{{ toYaml .Values.resources | indent 10 }} + env: + - name: node.name + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if has "master" .Values.roles }} + - name: cluster.initial_master_nodes + value: "{{ template "elasticsearch.endpoints" . }}" + {{- end }} + {{- if gt (len (include "elasticsearch.roles" .)) 0 }} + - name: node.roles + value: "{{ template "elasticsearch.roles" . 
}}" + {{- end }} + {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }} + - name: discovery.zen.ping.unicast.hosts + value: "{{ template "elasticsearch.masterService" . }}-headless" + {{- else }} + - name: discovery.seed_hosts + value: "{{ template "elasticsearch.masterService" . }}-headless" + {{- end }} + - name: cluster.name + value: "{{ .Values.clusterName }}" + - name: network.host + value: "{{ .Values.networkHost }}" + {{- if .Values.secret.enabled }} + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "elasticsearch.uname" . }}-credentials + key: password + {{- end }} + {{- if .Values.esJavaOpts }} + - name: ES_JAVA_OPTS + value: "{{ .Values.esJavaOpts }}" + {{- end }} + {{- if .Values.createCert }} + - name: xpack.security.enabled + value: "true" + - name: xpack.security.transport.ssl.enabled + value: "true" + - name: xpack.security.http.ssl.enabled + value: "true" + - name: xpack.security.transport.ssl.verification_mode + value: "certificate" + - name: xpack.security.transport.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.transport.ssl.certificate + value: "/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.transport.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + - name: xpack.security.http.ssl.key + value: "/usr/share/elasticsearch/config/certs/tls.key" + - name: xpack.security.http.ssl.certificate + value: "/usr/share/elasticsearch/config/certs/tls.crt" + - name: xpack.security.http.ssl.certificate_authorities + value: "/usr/share/elasticsearch/config/certs/ca.crt" + + {{- else }} + - name: xpack.security.enabled + value: "false" + - name: xpack.security.http.ssl.enabled + value: "false" + - name: xpack.security.transport.ssl.enabled + value: "false" + {{- end }} +{{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} +{{- end }} +{{- if .Values.envFrom }} + envFrom: +{{ toYaml .Values.envFrom | 
indent 10 }} +{{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: "{{ template "elasticsearch.uname" . }}" + mountPath: /usr/share/elasticsearch/data + {{- end }} + {{- if .Values.createCert }} + - name: elasticsearch-certs + mountPath: /usr/share/elasticsearch/config/certs + readOnly: true + {{- end }} +{{ if .Values.keystore }} + - name: keystore + mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore + subPath: elasticsearch.keystore +{{ end }} + {{- range .Values.secretMounts }} + - name: {{ .name }} + mountPath: {{ .path }} + {{- if .subPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- range $path, $config := .Values.esConfig }} + - name: esconfig + mountPath: /usr/share/elasticsearch/config/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- range $path, $config := .Values.esJvmOptions }} + - name: esjvmoptions + mountPath: /usr/share/elasticsearch/config/jvm.options.d/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- if .Values.extraVolumeMounts }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }} +{{ tpl .Values.extraVolumeMounts . | indent 10 }} + {{- else }} +{{ toYaml .Values.extraVolumeMounts | indent 10 }} + {{- end }} + {{- end }} +{{- if .Values.lifecycle }} + lifecycle: +{{ toYaml .Values.lifecycle | indent 10 }} +{{- end }} + {{- if .Values.extraContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraContainers) }} +{{ tpl .Values.extraContainers . 
| indent 6 }} + {{- else }} +{{ toYaml .Values.extraContainers | indent 6 }} + {{- end }} + {{- end }} diff --git a/applications/elasticsearch/deploy/charts/templates/test/test-elasticsearch-health.yaml b/applications/elasticsearch/deploy/charts/templates/test/test-elasticsearch-health.yaml new file mode 100644 index 000000000..d0890fb98 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/templates/test/test-elasticsearch-health.yaml @@ -0,0 +1,50 @@ +{{- if .Values.tests.enabled -}} +--- +apiVersion: v1 +kind: Pod +metadata: +{{- if .Values.healthNameOverride }} + name: {{ .Values.healthNameOverride | quote }} +{{- else }} + name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-delete-policy": hook-succeeded +spec: + securityContext: +{{ toYaml .Values.podSecurityContext | indent 4 }} + containers: +{{- if .Values.healthNameOverride }} + - name: {{ .Values.healthNameOverride | quote }} +{{- else }} + - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + env: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "elasticsearch.uname" . }}-credentials + key: password + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - "sh" + - "-c" + - | + #!/usr/bin/env bash -e + curl -XGET --fail --cacert /usr/share/elasticsearch/config/certs/tls.crt -u "elastic:${ELASTIC_PASSWORD}" https://'{{ template "elasticsearch.uname" . }}:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }}' + volumeMounts: + - name: elasticsearch-certs + mountPath: /usr/share/elasticsearch/config/certs + readOnly: true + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 4 }} + {{- end }} + restartPolicy: Never + volumes: + - name: elasticsearch-certs + secret: + secretName: {{ template "elasticsearch.uname" . 
}}-certs +{{- end -}} diff --git a/applications/elasticsearch/deploy/charts/tests/elasticsearch_test.py b/applications/elasticsearch/deploy/charts/tests/elasticsearch_test.py new file mode 100644 index 000000000..5c41e6a81 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/tests/elasticsearch_test.py @@ -0,0 +1,1504 @@ +import yaml +from helpers import helm_template +import os +import sys + +sys.path.insert(1, os.path.join(sys.path[0], "../../helpers")) + +clusterName = "elasticsearch" +nodeGroup = "master" +uname = clusterName + "-" + nodeGroup + + +def test_defaults(): + config = """ + """ + + r = helm_template(config) + + # Statefulset + assert r["statefulset"][uname]["spec"]["replicas"] == 3 + assert r["statefulset"][uname]["spec"]["updateStrategy"] == { + "type": "RollingUpdate" + } + assert r["statefulset"][uname]["spec"]["podManagementPolicy"] == "Parallel" + assert r["statefulset"][uname]["spec"]["serviceName"] == uname + "-headless" + assert r["statefulset"][uname]["spec"]["template"]["spec"]["affinity"][ + "podAntiAffinity" + ]["requiredDuringSchedulingIgnoredDuringExecution"][0] == { + "labelSelector": { + "matchExpressions": [{"key": "app", "operator": "In", "values": [uname]}] + }, + "topologyKey": "kubernetes.io/hostname", + } + + # Default environment variables + env_vars = [ + { + "name": "node.name", + "valueFrom": {"fieldRef": {"fieldPath": "metadata.name"}}, + }, + { + "name": "cluster.initial_master_nodes", + "value": uname + "-0," + uname + "-1," + uname + "-2,", + }, + {"name": "discovery.seed_hosts", "value": uname + "-headless"}, + {"name": "network.host", "value": "0.0.0.0"}, + {"name": "cluster.name", "value": clusterName}, + { + "name": "node.roles", + "value": "master,data,data_content,data_hot,data_warm,data_cold,ingest,ml,remote_cluster_client,transform,", + }, + ] + + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + for env in env_vars: + assert env in c["env"] + + # Image + assert 
c["image"].startswith("docker.elastic.co/elasticsearch/elasticsearch:") + assert c["imagePullPolicy"] == "IfNotPresent" + assert c["name"] == "elasticsearch" + + # Ports + assert c["ports"][0] == {"name": "http", "containerPort": 9200} + assert c["ports"][1] == {"name": "transport", "containerPort": 9300} + + # Health checks + assert c["readinessProbe"]["failureThreshold"] == 3 + assert c["readinessProbe"]["initialDelaySeconds"] == 10 + assert c["readinessProbe"]["periodSeconds"] == 10 + assert c["readinessProbe"]["successThreshold"] == 3 + assert c["readinessProbe"]["timeoutSeconds"] == 5 + + assert "curl" in c["readinessProbe"]["exec"]["command"][-1] + assert "https://127.0.0.1:9200" in c["readinessProbe"]["exec"]["command"][-1] + + # Resources + assert c["resources"] == { + "requests": {"cpu": "1000m", "memory": "2Gi"}, + "limits": {"cpu": "1000m", "memory": "2Gi"}, + } + + # Mounts + assert c["volumeMounts"][0]["mountPath"] == "/usr/share/elasticsearch/data" + assert c["volumeMounts"][0]["name"] == uname + + # volumeClaimTemplates + v = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0] + assert v["metadata"]["name"] == uname + assert "labels" not in v["metadata"] + assert v["spec"]["accessModes"] == ["ReadWriteOnce"] + assert v["spec"]["resources"]["requests"]["storage"] == "30Gi" + + # Init container + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][0] + assert i["name"] == "configure-sysctl" + assert i["command"] == ["sysctl", "-w", "vm.max_map_count=262144"] + assert i["image"].startswith("docker.elastic.co/elasticsearch/elasticsearch:") + assert i["securityContext"] == {"privileged": True, "runAsUser": 0} + + # Other + assert r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"] == { + "fsGroup": 1000, + "runAsUser": 1000, + } + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + "terminationGracePeriodSeconds" + ] == + 120 + ) + + # Pod disruption budget + assert 
r["poddisruptionbudget"][uname + "-pdb"]["spec"]["maxUnavailable"] == 1 + + # Service + s = r["service"][uname] + assert s["metadata"]["name"] == uname + assert s["metadata"]["annotations"] == {} + assert s["spec"]["type"] == "ClusterIP" + assert s["spec"]["publishNotReadyAddresses"] == False + assert len(s["spec"]["ports"]) == 2 + assert s["spec"]["ports"][0] == {"name": "http", "port": 9200, "protocol": "TCP"} + assert s["spec"]["ports"][1] == { + "name": "transport", + "port": 9300, + "protocol": "TCP", + } + assert "loadBalancerSourceRanges" not in s["spec"] + + # Headless Service + h = r["service"][uname + "-headless"] + assert h["spec"]["clusterIP"] == "None" + assert h["spec"]["publishNotReadyAddresses"] == True + assert h["spec"]["ports"][0]["name"] == "http" + assert h["spec"]["ports"][0]["port"] == 9200 + assert h["spec"]["ports"][1]["name"] == "transport" + assert h["spec"]["ports"][1]["port"] == 9300 + + # Empty customizable defaults + assert "imagePullSecrets" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "tolerations" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "nodeSelector" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "ingress" not in r + assert "hostAliases" not in r["statefulset"][uname]["spec"]["template"]["spec"] + + +def test_increasing_the_replicas(): + config = """ +replicas: 5 +""" + r = helm_template(config) + assert r["statefulset"][uname]["spec"]["replicas"] == 5 + + +def test_disabling_pod_disruption_budget(): + config = """ +maxUnavailable: false +""" + r = helm_template(config) + assert "poddisruptionbudget" not in r + + +def test_overriding_the_image_and_tag(): + config = """ +image: customImage +imageTag: 6.2.4 +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["image"] == + "customImage:6.2.4" + ) + + +def test_set_initial_master_nodes(): + config = """ +roles: + - master +""" + r = 
helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert { + "name": "cluster.initial_master_nodes", + "value": "elasticsearch-master-0," + + "elasticsearch-master-1," + + "elasticsearch-master-2,", + } in env + + for e in env: + assert e["name"] != "discovery.zen.minimum_master_nodes" + + +def test_dont_set_initial_master_nodes_if_not_master(): + config = """ +roles: + - data +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + for e in env: + assert e["name"] != "cluster.initial_master_nodes" + + +def test_set_discovery_seed_host(): + config = """ +roles: + - master +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert { + "name": "discovery.seed_hosts", + "value": "elasticsearch-master-headless", + } in env + + for e in env: + assert e["name"] != "discovery.zen.ping.unicast.hosts" + + +def test_adding_extra_env_vars(): + config = """ +extraEnvs: + - name: hello + value: world +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert {"name": "hello", "value": "world"} in env + + +def test_adding_env_from(): + config = """ +envFrom: +- secretRef: + name: secret-name +""" + r = helm_template(config) + secretRef = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0][ + "envFrom" + ][0]["secretRef"] + assert secretRef == {"name": "secret-name"} + + +def test_adding_a_extra_volume_with_volume_mount(): + config = """ +extraVolumes: | + - name: extras + emptyDir: {} +extraVolumeMounts: | + - name: extras + mountPath: /usr/share/extras + readOnly: true +""" + r = helm_template(config) + extraVolume = r["statefulset"][uname]["spec"]["template"]["spec"]["volumes"] + assert {"name": "extras", "emptyDir": {}} in extraVolume + extraVolumeMounts = r["statefulset"][uname]["spec"]["template"]["spec"][ + 
"containers" + ][0]["volumeMounts"] + assert { + "name": "extras", + "mountPath": "/usr/share/extras", + "readOnly": True, + } in extraVolumeMounts + + +def test_adding_a_extra_volume_with_volume_mount_as_yaml(): + config = """ +extraVolumes: + - name: extras + emptyDir: {} +extraVolumeMounts: + - name: extras + mountPath: /usr/share/extras + readOnly: true +""" + r = helm_template(config) + extraVolume = r["statefulset"][uname]["spec"]["template"]["spec"]["volumes"] + assert {"name": "extras", "emptyDir": {}} in extraVolume + extraVolumeMounts = r["statefulset"][uname]["spec"]["template"]["spec"][ + "containers" + ][0]["volumeMounts"] + assert { + "name": "extras", + "mountPath": "/usr/share/extras", + "readOnly": True, + } in extraVolumeMounts + + +def test_adding_a_extra_container(): + config = """ +extraContainers: | + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraContainer = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraContainer + + +def test_adding_a_extra_container_as_yaml(): + config = """ +extraContainers: + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraContainer = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraContainer + + +def test_adding_a_extra_init_container(): + config = """ +extraInitContainers: | + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraInitContainer = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraInitContainer + + +def test_adding_a_extra_init_container_as_yaml(): + config = 
""" +extraInitContainers: + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraInitContainer = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraInitContainer + + +def test_sysctl_init_container_disabled(): + config = """ +sysctlInitContainer: + enabled: false +""" + r = helm_template(config) + assert "initContainers" not in r["statefulset"][uname]["spec"]["template"]["spec"] + + +def test_sysctl_init_container_enabled(): + config = """ +sysctlInitContainer: + enabled: true +""" + r = helm_template(config) + initContainers = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert initContainers[0]["name"] == "configure-sysctl" + + +def test_sysctl_init_container_image(): + config = """ +image: customImage +imageTag: 6.2.4 +imagePullPolicy: Never +sysctlInitContainer: + enabled: true +""" + r = helm_template(config) + initContainers = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert initContainers[0]["image"] == "customImage:6.2.4" + assert initContainers[0]["imagePullPolicy"] == "Never" + + +def test_adding_storageclass_annotation_to_volumeclaimtemplate(): + config = """ +persistence: + annotations: + volume.beta.kubernetes.io/storage-class: id +""" + r = helm_template(config) + annotations = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0][ + "metadata" + ]["annotations"] + assert annotations["volume.beta.kubernetes.io/storage-class"] == "id" + + +def test_adding_multiple_persistence_annotations(): + config = """ + persistence: + annotations: + hello: world + world: hello + """ + r = helm_template(config) + annotations = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0][ + "metadata" + ]["annotations"] + + assert annotations["hello"] == "world" + assert annotations["world"] == "hello" + + +def 
test_enabling_persistence_label_in_volumeclaimtemplate(): + config = """ +persistence: + labels: + enabled: true +""" + r = helm_template(config) + volume_claim_template_labels = r["statefulset"][uname]["spec"][ + "volumeClaimTemplates" + ][0]["metadata"]["labels"] + statefulset_labels = r["statefulset"][uname]["metadata"]["labels"] + expected_labels = statefulset_labels + # heritage label shouldn't be present in volumeClaimTemplates labels + expected_labels.pop("heritage") + assert volume_claim_template_labels == expected_labels + + +def test_adding_a_secret_mount(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "name": "elastic-certificates", + } + assert { + "name": "elastic-certificates", + "secret": {"secretName": "elastic-certs"}, + } in s["volumes"] + + +def test_adding_a_secret_mount_with_subpath(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs + subPath: cert.crt +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "subPath": "cert.crt", + "name": "elastic-certificates", + } + + +def test_adding_a_secret_mount_with_default_mode(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs + subPath: cert.crt + defaultMode: 0755 +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "subPath": "cert.crt", + "name": 
"elastic-certificates", + } + + +def test_adding_image_pull_secrets(): + config = """ +imagePullSecrets: + - name: test-registry +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["imagePullSecrets"][0][ + "name" + ] == + "test-registry" + ) + + +def test_adding_tolerations(): + config = """ +tolerations: +- key: "key1" + operator: "Equal" + value: "value1" + effect: "NoExecute" + tolerationSeconds: 3600 +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["tolerations"][0]["key"] == + "key1" + ) + + +def test_adding_pod_annotations(): + config = """ +podAnnotations: + iam.amazonaws.com/role: es-role +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"][ + "iam.amazonaws.com/role" + ] == + "es-role" + ) + + +def test_adding_serviceaccount_annotations(): + config = """ +rbac: + create: true + serviceAccountAnnotations: + eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount +""" + r = helm_template(config) + assert ( + r["serviceaccount"][uname]["metadata"]["annotations"][ + "eks.amazonaws.com/role-arn" + ] == + "arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount" + ) + + +def test_adding_a_node_selector(): + config = """ +nodeSelector: + disktype: ssd +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["nodeSelector"]["disktype"] == + "ssd" + ) + + +def test_adding_resources_to_initcontainer(): + config = """ +initResources: + limits: + cpu: "25m" + memory: "128Mi" + requests: + cpu: "25m" + memory: "128Mi" +""" + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][0] + + assert i["resources"] == { + "requests": {"cpu": "25m", "memory": "128Mi"}, + "limits": {"cpu": "25m", "memory": "128Mi"}, + } + + +def test_adding_a_node_affinity(): + config = """ 
+nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: mylabel + operator: In + values: + - myvalue +""" + r = helm_template(config) + assert r["statefulset"][uname]["spec"]["template"]["spec"]["affinity"][ + "nodeAffinity" + ] == { + "preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 100, + "preference": { + "matchExpressions": [ + {"key": "mylabel", "operator": "In", "values": ["myvalue"]} + ] + }, + } + ] + } + + +def test_adding_an_ingress_rule(): + config = """ +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + hosts: + - host: elasticsearch.elastic.co + paths: + - path: / + - host: '' + paths: + - path: / + - path: /mypath + servicePort: 8888 + - host: elasticsearch.hello.there + paths: + - path: / + servicePort: 9999 + tls: + - secretName: elastic-co-wildcard + hosts: + - elasticsearch.elastic.co +""" + + r = helm_template(config) + assert uname in r["ingress"] + i = r["ingress"][uname]["spec"] + assert i["tls"][0]["hosts"][0] == "elasticsearch.elastic.co" + assert i["tls"][0]["secretName"] == "elastic-co-wildcard" + + assert i["rules"][0]["host"] == "elasticsearch.elastic.co" + assert i["rules"][0]["http"]["paths"][0]["path"] == "/" + assert i["rules"][0]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][0]["http"]["paths"][0]["backend"]["service"]["port"]["number"] == + 9200 + ) + assert i["rules"][1]["host"] == None + assert i["rules"][1]["http"]["paths"][0]["path"] == "/" + assert i["rules"][1]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][1]["http"]["paths"][0]["backend"]["service"]["port"]["number"] == + 9200 + ) + assert i["rules"][1]["http"]["paths"][1]["path"] == "/mypath" + assert i["rules"][1]["http"]["paths"][1]["backend"]["service"]["name"] == uname + assert ( + i["rules"][1]["http"]["paths"][1]["backend"]["service"]["port"]["number"] == + 8888 + ) + assert 
i["rules"][2]["host"] == "elasticsearch.hello.there" + assert i["rules"][2]["http"]["paths"][0]["path"] == "/" + assert i["rules"][2]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][2]["http"]["paths"][0]["backend"]["service"]["port"]["number"] == + 9999 + ) + + +def test_adding_a_deprecated_ingress_rule(): + config = """ +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + path: / + hosts: + - elasticsearch.elastic.co + tls: + - secretName: elastic-co-wildcard + hosts: + - elasticsearch.elastic.co +""" + + r = helm_template(config) + assert uname in r["ingress"] + i = r["ingress"][uname]["spec"] + assert i["tls"][0]["hosts"][0] == "elasticsearch.elastic.co" + assert i["tls"][0]["secretName"] == "elastic-co-wildcard" + + assert i["rules"][0]["host"] == "elasticsearch.elastic.co" + assert i["rules"][0]["http"]["paths"][0]["path"] == "/" + assert i["rules"][0]["http"]["paths"][0]["backend"]["service"]["name"] == uname + assert ( + i["rules"][0]["http"]["paths"][0]["backend"]["service"]["port"]["number"] == + 9200 + ) + + +def test_changing_the_protocol(): + config = """ +protocol: https +""" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert "https://127.0.0.1:9200" in c["readinessProbe"]["exec"]["command"][-1] + + +def test_changing_the_cluster_health_status(): + config = """ +clusterHealthCheckParams: 'wait_for_no_initializing_shards=true&timeout=60s' +""" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert ( + "/_cluster/health?wait_for_no_initializing_shards=true&timeout=60s" + in c["readinessProbe"]["exec"]["command"][-1] + ) + + +def test_adding_in_es_config(): + config = """ +esConfig: + elasticsearch.yml: | + key: + nestedkey: value + dot.notation: test + + log4j2.properties: | + appender.rolling.name = rolling +""" + r = helm_template(config) + c = r["configmap"][uname + 
"-config"]["data"] + + assert "elasticsearch.yml" in c + assert "log4j2.properties" in c + + assert "nestedkey: value" in c["elasticsearch.yml"] + assert "dot.notation: test" in c["elasticsearch.yml"] + + assert "appender.rolling.name = rolling" in c["log4j2.properties"] + + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert { + "configMap": {"name": "elasticsearch-master-config"}, + "name": "esconfig", + } in s["volumes"] + assert { + "mountPath": "/usr/share/elasticsearch/config/elasticsearch.yml", + "name": "esconfig", + "subPath": "elasticsearch.yml", + } in s["containers"][0]["volumeMounts"] + assert { + "mountPath": "/usr/share/elasticsearch/config/log4j2.properties", + "name": "esconfig", + "subPath": "log4j2.properties", + } in s["containers"][0]["volumeMounts"] + + assert ( + "configchecksum" + in r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"] + ) + + +def test_adding_in_jvm_options(): + config = """ +esJvmOptions: + processors.options: | + -XX:ActiveProcessorCount=3 +""" + r = helm_template(config) + c = r["configmap"][uname + "-jvm-options"]["data"] + + assert "processors.options" in c + + assert "-XX:ActiveProcessorCount=3" in c["processors.options"] + + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert { + "configMap": {"name": "elasticsearch-master-jvm-options"}, + "name": "esjvmoptions", + } in s["volumes"] + assert { + "mountPath": "/usr/share/elasticsearch/config/jvm.options.d/processors.options", + "name": "esjvmoptions", + "subPath": "processors.options", + } in s["containers"][0]["volumeMounts"] + + assert ( + "configchecksum" + in r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"] + ) + + +def test_dont_add_data_volume_when_persistance_is_disabled(): + config = """ +persistence: + enabled: false +""" + r = helm_template(config) + assert "volumeClaimTemplates" not in r["statefulset"][uname]["spec"] + assert { + "name": "elasticsearch-master", + "mountPath": 
"/usr/share/elasticsearch/data", + } not in r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0][ + "volumeMounts" + ] + + +def test_priority_class_name(): + config = """ +priorityClassName: "" +""" + r = helm_template(config) + spec = r["statefulset"][uname]["spec"]["template"]["spec"] + assert "priorityClassName" not in spec + + config = """ +priorityClassName: "highest" +""" + r = helm_template(config) + priority_class_name = r["statefulset"][uname]["spec"]["template"]["spec"][ + "priorityClassName" + ] + assert priority_class_name == "highest" + + +def test_scheduler_name(): + r = helm_template("") + spec = r["statefulset"][uname]["spec"]["template"]["spec"] + assert "schedulerName" not in spec + + config = """ +schedulerName: "stork" +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["schedulerName"] == "stork" + ) + + +def test_disabling_non_headless_service(): + config = "" + + r = helm_template(config) + + assert uname in r["service"] + + config = """ +service: + enabled: false +""" + + r = helm_template(config) + + assert uname not in r["service"] + + +def test_enabling_service_publishNotReadyAddresses(): + config = """ + service: + publishNotReadyAddresses: true + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["publishNotReadyAddresses"] == True + + +def test_adding_a_nodePort(): + config = "" + + r = helm_template(config) + + assert "nodePort" not in r["service"][uname]["spec"]["ports"][0] + + config = """ + service: + nodePort: 30001 + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["ports"][0]["nodePort"] == 30001 + + +def test_adding_a_loadBalancerIP(): + config = "" + + r = helm_template(config) + + assert "loadBalancerIP" not in r["service"][uname]["spec"] + + config = """ + service: + loadBalancerIP: 12.4.19.81 + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["loadBalancerIP"] == "12.4.19.81" + + +def 
test_adding_an_externalTrafficPolicy(): + config = "" + + r = helm_template(config) + + assert "externalTrafficPolicy" not in r["service"][uname]["spec"] + + config = """ + service: + externalTrafficPolicy: Local + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["externalTrafficPolicy"] == "Local" + + +def test_adding_a_label_on_non_headless_service(): + config = "" + + r = helm_template(config) + + assert "label1" not in r["service"][uname]["metadata"]["labels"] + + config = """ + service: + labels: + label1: value1 + """ + + r = helm_template(config) + + assert r["service"][uname]["metadata"]["labels"]["label1"] == "value1" + + +def test_adding_a_label_on_headless_service(): + config = "" + + r = helm_template(config) + + assert "label1" not in r["service"][uname + "-headless"]["metadata"]["labels"] + + config = """ + service: + labelsHeadless: + label1: value1 + """ + + r = helm_template(config) + + assert r["service"][uname + "-headless"]["metadata"]["labels"]["label1"] == "value1" + + +def test_adding_load_balancer_source_ranges(): + config = """ +service: + loadBalancerSourceRanges: + - 0.0.0.0/0 + """ + r = helm_template(config) + assert r["service"][uname]["spec"]["loadBalancerSourceRanges"][0] == "0.0.0.0/0" + + config = """ +service: + loadBalancerSourceRanges: + - 192.168.0.0/24 + - 192.168.1.0/24 + """ + r = helm_template(config) + ranges = r["service"][uname]["spec"]["loadBalancerSourceRanges"] + assert ranges[0] == "192.168.0.0/24" + assert ranges[1] == "192.168.1.0/24" + + +def test_lifecycle_hooks(): + config = "" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert "lifecycle" not in c + + config = """ + lifecycle: + preStop: + exec: + command: ["/bin/bash","/preStop"] + """ + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + + assert c["lifecycle"]["preStop"]["exec"]["command"] == ["/bin/bash", "/preStop"] + + 
+def test_esMajorVersion_detect_default_version(): + config = "" + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "8" + + +def test_esMajorVersion_default_to_8_if_not_elastic_image(): + config = """ + image: notElastic + imageTag: 1.0.0 + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "8" + + +def test_esMajorVersion_default_to_8_if_no_version_is_found(): + config = """ + imageTag: not_a_number + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "8" + + +def test_esMajorVersion_always_wins(): + config = """ + esMajorVersion: 7 + imageTag: 8.0.0 + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "7" + + +def test_set_pod_security_context(): + config = "" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] == + 1000 + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "runAsUser" + ] == + 1000 + ) + + config = """ + podSecurityContext: + fsGroup: 1001 + other: test + """ + + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] == + 1001 + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"]["other"] == + "test" + ) + + +def test_fsGroup_backwards_compatability(): + config = """ + fsGroup: 1001 + """ + + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] == + 1001 + ) + + +def test_set_container_security_context(): + config = "" + + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert c["securityContext"]["capabilities"]["drop"] == ["ALL"] + assert 
c["securityContext"]["runAsNonRoot"] == True + assert c["securityContext"]["runAsUser"] == 1000 + + config = """ + securityContext: + runAsUser: 1001 + other: test + """ + + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert c["securityContext"]["capabilities"]["drop"] == ["ALL"] + assert c["securityContext"]["runAsNonRoot"] == True + assert c["securityContext"]["runAsUser"] == 1001 + assert c["securityContext"]["other"] == "test" + + +def test_adding_pod_labels(): + config = """ +labels: + app.kubernetes.io/name: elasticsearch +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["metadata"]["labels"]["app.kubernetes.io/name"] == + "elasticsearch" + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["metadata"]["labels"][ + "app.kubernetes.io/name" + ] == + "elasticsearch" + ) + + +def test_keystore_enable(): + config = """ +keystore: + - secretName: test + """ + + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert {"name": "keystore", "emptyDir": {}} in s["volumes"] + + +def test_keystore_init_container(): + config = "" + + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + + assert i["name"] != "keystore" + + config = """ +keystore: + - secretName: test + """ + + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + + assert i["name"] == "keystore" + + +def test_keystore_init_container_image(): + config = """ +image: customImage +imageTag: 6.2.4 +imagePullPolicy: Never +keystore: + - secretName: test +""" + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + assert i["image"] == "customImage:6.2.4" + assert i["imagePullPolicy"] == "Never" + + +def test_keystore_mount(): + config = """ +keystore: + - secretName: test +""" + + r = helm_template(config) + s = 
r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/elasticsearch.keystore", + "subPath": "elasticsearch.keystore", + "name": "keystore", + } + + +def test_keystore_init_volume_mounts(): + config = """ +keystore: + - secretName: test + - secretName: test-with-custom-path + items: + - key: slack_url + path: xpack.notification.slack.account.otheraccount.secure_url +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["initContainers"][-1]["volumeMounts"] == [ + {"mountPath": "/tmp/keystore", "name": "keystore"}, + {"mountPath": "/tmp/keystoreSecrets/test", "name": "keystore-test"}, + { + "mountPath": "/tmp/keystoreSecrets/test-with-custom-path", + "name": "keystore-test-with-custom-path", + }, + ] + + +def test_keystore_volumes(): + config = """ +keystore: + - secretName: test + - secretName: test-with-custom-path + items: + - key: slack_url + path: xpack.notification.slack.account.otheraccount.secure_url +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert {"name": "keystore-test", "secret": {"secretName": "test"}} in s["volumes"] + + assert { + "name": "keystore-test-with-custom-path", + "secret": { + "secretName": "test-with-custom-path", + "items": [ + { + "key": "slack_url", + "path": "xpack.notification.slack.account.otheraccount.secure_url", + } + ], + }, + } in s["volumes"] + + +def test_pod_security_policy(): + # Make sure the default config is not creating any resources + config = "" + resources = ("role", "rolebinding", "serviceaccount", "podsecuritypolicy") + r = helm_template(config) + for resource in resources: + assert resource not in r + assert ( + "serviceAccountName" not in r["statefulset"][uname]["spec"]["template"]["spec"] + ) + + # Make sure all the resources are created with default values + config = """ +rbac: + create: true + serviceAccountName: "" 
+ +podSecurityPolicy: + create: true + name: "" +""" + r = helm_template(config) + for resource in resources: + assert resource in r + assert r["role"][uname]["rules"][0] == { + "apiGroups": ["extensions"], + "verbs": ["use"], + "resources": ["podsecuritypolicies"], + "resourceNames": [uname], + } + assert r["rolebinding"][uname]["subjects"] == [ + {"kind": "ServiceAccount", "namespace": "default", "name": uname} + ] + assert r["rolebinding"][uname]["roleRef"] == { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Role", + "name": uname, + } + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["serviceAccountName"] == + uname + ) + psp_spec = r["podsecuritypolicy"][uname]["spec"] + assert psp_spec["privileged"] is True + + +def test_external_pod_security_policy(): + # Make sure we can use an externally defined pod security policy + config = """ +rbac: + create: true + serviceAccountName: "" + +podSecurityPolicy: + create: false + name: "customPodSecurityPolicy" +""" + resources = ("role", "rolebinding") + r = helm_template(config) + for resource in resources: + assert resource in r + + assert r["role"][uname]["rules"][0] == { + "apiGroups": ["extensions"], + "verbs": ["use"], + "resources": ["podsecuritypolicies"], + "resourceNames": ["customPodSecurityPolicy"], + } + + +def test_external_service_account(): + # Make sure we can use an externally defined service account + config = """ +rbac: + create: false + serviceAccountName: "customServiceAccountName" + +podSecurityPolicy: + create: false + name: "" +""" + resources = ("role", "rolebinding", "serviceaccount") + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["serviceAccountName"] == + "customServiceAccountName" + ) + # When referencing an external service account we do not want any resources to be created. 
+ for resource in resources: + assert resource not in r + + +def test_name_override(): + # Make sure we can use a name override + config = """ +nameOverride: "customName" +""" + r = helm_template(config) + + assert "customName-master" in r["statefulset"] + assert "customName-master" in r["service"] + + +def test_full_name_override(): + # Make sure we can use a full name override + config = """ +fullnameOverride: "customfullName" +""" + r = helm_template(config) + + assert "customfullName" in r["statefulset"] + assert "customfullName" in r["service"] + + +def test_initial_master_nodes_when_using_full_name_override(): + config = """ +fullnameOverride: "customfullName" +""" + r = helm_template(config) + env = r["statefulset"]["customfullName"]["spec"]["template"]["spec"]["containers"][ + 0 + ]["env"] + assert { + "name": "cluster.initial_master_nodes", + "value": "customfullName-0," + "customfullName-1," + "customfullName-2,", + } in env + + +def test_hostaliases(): + config = """ +hostAliases: +- ip: "127.0.0.1" + hostnames: + - "foo.local" + - "bar.local" +""" + r = helm_template(config) + hostAliases = r["statefulset"][uname]["spec"]["template"]["spec"]["hostAliases"] + assert {"ip": "127.0.0.1", "hostnames": ["foo.local", "bar.local"]} in hostAliases + + +def test_network_policy(): + config = """ +networkPolicy: + http: + enabled: true + explicitNamespacesSelector: + # Accept from namespaces with all those different rules (from whitelisted Pods) + matchLabels: + role: frontend-http + matchExpressions: + - {key: role, operator: In, values: [frontend-http]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-http + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-http + transport: + enabled: true + allowExternal: true + explicitNamespacesSelector: + matchLabels: + role: frontend-transport + matchExpressions: + - {key: role, operator: In, values: [frontend-transport]} + additionalRules: + - podSelector: + 
matchLabels: + role: frontend-transport + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-transport + +""" + r = helm_template(config) + ingress = r["networkpolicy"][uname]["spec"]["ingress"] + pod_selector = r["networkpolicy"][uname]["spec"]["podSelector"] + http = ingress[0] + transport = ingress[1] + assert http["from"] == [ + { + "podSelector": { + "matchLabels": {"elasticsearch-master-http-client": "true"} + }, + "namespaceSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-http"]} + ], + "matchLabels": {"role": "frontend-http"}, + }, + }, + {"podSelector": {"matchLabels": {"role": "frontend-http"}}}, + { + "podSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-http"]} + ] + } + }, + ] + assert http["ports"][0]["port"] == 9200 + assert transport["from"] == [ + { + "podSelector": { + "matchLabels": {"elasticsearch-master-transport-client": "true"} + }, + "namespaceSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-transport"]} + ], + "matchLabels": {"role": "frontend-transport"}, + }, + }, + {"podSelector": {"matchLabels": {"role": "frontend-transport"}}}, + { + "podSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-transport"]} + ] + } + }, + {"podSelector": {"matchLabels": {"app": "elasticsearch-master"}}}, + ] + assert transport["ports"][0]["port"] == 9300 + assert pod_selector == { + "matchLabels": { + "app": "elasticsearch-master", + } + } + + +def test_default_automount_sa_token(): + config = """ +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + "automountServiceAccountToken" + ] == + True + ) + + +def test_disable_automount_sa_token(): + config = """ +rbac: + automountToken: false +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + 
"automountServiceAccountToken" + ] == + False + ) diff --git a/applications/elasticsearch/deploy/charts/values-dev.yaml b/applications/elasticsearch/deploy/charts/values-dev.yaml new file mode 100644 index 000000000..98cf07847 --- /dev/null +++ b/applications/elasticsearch/deploy/charts/values-dev.yaml @@ -0,0 +1,15 @@ +resources: + requests: + cpu: "10m" + memory: "0.2Gi" + limits: + cpu: "1000m" + memory: "2Gi" + +volumeClaimTemplate: + resources: + requests: + storage: 2Gi + +tests: + enabled: true diff --git a/applications/elasticsearch/deploy/charts/values-local.yaml b/applications/elasticsearch/deploy/charts/values-local.yaml new file mode 100644 index 000000000..fc5dd125b --- /dev/null +++ b/applications/elasticsearch/deploy/charts/values-local.yaml @@ -0,0 +1,15 @@ +--- +replicas: 1 +minimumMasterNodes: 1 +resources: + requests: + cpu: "10m" + memory: "0.2Gi" + limits: + cpu: "1000m" + memory: "2Gi" + +volumeClaimTemplate: + resources: + requests: + storage: 100Mi diff --git a/applications/elasticsearch/deploy/charts/values.yaml b/applications/elasticsearch/deploy/charts/values.yaml new file mode 100644 index 000000000..18a89c1af --- /dev/null +++ b/applications/elasticsearch/deploy/charts/values.yaml @@ -0,0 +1,359 @@ +--- +clusterName: "elasticsearch" +nodeGroup: "master" + +# The service that non master groups will try to connect to when joining the cluster +# This should be set to clusterName + "-" + nodeGroup for your master group +masterService: "" + +# Elasticsearch roles that will be applied to this nodeGroup +# These will be set as environment variables. E.g. 
node.roles=master +# https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles +roles: + - master + - data + - data_content + - data_hot + - data_warm + - data_cold + - ingest + - ml + - remote_cluster_client + - transform + +replicas: 2 +minimumMasterNodes: 2 + +esMajorVersion: "" + +# Allows you to add any config files in /usr/share/elasticsearch/config/ +# such as elasticsearch.yml and log4j2.properties +esConfig: + elasticsearch.yml: | + xpack.security.enabled: false + path.data: /usr/share/elasticsearch/data +# elasticsearch.yml: | +# key: +# nestedkey: value +# log4j2.properties: | +# key = value + +createCert: false + +esJvmOptions: {} +# processors.options: | +# -XX:ActiveProcessorCount=3 + +# Extra environment variables to append to this nodeGroup +# This will be appended to the current 'env:' key. You can use any of the kubernetes env +# syntax here +extraEnvs: [] +# - name: MY_ENVIRONMENT_VAR +# value: the_value_goes_here + +# Allows you to load environment variables from kubernetes secret or config map +envFrom: [] +# - secretRef: +# name: env-secret +# - configMapRef: +# name: config-map + +# Disable it to use your own elastic-credential Secret. 
+secret: + enabled: false + password: "" # generated randomly if not defined + +# A list of secrets and their paths to mount inside the pod +# This is useful for mounting certificates for security and for mounting +# the X-Pack license +secretMounts: [] +# - name: elastic-certificates +# secretName: elastic-certificates +# path: /usr/share/elasticsearch/config/certs +# defaultMode: 0755 + +hostAliases: [] +#- ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" + +image: "docker.elastic.co/elasticsearch/elasticsearch" +imageTag: "8.5.1" +imagePullPolicy: "IfNotPresent" + +podAnnotations: {} +# iam.amazonaws.com/role: es-cluster + +# additionals labels +labels: {} + +esJavaOpts: "" # example: "-Xmx1g -Xms1g" + +resources: + requests: + cpu: "100m" + memory: "0.5Gi" + limits: + cpu: "1000m" + memory: "2Gi" + +initResources: {} +# limits: +# cpu: "25m" +# # memory: "128Mi" +# requests: +# cpu: "25m" +# memory: "128Mi" + +networkHost: "0.0.0.0" + +volumeClaimTemplate: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi + +rbac: + create: false + serviceAccountAnnotations: {} + serviceAccountName: "" + automountToken: true + +podSecurityPolicy: + create: false + name: "" + spec: + privileged: true + fsGroup: + rule: RunAsAny + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - persistentVolumeClaim + - emptyDir + +persistence: + enabled: true + labels: + # Add default labels for the volumeClaimTemplate of the StatefulSet + enabled: false + annotations: {} + +extraVolumes: [] +# - name: extras +# emptyDir: {} + +extraVolumeMounts: [] +# - name: extras +# mountPath: /usr/share/extras +# readOnly: true + +extraContainers: [] +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +extraInitContainers: [] +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +# This is the PriorityClass settings as defined in +# 
https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: "" + +# By default this will make sure two pods don't end up on the same node +# Changing this to a region would allow you to spread pods across regions +antiAffinityTopologyKey: "kubernetes.io/hostname" + +# Hard means that by default pods will only be scheduled if there are enough nodes for them +# and that they will never end up on the same node. Setting this to soft will do this "best effort" +antiAffinity: "hard" + +# This is the node affinity settings as defined in +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +nodeAffinity: {} + +# The default is to deploy all pods serially. By setting this to parallel all pods are started at +# the same time when bootstrapping the cluster +podManagementPolicy: "Parallel" + +# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when +# there are many services in the current namespace. +# If you experience slow pod startups you probably want to set this to `false`. 
+enableServiceLinks: true + +protocol: http +httpPort: 9200 +transportPort: 9300 + +service: + enabled: true + labels: {} + labelsHeadless: {} + type: ClusterIP + # Consider that all endpoints are considered "ready" even if the Pods themselves are not + # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + publishNotReadyAddresses: false + nodePort: "" + annotations: {} + httpPortName: http + transportPortName: transport + loadBalancerIP: "" + loadBalancerSourceRanges: [] + externalTrafficPolicy: "" + +updateStrategy: RollingUpdate + +# This is the max unavailable setting for the pod disruption budget +# The default value of 1 will make sure that kubernetes won't allow more than 1 +# of your pods to be unavailable during maintenance +maxUnavailable: 1 + +podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + +securityContext: + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# How long to wait for elasticsearch to stop gracefully +terminationGracePeriod: 120 + +sysctlVmMaxMapCount: 262144 + +readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 3 + timeoutSeconds: 5 + +# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status +clusterHealthCheckParams: "wait_for_status=green&timeout=1s" + +## Use an alternate scheduler. +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +imagePullSecrets: [] +nodeSelector: {} +tolerations: [] + +# Enabling this will publicly expose your Elasticsearch instance. 
+# Only enable this if you have security enabled on your cluster +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + className: "nginx" + pathtype: ImplementationSpecific + hosts: + - host: chart-example.local + paths: + - path: / + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +nameOverride: "" +fullnameOverride: "" +healthNameOverride: "" + +lifecycle: {} +# preStop: +# exec: +# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] +# postStart: +# exec: +# command: +# - bash +# - -c +# - | +# #!/bin/bash +# # Add a template to adjust number of shards/replicas +# TEMPLATE_NAME=my_template +# INDEX_PATTERN="logstash-*" +# SHARD_COUNT=8 +# REPLICA_COUNT=1 +# ES_URL=http://localhost:9200 +# while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done +# curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' + +sysctlInitContainer: + enabled: true + +keystore: [] + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## In order for a Pod to access Elasticsearch, it needs to have the following label: + ## {{ template "uname" . }}-client: "true" + ## Example for default configuration to access HTTP port: + ## elasticsearch-master-http-client: "true" + ## Example for default configuration to access transport port: + ## elasticsearch-master-transport-client: "true" + + http: + enabled: false + ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace + ## and matching all criteria can reach the DB. 
+ ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this + ## parameter to select these namespaces + ## + # explicitNamespacesSelector: + # # Accept from namespaces with all those different rules (only from whitelisted Pods) + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + + transport: + ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled. + enabled: false + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +tests: + enabled: true diff --git a/applications/elasticsearch/deploy/values.yaml b/applications/elasticsearch/deploy/values.yaml new file mode 100644 index 000000000..dd0ca5aeb --- /dev/null +++ b/applications/elasticsearch/deploy/values.yaml @@ -0,0 +1,7 @@ +harness: + subdomain: es + secured: true + service: + port: 9200 + auto: false + name: elasticsearch-master diff --git a/tools/deployment-cli-tools/ch_cli_tools/configurationgenerator.py b/tools/deployment-cli-tools/ch_cli_tools/configurationgenerator.py index 5ff0a8a66..494aae673 100644 --- a/tools/deployment-cli-tools/ch_cli_tools/configurationgenerator.py +++ b/tools/deployment-cli-tools/ch_cli_tools/configurationgenerator.py @@ -78,14 +78,14 @@ def __init_deployment(self): if self.dest_deployment_path.exists(): shutil.rmtree(self.dest_deployment_path) # Initialize with default - 
copy_merge_base_deployment(self.dest_deployment_path, Path(CH_ROOT) / DEPLOYMENT_CONFIGURATION_PATH / self.templates_path) + copy_merge_base_deployment(self.dest_deployment_path, Path(CH_ROOT) / DEPLOYMENT_CONFIGURATION_PATH / self.templates_path, self.env) # Override for every cloudharness scaffolding for root_path in self.root_paths: copy_merge_base_deployment(dest_helm_chart_path=self.dest_deployment_path, - base_helm_chart=root_path / DEPLOYMENT_CONFIGURATION_PATH / self.templates_path) - collect_apps_helm_templates(root_path, exclude=self.exclude, include=self.include, - dest_helm_chart_path=self.dest_deployment_path, templates_path=self.templates_path) + base_helm_chart=root_path / DEPLOYMENT_CONFIGURATION_PATH / self.templates_path, envs=self.env) + # collect_apps_helm_templates(root_path, exclude=self.exclude, include=self.include, + # dest_helm_chart_path=self.dest_deployment_path, templates_path=self.templates_path, envs=self.env) def _adjust_missing_values(self, helm_values): if 'name' not in helm_values: @@ -152,12 +152,12 @@ def _init_static_images(self): for static_img_dockerfile in find_dockerfiles_paths(os.path.join(root_path, STATIC_IMAGES_PATH)): self.static_images.add(static_img_dockerfile) - img_name = image_name_from_dockerfile_path(os.path.basename( - static_img_dockerfile), base_name=clean_image_name(root_path.name)) - self.base_images[os.path.basename(static_img_dockerfile)] = self.image_tag( - img_name, build_context_path=static_img_dockerfile, - dependencies=guess_build_dependencies_from_dockerfile(static_img_dockerfile) - ) + img_name = image_name_from_dockerfile_path(os.path.basename( + static_img_dockerfile), base_name=clean_image_name(root_path.name)) + self.base_images[os.path.basename(static_img_dockerfile)] = self.image_tag( + img_name, build_context_path=static_img_dockerfile, + dependencies=guess_build_dependencies_from_dockerfile(static_img_dockerfile) + ) def _assign_static_build_dependencies(self, helm_values): for 
static_img_dockerfile in self.static_images: @@ -369,13 +369,13 @@ def merge_helm_chart(source_templates_path, dest_helm_chart_path=HELM_CHART_PATH pass -def copy_merge_base_deployment(dest_helm_chart_path, base_helm_chart): +def copy_merge_base_deployment(dest_helm_chart_path, base_helm_chart, envs=()): if not base_helm_chart.exists(): return if dest_helm_chart_path.exists(): logging.info("Merging/overriding all files in directory %s", dest_helm_chart_path) - merge_configuration_directories(f"{base_helm_chart}", f"{dest_helm_chart_path}") + merge_configuration_directories(f"{base_helm_chart}", f"{dest_helm_chart_path}", envs=envs) else: logging.info("Copying base deployment chart from %s to %s", base_helm_chart, dest_helm_chart_path) @@ -574,7 +574,7 @@ def validate_dependencies(values): f"Bad service application dependencies specified for application {app}: {','.join(not_found)}") -def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_path=HELM_PATH, exclude=(), include=None): +def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_path=HELM_PATH, exclude=(), include=None, envs=()): """ Searches recursively for helm templates inside the applications and collects the templates in the destination @@ -601,9 +601,12 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_pat if dest_dir.exists(): logging.warning( "Merging/overriding all files in directory %s", dest_dir) - merge_configuration_directories(f"{template_dir}", f"{dest_dir}") + merge_configuration_directories(f"{template_dir}", f"{dest_dir}", envs) else: shutil.copytree(template_dir, dest_dir) + if envs: + merge_configuration_directories(f"{dest_dir}", f"{dest_dir}", envs) + resources_dir = app_path / 'deploy' / 'resources' if resources_dir.exists(): dest_dir = dest_helm_chart_path / 'resources' / app_name @@ -611,7 +614,9 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_pat logging.info( "Collecting 
resources for application %s to %s", app_name, dest_dir) - merge_configuration_directories(f"{resources_dir}", f"{dest_dir}") + merge_configuration_directories(f"{resources_dir}", f"{dest_dir}", envs) + if envs: + merge_configuration_directories(f"{dest_dir}", f"{dest_dir}", envs) if templates_path == HELM_PATH: subchart_dir = app_path / 'deploy/charts' @@ -623,9 +628,11 @@ def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_pat if dest_dir.exists(): logging.warning( "Merging/overriding all files in directory %s", dest_dir) - merge_configuration_directories(f"{subchart_dir}", f"{dest_dir}") + merge_configuration_directories(f"{subchart_dir}", f"{dest_dir}", envs) else: shutil.copytree(subchart_dir, dest_dir) + if envs: + merge_configuration_directories(f"{dest_dir}", f"{dest_dir}", envs) # def collect_apps_helm_templates(search_root, dest_helm_chart_path, templates_path=None, exclude=(), include=None): diff --git a/tools/deployment-cli-tools/ch_cli_tools/helm.py b/tools/deployment-cli-tools/ch_cli_tools/helm.py index 0a84807c7..897aedf87 100644 --- a/tools/deployment-cli-tools/ch_cli_tools/helm.py +++ b/tools/deployment-cli-tools/ch_cli_tools/helm.py @@ -72,7 +72,7 @@ def process_values(self) -> HarnessMainConfig: for root_path in self.root_paths: collect_apps_helm_templates(root_path, exclude=self.exclude, include=self.include, - dest_helm_chart_path=self.dest_deployment_path) + dest_helm_chart_path=self.dest_deployment_path, envs=self.env) # Save values file for manual helm chart merged_values = merge_to_yaml_file(helm_values, os.path.join( diff --git a/tools/deployment-cli-tools/ch_cli_tools/utils.py b/tools/deployment-cli-tools/ch_cli_tools/utils.py index 8e973b812..8df1e8336 100644 --- a/tools/deployment-cli-tools/ch_cli_tools/utils.py +++ b/tools/deployment-cli-tools/ch_cli_tools/utils.py @@ -145,14 +145,6 @@ def get_template(yaml_path, base_default=False): return dict_template or {} -def file_is_yaml(fname): - return fname[-4:] == 
'yaml' or fname[-3:] == 'yml' - - -def file_is_json(fname): - return fname[-4:] == 'json' - - def replaceindir(root_src_dir, source, replace): """ Does copy and merge (shutil.copytree requires that the destination does not exist) @@ -263,10 +255,10 @@ def movedircontent(root_src_dir, root_dst_dir): shutil.rmtree(root_src_dir) -def merge_configuration_directories(source: Union[str, pathlib.Path], destination: Union[str, pathlib.Path]) -> None: +def merge_configuration_directories(source: Union[str, pathlib.Path], destination: Union[str, pathlib.Path], envs=()) -> None: source_path, destination_path = pathlib.Path(source), pathlib.Path(destination) - if source_path == destination_path: + if source_path == destination_path and not envs: return if not source_path.exists(): @@ -275,17 +267,19 @@ def merge_configuration_directories(source: Union[str, pathlib.Path], destinatio if not destination_path.exists(): shutil.copytree(source_path, destination_path, ignore=shutil.ignore_patterns(*EXCLUDE_PATHS)) - return + if not envs: + return for source_directory, _, files in os.walk(source_path): # source_path.walk() from Python 3.12 - _merge_configuration_directory(source_path, destination_path, pathlib.Path(source_directory), files) + _merge_configuration_directory(source_path, destination_path, pathlib.Path(source_directory), files, envs) def _merge_configuration_directory( source: pathlib.Path, destination: pathlib.Path, source_directory: pathlib.Path, - files: list[str] + files: list[str], + envs=() ) -> None: if any(path in str(source_directory) for path in EXCLUDE_PATHS): return @@ -299,33 +293,7 @@ def _merge_configuration_directory( source_file_path = source_directory / file_name destination_file_path = destination_directory / file_name - _merge_configuration_file(source_file_path, destination_file_path) - - -def _merge_configuration_file(source_file_path: pathlib.Path, destination_file_path: pathlib.Path) -> None: - if not exists(destination_file_path): - 
shutil.copy2(source_file_path, destination_file_path) - return - - merge_operations = [ - (file_is_yaml, merge_yaml_files), - (file_is_json, merge_json_files), - ] - - for can_merge_file, merge_files in merge_operations: - if not can_merge_file(source_file_path.name): - continue - - try: - merge_files(source_file_path, destination_file_path) - logging.info(f'Merged/overridden file content of {destination_file_path} with {source_file_path}') - except: - break - - return - - logging.warning(f'Overwriting file {destination_file_path} with {source_file_path}') - shutil.copy2(source_file_path, destination_file_path) + _merge_configuration_file(source_file_path, destination_file_path, envs) def merge_yaml_files(fname, fdest): @@ -378,6 +346,43 @@ def merge_to_yaml_file(content_src, fdest): return merged +merge_operations = { + ".yaml": merge_yaml_files, + ".yml": merge_yaml_files, + ".json": merge_json_files, +} + + +def _merge_configuration_file(source_file_path: pathlib.Path, destination_file_path: pathlib.Path, envs=()) -> None: + if not exists(destination_file_path): + shutil.copy2(source_file_path, destination_file_path) + ext = source_file_path.suffix.lower() + merge_files = merge_operations.get(ext, None) + + if source_file_path != destination_file_path: + if merge_files is not None: + try: + merge_files(source_file_path, destination_file_path) + logging.info(f'Merged/overridden file content of {destination_file_path} with {source_file_path}') + except: + logging.warning(f'Merge error: overwriting file {destination_file_path} with {source_file_path}') + shutil.copy2(source_file_path, destination_file_path) + else: + logging.warning(f'Overwriting file {destination_file_path} with {source_file_path}') + shutil.copy2(source_file_path, destination_file_path) + + if merge_files is not None: + # override eventually with environment specific files + for e in envs: + env_specific_file = pathlib.Path(str(source_file_path).replace(f'{ext}', f'-{e}{ext}')) + if 
exists(env_specific_file): + try: + merge_files(env_specific_file, destination_file_path) + logging.info(f'Merged/overridden file content of {destination_file_path} with {env_specific_file}') + except: + pass + + def dict_merge(dct, merge_dct, add_keys=True): """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested diff --git a/tools/deployment-cli-tools/tests/resources/applications/myapp/deploy/charts/values-dev.yaml b/tools/deployment-cli-tools/tests/resources/applications/myapp/deploy/charts/values-dev.yaml new file mode 100644 index 000000000..455d529e3 --- /dev/null +++ b/tools/deployment-cli-tools/tests/resources/applications/myapp/deploy/charts/values-dev.yaml @@ -0,0 +1 @@ +test: dev \ No newline at end of file diff --git a/tools/deployment-cli-tools/tests/resources/applications/myapp/deploy/charts/values.yaml b/tools/deployment-cli-tools/tests/resources/applications/myapp/deploy/charts/values.yaml new file mode 100644 index 000000000..1002c2081 --- /dev/null +++ b/tools/deployment-cli-tools/tests/resources/applications/myapp/deploy/charts/values.yaml @@ -0,0 +1 @@ +test: default \ No newline at end of file diff --git a/tools/deployment-cli-tools/tests/resources/conf-source1/a-dev.yaml b/tools/deployment-cli-tools/tests/resources/conf-source1/a-dev.yaml new file mode 100644 index 000000000..93b7ae2a1 --- /dev/null +++ b/tools/deployment-cli-tools/tests/resources/conf-source1/a-dev.yaml @@ -0,0 +1,4 @@ +a: dev +b: + ba: ba + bb: bb \ No newline at end of file diff --git a/tools/deployment-cli-tools/tests/test_helm.py b/tools/deployment-cli-tools/tests/test_helm.py index c4faf0085..8e109cd6b 100644 --- a/tools/deployment-cli-tools/tests/test_helm.py +++ b/tools/deployment-cli-tools/tests/test_helm.py @@ -79,6 +79,10 @@ def test_collect_helm_values(tmp_path): # Not indicated as a build dependency assert 'cloudharness-base-debian' not in values[KEY_TASK_IMAGES] + 
chart_values = yaml.safe_load(open(helm_path / 'charts/myapp/values.yaml', 'r')) # Check if the values.yaml is valid YAML + assert chart_values is not None, "values.yaml should be valid YAML" + assert chart_values["test"] == "dev" + def test_collect_nobuild(tmp_path): out_folder = tmp_path / 'test_collect_helm_values' diff --git a/tools/deployment-cli-tools/tests/test_skaffold.py b/tools/deployment-cli-tools/tests/test_skaffold.py index 8e90e90f6..e1e83c9d6 100644 --- a/tools/deployment-cli-tools/tests/test_skaffold.py +++ b/tools/deployment-cli-tools/tests/test_skaffold.py @@ -8,15 +8,15 @@ HERE = os.path.dirname(os.path.realpath(__file__)) RESOURCES = os.path.join(HERE, 'resources') RESOURCES_BUGGY = os.path.join(HERE, 'resources_buggy') -OUT = '/tmp/deployment' + CLOUDHARNESS_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(HERE))) CLOUDHARNESS_DIRNAME = os.path.basename(CLOUDHARNESS_ROOT) -def test_create_skaffold_configuration(): +def test_create_skaffold_configuration(tmp_path): values = create_helm_chart( [CLOUDHARNESS_ROOT, RESOURCES], - output_path=OUT, + output_path=tmp_path, include=['samples', 'myapp'], exclude=['events'], domain="my.local", @@ -36,9 +36,9 @@ def test_create_skaffold_configuration(): sk = create_skaffold_configuration( root_paths=root_paths, helm_values=values, - output_path=OUT + output_path=tmp_path ) - assert os.path.exists(os.path.join(OUT, 'skaffold.yaml')) + assert os.path.exists(os.path.join(tmp_path, 'skaffold.yaml')) exp_apps = ('accounts', 'samples', 'workflows', 'myapp', 'common') assert len(sk['build']['artifacts']) == len( exp_apps) + len(values[KEY_TASK_IMAGES]) @@ -105,14 +105,14 @@ def test_create_skaffold_configuration(): assert '--timeout=10m' in flags['install'] assert '--install' in flags['upgrade'] - shutil.rmtree(OUT) + shutil.rmtree(tmp_path) shutil.rmtree(BUILD_DIR) def test_create_skaffold_configuration_with_conflicting_dependencies(tmp_path): values = create_helm_chart( [CLOUDHARNESS_ROOT, 
RESOURCES_BUGGY], - output_path=OUT, + output_path=tmp_path, include=['myapp'], exclude=['events'], domain="my.local", @@ -131,7 +131,7 @@ def test_create_skaffold_configuration_with_conflicting_dependencies(tmp_path): sk = create_skaffold_configuration( root_paths=root_paths, helm_values=values, - output_path=OUT + output_path=tmp_path ) releases = sk['deploy']['helm']['releases'] @@ -148,7 +148,7 @@ def test_create_skaffold_configuration_with_conflicting_dependencies(tmp_path): def test_create_skaffold_configuration_with_conflicting_dependencies_requirements_file(tmp_path): values = create_helm_chart( [CLOUDHARNESS_ROOT, RESOURCES_BUGGY], - output_path=OUT, + output_path=tmp_path, include=['myapp2'], exclude=['events'], domain="my.local", @@ -167,7 +167,7 @@ def test_create_skaffold_configuration_with_conflicting_dependencies_requirement sk = create_skaffold_configuration( root_paths=root_paths, helm_values=values, - output_path=OUT + output_path=tmp_path ) releases = sk['deploy']['helm']['releases'] @@ -181,10 +181,10 @@ def test_create_skaffold_configuration_with_conflicting_dependencies_requirement assert myapp_config['harness']['deployment']['args'][0] == '/usr/src/app/myapp_code/__main__.py' -def test_create_skaffold_configuration_nobuild(): +def test_create_skaffold_configuration_nobuild(tmp_path): values = create_helm_chart( [RESOURCES], - output_path=OUT, + output_path=tmp_path, include=['myapp'], domain="my.local", namespace='test', @@ -204,7 +204,7 @@ def test_create_skaffold_configuration_nobuild(): sk = create_skaffold_configuration( root_paths=root_paths, helm_values=values, - output_path=OUT + output_path=tmp_path ) releases = sk['deploy']['helm']['releases'] @@ -231,7 +231,7 @@ def test_app_depends_on_app(tmp_path): sk = create_skaffold_configuration( root_paths=root_paths, helm_values=values, - output_path=OUT + output_path=tmp_path ) releases = sk['deploy']['helm']['releases'] diff --git a/tools/deployment-cli-tools/tests/test_utils.py 
b/tools/deployment-cli-tools/tests/test_utils.py index aadc8764c..3880ca6fb 100644 --- a/tools/deployment-cli-tools/tests/test_utils.py +++ b/tools/deployment-cli-tools/tests/test_utils.py @@ -63,6 +63,37 @@ def test_merge_configuration_directories(): shutil.rmtree(res_path) +def test_merge_configuration_directories_envs(): + try: + basedir = os.path.join(HERE, "resources") + res_path = os.path.join(basedir, 'conf-res-envs') + if os.path.exists(res_path): + shutil.rmtree(res_path) + + merge_configuration_directories(os.path.join(basedir, 'conf-source1'), res_path, ("dev",)) + # + + assert os.path.exists(os.path.join(res_path, "a.yaml")) + assert os.path.exists(os.path.join(res_path, "b.yaml")) + + assert os.path.exists(os.path.join(res_path, "sub", "a.yaml")) + assert os.path.exists(os.path.join(res_path, "sub", "b.yaml")) + + with open(os.path.join(res_path, "a.yaml")) as f: + a = yaml.load(f) + assert a['a'] == 'dev' + + merge_configuration_directories(os.path.join(basedir, 'conf-source2'), res_path) + assert os.path.exists(os.path.join(res_path, "c.yaml")) + + with open(os.path.join(res_path, "a.yaml")) as f: + a = yaml.load(f) + assert a['a'] == 'a1' + finally: + if os.path.exists(res_path): + shutil.rmtree(res_path) + + def test_guess_build_dependencies_from_dockerfile(): deps = guess_build_dependencies_from_dockerfile(os.path.join(HERE, "resources/applications/myapp")) assert len(deps) == 1