From d07c97cd27084a454397dbc8e06b6da5fade7718 Mon Sep 17 00:00:00 2001 From: dellnoantechnp Date: Fri, 17 Nov 2023 17:29:14 +0800 Subject: [PATCH] chore: Update kafka chart. --- charts/kafka/Chart.yaml | 45 +- charts/kafka/README.md | 746 ++++----- charts/kafka/templates/NOTES.txt | 142 +- charts/kafka/templates/_helpers.tpl | 164 +- charts/kafka/templates/configmap.yaml | 3 +- charts/kafka/templates/jaas-secret.yaml | 15 +- charts/kafka/templates/jmx-configmap.yaml | 3 +- charts/kafka/templates/jmx-metrics-svc.yaml | 25 +- .../templates/kafka-metrics-deployment.yaml | 108 +- .../kafka-metrics-serviceaccount.yaml | 1 + charts/kafka/templates/kafka-metrics-svc.yaml | 23 +- .../kafka/templates/kafka-provisioning.yaml | 232 ++- charts/kafka/templates/log4j-configmap.yaml | 1 + .../kafka/templates/networkpolicy-egress.yaml | 2 +- .../templates/networkpolicy-ingress.yaml | 13 +- .../kafka/templates/poddisruptionbudget.yaml | 3 +- charts/kafka/templates/role.yaml | 3 +- charts/kafka/templates/rolebinding.yaml | 5 +- charts/kafka/templates/scripts-configmap.yaml | 75 +- charts/kafka/templates/serviceaccount.yaml | 11 +- .../templates/servicemonitor-jmx-metrics.yaml | 19 +- .../templates/servicemonitor-metrics.yaml | 17 +- charts/kafka/templates/statefulset.yaml | 178 ++- .../kafka/templates/svc-external-access.yaml | 20 +- charts/kafka/templates/svc-headless.yaml | 7 +- charts/kafka/templates/svc.yaml | 28 +- charts/kafka/values.yaml | 1400 ++++++++++------- 27 files changed, 1976 insertions(+), 1313 deletions(-) diff --git a/charts/kafka/Chart.yaml b/charts/kafka/Chart.yaml index b7e5346..4eaa05f 100644 --- a/charts/kafka/Chart.yaml +++ b/charts/kafka/Chart.yaml @@ -1,32 +1,33 @@ annotations: category: Infrastructure apiVersion: v2 -appVersion: 2.8.1 +appVersion: 3.2.0 dependencies: - - name: common - repository: https://dellnoantechnp.github.io/helm-chart-xxl-job-admin/ - tags: - - bitnami-common - version: 1.x.x - - condition: zookeeper.enabled - name: zookeeper - repository: https://dellnoantechnp.github.io/helm-chart-xxl-job-admin/ - version: 7.x.x -description: "[OpenSource by Bitnami] Apache Kafka is a distributed streaming platform." -engine: gotpl +- condition: zookeeper.enabled + name: zookeeper + repository: https://dellnoantechnp.github.io/helm-chart-xxl-job-admin/ + version: 9.x.x +- name: common + repository: https://dellnoantechnp.github.io/helm-chart-xxl-job-admin/ + tags: + - bitnami-common + version: 1.x.x +description: "[OpenSource by Bitnami] Apache Kafka is a distributed streaming platform designed to build real-time + pipelines and can be used as a message broker or as a replacement for a log aggregation + solution for big data applications." 
home: https://github.com/bitnami/charts/tree/master/bitnami/kafka icon: https://raw.githubusercontent.com/dellnoantechnp/helm-chart-xxl-job-admin/main/assets/stacks/kafka/img/kafka-stack-110x117.png keywords: - - kafka - - zookeeper - - streaming - - producer - - consumer +- kafka +- zookeeper +- streaming +- producer +- consumer maintainers: - - email: containers@bitnami.com - name: Bitnami +- email: containers@bitnami.com + name: Bitnami name: kafka sources: - - https://github.com/bitnami/bitnami-docker-kafka - - https://kafka.apache.org/ -version: 14.9.3 +- https://github.com/bitnami/bitnami-docker-kafka +- https://kafka.apache.org/ +version: 17.1.0 diff --git a/charts/kafka/README.md b/charts/kafka/README.md index 3b30650..46ea16b 100644 --- a/charts/kafka/README.md +++ b/charts/kafka/README.md @@ -1,10 +1,13 @@ -# Kafka -Version: *2.8.1* +# Apache Kafka packaged by Bitnami -[Kafka](https://kafka.apache.org/) is a distributed streaming platform used for building real-time data pipelines and streaming apps. It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies. +Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications. +[Overview of Apache Kafka](http://kafka.apache.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + ## TL;DR ```console @@ -64,234 +67,263 @@ The command removes all the Kubernetes components associated with the chart and | Name | Description | Value | | ------------------------ | --------------------------------------------------------------------------------------- | --------------- | -| `nameOverride` | String to partially override kafka.fullname | `""` | -| `fullnameOverride` | String to fully override kafka.fullname | `""` | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | | `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | | `commonLabels` | Labels to add to all deployed objects | `{}` | | `commonAnnotations` | Annotations to add to all deployed objects | `{}` | | `extraDeploy` | Array of extra objects to deploy with the release | `[]` | | `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | -| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | -| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | ### Kafka parameters -| Name | Description | Value | -| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | -| `image.registry` | Kafka image registry | `docker.io` | -| `image.repository` | Kafka image repository | `bitnami/kafka` | -| `image.tag` | Kafka image tag (immutable tags are recommended) 
| `2.8.1-debian-10-r99` | -| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `image.debug` | Set to true if you would like to see extra information on logs | `false` | -| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified (see [below]( | `""` | -| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` | -| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers. | `""` | -| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file. | `""` | -| `heapOpts` | Kafka's Java Heap size | `-Xmx1024m -Xms1024m` | -| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | -| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `true` | -| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `_10000` | -| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | -| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | -| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | -| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | -| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` | -| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` | -| `maxMessageBytes` | The largest record batch size allowed by Kafka | `_1000012` | -| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | -| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | -| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | -| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | -| `numIoThreads` | The number of threads doing disk I/O | `8` | -| `numNetworkThreads` | The number of threads handling network requests | `3` | -| `numPartitions` | The default number of log partitions per topic | `1` | -| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | -| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | -| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | -| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | -| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to Zookeeper | `6000` | -| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | -| `authorizerClassName` | The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties. | `""` | -| `allowEveryoneIfNoAclFound` | By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users. 
| `true` | -| `superUsers` | You can add super users in server.properties | `User:admin` | -| `command` | Override kafka container command | `["/scripts/setup.sh"]` | -| `args` | Override kafka container arguments | `[]` | -| `extraEnvVars` | Extra environment variables to add to kafka pods (see [below]({KEY} | `[]` | -| `extraVolumes` | Extra volume(s) to add to Kafka statefulset | `[]` | -| `extraVolumeMounts` | Extra volumeMount(s) to add to Kafka containers | `[]` | -| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | -| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | -| `auth.sasl.mechanisms` | SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | -| `auth.sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `plain` | -| `auth.sasl.jaas.clientUsers` | Kafka client user list | `["user"]` | -| `auth.sasl.jaas.clientPasswords` | Kafka client passwords. This is mandatory if more than one user is specified in clientUsers | `[]` | -| `auth.sasl.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | -| `auth.sasl.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `""` | -| `auth.sasl.jaas.zookeeperUser` | Kafka Zookeeper user for SASL authentication | `""` | -| `auth.sasl.jaas.zookeeperPassword` | Kafka Zookeeper password for SASL authentication | `""` | -| `auth.sasl.jaas.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser | `""` | -| `auth.saslMechanisms` | DEPRECATED: use `auth.sasl.mechanisms` instead. | `plain,scram-sha-256,scram-sha-512` | -| `auth.saslInterBrokerMechanism` | DEPRECATED: use `auth.sasl.interBrokerMechanism` instead. | `plain` | -| `auth.jaas` | DEPRECATED: use `auth.sasl.jaas` instead. | `{}` | -| `auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem` | `jks` | -| `auth.tls.existingSecrets` | Array existing secrets containing the TLS certificates for the Kafka brokers | `[]` | -| `auth.tls.existingSecret` | DEPRECATED: use `auth.tls.existingSecrets` instead. | `""` | -| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` | `false` | -| `auth.tls.password` | Password to access the JKS files or PEM key when they are password-protected. | `""` | -| `auth.tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` | `""` | -| `auth.tls.jksKeystoreSAN` | The secret key from the `auth.tls.existingSecret` containing the keystore with a SAN certificate | `""` | -| `auth.tls.jksTruststore` | The secret key from the `auth.tls.existingSecret` or `auth.tls.jksTruststoreSecret` containing the truststore | `""` | -| `auth.tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | -| `auth.jksSecret` | DEPRECATED: use `auth.tls.existingSecrets` instead. 
| `""` | -| `auth.jksTruststoreSecret` | DEPRECATED: use `auth.tls.jksTruststoreSecret` instead. | `""` | -| `auth.jksKeystoreSAN` | DEPRECATED: use `auth.tls.jksKeystoreSAN` instead. | `""` | -| `auth.jksTruststore` | DEPRECATED: use `auth.tls.jksTruststore` instead. | `""` | -| `auth.jksPassword` | DEPRECATED: use `auth.tls.password` instead. | `""` | -| `auth.tlsEndpointIdentificationAlgorithm` | DEPRECATED: use `auth.tls.endpointIdentificationAlgorithm` instead. | `https` | -| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | -| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array | `[]` | -| `listenerSecurityProtocolMap` | The protocol->listener mapping. Auto-calculated it's set to nil | `""` | -| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | -| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | +| Name | Description | Value | +| ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image repository | `bitnami/kafka` | +| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.2.0-debian-10-r4` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `""` | +| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` | +| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `true` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `_10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. 
When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `_1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to ZooKeeper | `6000` | +| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | +| `authorizerClassName` | The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties | `""` | +| `allowEveryoneIfNoAclFound` | By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users | `true` | +| `superUsers` | You can add super users in server.properties | `User:admin` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.externalClientProtocol` | Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `""` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.sasl.mechanisms` | SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `plain` | +| `auth.sasl.jaas.clientUsers` | Kafka client user list | `["user"]` | +| `auth.sasl.jaas.clientPasswords` | Kafka client passwords. 
This is mandatory if more than one user is specified in clientUsers | `[]` | +| `auth.sasl.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.sasl.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperUser` | Kafka ZooKeeper user for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperPassword` | Kafka ZooKeeper password for SASL authentication | `""` | +| `auth.sasl.jaas.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser | `""` | +| `auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem` | `jks` | +| `auth.tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` | +| `auth.tls.existingSecrets` | Array existing secrets containing the TLS certificates for the Kafka brokers | `[]` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` | `false` | +| `auth.tls.password` | Password to access the JKS files or PEM key when they are password-protected. | `""` | +| `auth.tls.existingSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) | `""` | +| `auth.tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` | `""` | +| `auth.tls.jksKeystoreSAN` | The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate | `""` | +| `auth.tls.jksTruststore` | The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore | `""` | +| `auth.tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `auth.zookeeper.tls.enabled` | Enable TLS for Zookeeper client connections. | `false` | +| `auth.zookeeper.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` | +| `auth.zookeeper.tls.verifyHostname` | Hostname validation. | `true` | +| `auth.zookeeper.tls.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications. | `""` | +| `auth.zookeeper.tls.existingSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore. | `zookeeper.keystore.jks` | +| `auth.zookeeper.tls.existingSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore. | `zookeeper.truststore.jks` | +| `auth.zookeeper.tls.passwordsSecret` | Existing secret containing Keystore and Truststore passwords. | `""` | +| `auth.zookeeper.tls.passwordsSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore. | `keystore-password` | +| `auth.zookeeper.tls.passwordsSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore. | `truststore-password` | +| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. 
Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping. Auto-calculated it's set to nil | `""` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | +| `command` | Override Kafka container command | `["/scripts/setup.sh"]` | +| `args` | Override Kafka container arguments | `[]` | +| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | ### Statefulset parameters -| Name | Description | Value | -| ------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | -| `replicaCount` | Number of Kafka nodes | `1` | -| `minBrokerId` | Minimal broker.id value, nodes increment their `broker.id` respectively | `0` | -| `updateStrategy` | Update strategy for the stateful set | `RollingUpdate` | -| `rollingUpdatePartition` | Partition update strategy | `""` | -| `hostAliases` | Add deployment host aliases | `[]` | -| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | -| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | -| `podLabels` | Kafka pod labels | `{}` | -| `podAnnotations` | Kafka Pod annotations | `{}` | -| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | -| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `affinity` | Affinity for pod assignment | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Tolerations for pod assignment | `[]` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `{}` | -| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | -| `podSecurityContext.enabled` | Enable security context for the pods | `true` | -| `podSecurityContext.fsGroup` | Group ID for the filesystem used by the containers | `1001` | -| `podSecurityContext.runAsUser` | User ID for the service user running the pod | `1001` | -| `containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `false` | -| `resources.limits` | The resources limits for Kafka containers | `{}` | -| `resources.requests` | The requested resources for Kafka containers | `{}` | -| `livenessProbe.enabled` | Enable livenessProbe | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `readinessProbe.enabled` | Enable readinessProbe | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `customLivenessProbe` | Custom Liveness probe configuration for Kafka | `{}` | -| `customReadinessProbe` | Custom Readiness probe configuration for Kafka | `{}` | -| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | -| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | -| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` | -| `sidecars` | Attach additional sidecar containers to the Kafka pod | `[]` | -| `initContainers` | Add extra init containers | `[]` | - - -### Exposure parameters - -| Name | Description | Value | -| ------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------- | -| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | -| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | -| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | -| `service.type` | Kubernetes Service type | `ClusterIP` | -| `service.port` | Kafka port for client connections | `9092` | -| `service.internalPort` | Kafka port for inter-broker connections | `9093` | -| `service.externalPort` | Kafka port for external connections | `9094` | -| `service.nodePorts` | Specify the nodePort value for the LoadBalancer and NodePort service types. 
| `{}` | -| `service.loadBalancerIP` | loadBalancerIP for Kafka Service | `""` | -| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `service.annotations` | Service annotations | `{}` | -| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | -| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | -| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | -| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | -| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.23.1-debian-10-r26` | -| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | -| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | -| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | -| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | -| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` | -| `externalAccess.service.port` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | -| `externalAccess.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | -| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `""` | -| `externalAccess.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | +| Name | Description | Value | +| --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of Kafka nodes | `1` | +| `minBrokerId` | Minimal broker.id value, nodes increment their `broker.id` respectively | `0` | +| `containerPorts.client` | Kafka client container port | `9092` | +| `containerPorts.internal` | Kafka inter-broker container port | `9093` | +| `containerPorts.external` | Kafka external container port | `9094` | +| `livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the container | `{}` | +| `resources.requests` | The requested resources for the container | `{}` | +| `podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `hostAliases` | Kafka pods host aliases | `[]` | +| `hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `podLabels` | Extra labels for Kafka pods | `{}` | +| `podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard`                                                                                                                                                                  | `""`            |
+| `podAntiAffinityPreset`                  | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                             | `soft`          |
+| `nodeAffinityPreset.type`                | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`                                                                                                            | `""`            |
+| `nodeAffinityPreset.key`                 | Node label key to match. Ignored if `affinity` is set.                                                                                                                                               | `""`            |
+| `nodeAffinityPreset.values`              | Node label values to match. Ignored if `affinity` is set.                                                                                                                                            | `[]`            |
+| `affinity`                               | Affinity for pod assignment                                                                                                                                                                          | `{}`            |
+| `nodeSelector`                           | Node labels for pod assignment                                                                                                                                                                       | `{}`            |
+| `tolerations`                            | Tolerations for pod assignment                                                                                                                                                                       | `[]`            |
+| `topologySpreadConstraints`              | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template                                                                             | `{}`            |
+| `terminationGracePeriodSeconds`          | Seconds the pod needs to gracefully terminate                                                                                                                                                        | `""`            |
+| `podManagementPolicy`                    | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel`      |
+| `priorityClassName`                      | Name of the existing priority class to be used by Kafka pods                                                                                                                                         | `""`            |
+| `schedulerName`                          | Name of the k8s scheduler (other than default)                                                                                                                                                       | `""`            |
+| `updateStrategy.type`                    | Kafka statefulset strategy type                                                                                                                                                                      | `RollingUpdate` |
+| `updateStrategy.rollingUpdate`           | Kafka statefulset rolling update configuration parameters                                                                                                                                            | `{}`            |
+| `extraVolumes`                           | Optionally specify extra list of additional volumes for the Kafka pod(s)                                                                                                                             | `[]`            |
+| `extraVolumeMounts`                      | Optionally specify extra list of additional volumeMounts for the Kafka container(s)                                                                                                                  | `[]`            |
+| `sidecars`                               | Add additional sidecar containers to the Kafka pod(s)                                                                                                                                                | `[]`            |
+| `initContainers`                         | Add additional init containers to the Kafka pod(s)                                                                                                                                                   | `[]`            |
+| `pdb.create`                             | Deploy a pdb object for the Kafka pod                                                                                                                                                                | `false`         |
+| `pdb.minAvailable`                       | Minimum number/percentage of available Kafka replicas                                                                                                                                                | `""`            |
+| `pdb.maxUnavailable`                     | Maximum number/percentage of unavailable Kafka replicas                                                                                                                                              | `1`             |
+
+
+### Traffic Exposure parameters
+
+| Name                                               | Description                                                                                         | Value                 |
+| -------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------- |
+| `service.type`                                     | Kubernetes Service type                                                                             | `ClusterIP`           |
+| `service.ports.client`                             | Kafka svc port for client connections                                                               | `9092`                |
+| `service.ports.internal`                           | Kafka svc port for inter-broker connections                                                         | `9093`                |
+| `service.ports.external`                           | Kafka svc port for external connections                                                             | `9094`                |
+| `service.nodePorts.client`                         | Node port for the Kafka client connections                                                          | `""`                  |
+| `service.nodePorts.external`                       | Node port for the Kafka external connections                                                        | `""`                  |
+| `service.sessionAffinity`                          | Control where client requests go, to the same pod or round-robin                                    | `None`                |
+| `service.sessionAffinityConfig`                    | Additional settings for the sessionAffinity                                                         | `{}`                  |
+| `service.clusterIP`                                | Kafka service Cluster IP                                                                            | `""`                  |
+| `service.loadBalancerIP`                           | Kafka service Load Balancer IP                                                                      | `""`                  |
+| `service.loadBalancerSourceRanges`                 | Kafka service Load Balancer sources                                                                 | `[]`                  |
+| `service.externalTrafficPolicy`                    | Kafka service external traffic policy                                                               | `Cluster`             |
+| `service.annotations`                              | Additional custom annotations for Kafka service                                                     | `{}`                  |
+| `service.extraPorts`                               | Extra ports to expose in the Kafka service (normally used with the `sidecar` value)                 | `[]`                  |
+| `externalAccess.enabled`                           | Enable Kubernetes external cluster access to Kafka brokers                                          | `false`               |
+| `externalAccess.autoDiscovery.enabled`             | Enable using an init container to auto-detect external IPs/ports by querying the K8s API            | `false`               |
+| `externalAccess.autoDiscovery.image.registry`      | Init container auto-discovery image registry                                                        | `docker.io`           |
+| `externalAccess.autoDiscovery.image.repository`    | Init container auto-discovery image repository                                                      | `bitnami/kubectl`     |
+| `externalAccess.autoDiscovery.image.tag`           | Init container auto-discovery image tag (immutable tags are recommended)                            | `1.24.0-debian-10-r5` |
+| `externalAccess.autoDiscovery.image.pullPolicy`    | Init container auto-discovery image pull policy                                                     | `IfNotPresent`        |
+| `externalAccess.autoDiscovery.image.pullSecrets`   | Init container auto-discovery image pull secrets                                                    | `[]`                  |
+| `externalAccess.autoDiscovery.resources.limits`    | The resources limits for the auto-discovery init container                                          | `{}`                  |
+| `externalAccess.autoDiscovery.resources.requests`  | The requested resources for the auto-discovery init container                                       | `{}`                  |
+| `externalAccess.service.type`                      | Kubernetes Service type for external access. It can be NodePort or LoadBalancer                     | `LoadBalancer`        |
+| `externalAccess.service.ports.external`            | Kafka port used for external access when service type is LoadBalancer                               | `9094`                |
+| `externalAccess.service.loadBalancerIPs`           | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount           | `[]`                  |
+| `externalAccess.service.loadBalancerNames`         | Array of load balancer names for each Kafka broker. Length must be the same as replicaCount         | `[]`                  |
+| `externalAccess.service.loadBalancerAnnotations`   | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount   | `[]`                  |
+| `externalAccess.service.loadBalancerSourceRanges`  | Address(es) that are allowed when service is LoadBalancer                                           | `[]`                  |
+| `externalAccess.service.nodePorts`                 | Array of node ports used for each Kafka broker. Length must be the same as replicaCount             | `[]`                  |
+| `externalAccess.service.useHostIPs`                | Use service host IPs to configure Kafka external listener when service type is NodePort             | `false`               |
+| `externalAccess.service.usePodIPs`                 | Use the MY_POD_IP address for external access.
| `false` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `""` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | ### Persistence parameters -| Name | Description | Value | -| --------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | -| `persistence.enabled` | Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected | `true` | -| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template | `""` | -| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | -| `persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | -| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | -| `persistence.annotations` | Annotations for the PVC | `{}` | -| `persistence.selector` | Selector to match an existing Persistent Volume for Kafka's data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | -| `logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that Zookeeper persistence is unaffected | `false` | -| `logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | -| `logPersistence.existingLogClaim` | PV Storage Class | `""` | -| `logPersistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | -| `logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | -| `logPersistence.annotations` | Annotations for the PVC | `{}` | -| `logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka's log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | -| `logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | - - -### RBAC parameters +| Name | Description | Value | +| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `logPersistence.annotations` | Annotations for the PVC | `{}` | +| `logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r434` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Other Parameters | Name | Description | Value | | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | | `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | | `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | | `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | | `rbac.create` | Whether to create & use RBAC resources or not | `false` | -### Volume Permissions parameters - -| Name | Description | Value | -| --------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | -| `volumePermissions.securityContext.runAsUser` | User ID for the container. 
| `0` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r307` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | -| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | - - ### Metrics parameters | Name | Description | Value | @@ -299,90 +331,146 @@ The command removes all the Kubernetes components associated with the chart and | `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | | `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | | `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` | -| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.4.2-debian-10-r115` | +| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.4.2-debian-10-r243` | | `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | | `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | -| `metrics.kafka.serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | -| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | -| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka Exporter | `""` | -| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | | `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | | `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | | `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | -| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka Exporter client authentication | `""` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | | `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | -| `metrics.kafka.podLabels` | Kafka exporter pod labels | `{}` | -| `metrics.kafka.podAnnotations` | Kafka exporter pod annotations | `{}` | -| `metrics.kafka.containerSecurityContext.enabled` | Enable Prometheus Kafka Exporter containers' Security Context | `false` | -| `metrics.kafka.resources.limits` | Kafka Exporter container resource limits | `{}` | -| `metrics.kafka.resources.requests` | Kafka Exporter container resource requests | `{}` | -| `metrics.kafka.affinity` | Affinity for Kafka Exporter pod assignment | `{}` | -| `metrics.kafka.nodeSelector` | Node labels for Kafka Exporter pod assignment | `{}` | -| `metrics.kafka.tolerations` | Tolerations for Kafka Exporter pod assignment | `[]` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.resources.limits` | The resources limits for the container | `{}` | +| `metrics.kafka.resources.requests` | The requested resources for the container | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set Kafka exporter containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. 
Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`                                     | `soft`   |
+| `metrics.kafka.nodeAffinityPreset.type`                     | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`         | `""`     |
+| `metrics.kafka.nodeAffinityPreset.key`                      | Node label key to match. Ignored if `metrics.kafka.affinity` is set.                                            | `""`     |
+| `metrics.kafka.nodeAffinityPreset.values`                   | Node label values to match. Ignored if `metrics.kafka.affinity` is set.                                         | `[]`     |
+| `metrics.kafka.affinity`                                    | Affinity for pod assignment                                                                                     | `{}`     |
+| `metrics.kafka.nodeSelector`                                | Node labels for pod assignment                                                                                  | `{}`     |
+| `metrics.kafka.tolerations`                                 | Tolerations for pod assignment                                                                                  | `[]`     |
+| `metrics.kafka.schedulerName`                               | Name of the k8s scheduler (other than default) for Kafka exporter                                               | `""`     |
+| `metrics.kafka.priorityClassName`                           | Kafka exporter pods' priorityClassName                                                                          | `""`     |
+| `metrics.kafka.topologySpreadConstraints`                   | Topology Spread Constraints for pod assignment                                                                  | `[]`     |
+| `metrics.kafka.extraVolumes`                                | Optionally specify extra list of additional volumes for the Kafka exporter pod(s)                               | `[]`     |
+| `metrics.kafka.extraVolumeMounts`                           | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s)                    | `[]`     |
+| `metrics.kafka.sidecars`                                    | Add additional sidecar containers to the Kafka exporter pod(s)                                                  | `[]`     |
+| `metrics.kafka.initContainers`                              | Add init containers to the Kafka exporter pods                                                                  | `[]`     |
+| `metrics.kafka.service.ports.metrics`                       | Kafka exporter metrics service port                                                                             | `9308`   |
+| `metrics.kafka.service.clusterIP`                           | Static clusterIP or None for headless services                                                                  | `""`     |
+| `metrics.kafka.service.sessionAffinity`                     | Control where client requests go, to the same pod or round-robin                                                | `None`   |
+| `metrics.kafka.service.annotations`                         | Annotations for the Kafka exporter service                                                                      | `{}`     |
+| `metrics.kafka.serviceAccount.create`                       | Enable creation of ServiceAccount for Kafka exporter pods                                                       | `true`   |
+| `metrics.kafka.serviceAccount.name`                         | The name of the service account to use.
If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | | `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | | `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | | `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | -| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.16.1-debian-10-r177` | +| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.16.1-debian-10-r306` | | `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | | `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX Exporter Containers' Security Context | `false` | -| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | -| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | -| `metrics.jmx.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` | -| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` | -| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` | -| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `""` | -| `metrics.jmx.service.loadBalancerSourceRanges` | Load Balancer sources | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resources.limits` | The resources limits for the JMX exporter container | `{}` | +| `metrics.jmx.resources.requests` | The requested resources for the JMX exporter container | `{}` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | | `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.jmx.service.annotations` | Annotations for the JMX Exporter Prometheus metrics service | `{}` | -| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX Exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | | `metrics.jmx.config` | Configuration file for JMX exporter | `""` | | `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | | `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires 
`metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | | `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | | `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | | `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | -| `metrics.serviceMonitor.relabelings` | Relabel configuration for the metrics | `[]` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | | `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | ### Kafka provisioning parameters -| Name | Description | Value | -| --------------------------------- | --------------------------------------------------------------------- | ------- | -| `provisioning.enabled` | Enable kafka provisioning Job | `false` | -| `provisioning.numPartitions` | Default number of partitions for topics when unspecified. | `1` | -| `provisioning.replicationFactor` | Default replication factor for topics when unspecified. | `1` | -| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | -| `provisioning.podAnnotations` | Provisioning Pod annotations. | `{}` | -| `provisioning.resources.limits` | The resources limits for the container | `{}` | -| `provisioning.resources.requests` | The requested resources for the container | `{}` | -| `provisioning.command` | Override provisioning container command | `[]` | -| `provisioning.args` | Override provisioning container arguments | `[]` | -| `provisioning.topics` | Kafka provisioning topics | `[]` | - - -### Zookeeper chart parameters - -| Name | Description | Value | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| `zookeeper.enabled` | Switch to enable or disable the Zookeeper helm chart | `true` | -| `zookeeper.auth.enabled` | Enable Zookeeper auth | `false` | -| `zookeeper.auth.clientUser` | User that will use Zookeeper clients to auth | `""` | -| `zookeeper.auth.clientPassword` | Password that will use Zookeeper clients to auth | `""` | -| `zookeeper.auth.serverUsers` | Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | -| `zookeeper.auth.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` |
-| `externalZookeeper.servers` | Server or list of external Zookeeper servers to use | `[]` |

+| Name | Description | Value |
+| ---------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | --------------------- |
+| `provisioning.enabled` | Enable Kafka provisioning Job | `false` |
+| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` |
+| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` |
+| `provisioning.topics` | Kafka topics to provision | `[]` |
+| `provisioning.tolerations` | Tolerations for pod assignment | `[]` |
+| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` |
+| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` |
+| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is the path to a properties file with the most needed configurations | `""` |
+| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is the path to a properties file with the most needed configurations | `""` |
+| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` |
+| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` |
+| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if the 'cert' key is different from the default (tls.crt) | `tls.crt` |
+| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if the 'key' key is different from the default (tls.key) | `tls.key` |
+| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if the 'caCert' key is different from the default (ca.crt) | `ca.crt` |
+| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if the 'keystore' key is different from the default (keystore.jks) | `keystore.jks` |
+| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if the 'truststore' key is different from the default (truststore.jks) | `truststore.jks` |
+| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. | `""` |
+| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if the 'keyPasswordSecretKey' key is different from the default (key-password) | `key-password` |
+| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if the 'keystorePasswordSecretKey' key is different from the default (keystore-password) | `keystore-password` |
+| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if the 'truststorePasswordSecretKey' key is different from the default (truststore-password) | `truststore-password` |
+| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` |
+| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` |
+| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. 
| `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.resources.limits` | The resources limits for the Kafka provisioning container | `{}` | +| `provisioning.resources.requests` | The requested resources for the Kafka provisioning container | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | Set Kafka provisioning containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.waitForKafka` | If true use an init container to wait until kafka is ready before starting provisioning | `true` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart | `true` | +| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | +| `zookeeper.auth.enabled` | Enable ZooKeeper auth | `false` | +| `zookeeper.auth.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.serverUsers` | Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | +| `zookeeper.auth.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | +| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | +| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | +| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | +| `externalZookeeper.servers` | List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. | `[]` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, @@ -403,103 +491,6 @@ helm install my-release -f values.yaml bitnami/kafka > **Tip**: You can use the default [values.yaml](values.yaml) -## Best practices values -#### 1. For **advertised.listeners** via domain -```yaml -externalAccess: - enabled: true - service: - type: NodePort - domain: your-external-domain - autoDiscovery: - enabled: true -metrics: - kafka: - enabled: true - jmx: - enabled: true - serviceMonitor: - enabled: true -serviceAccount: - create: true -rbac: - create: true -zookeeper: - enabled: true -``` -get *you-kafka-release* external svc from kubernetes: -```shell -$ kubectl get svc | grep external -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kafka (not this) ClusterIP 10.96.105.60 9092/TCP 17h -kafka-0-external (this) NodePort 10.96.113.241 9094:32039/TCP 17h -kafka-1-external (this) NodePort 10.96.4.12 9094:30795/TCP 17h -kafka-2-external (this) NodePort 10.96.103.118 9094:30681/TCP 17h -``` -and then you must be accessed via *your-external-domain*: -```log -;; QUESTION SECTION: -;your-external-domain. IN A - -;; ANSWER SECTION: -your-external-domain. 3600 IN A 10.96.4.12 -your-external-domain. 3600 IN A 10.96.113.241 -your-external-domain. 3600 IN A 10.96.103.118 -``` -Finally you can use like this: (used by shell script) -```shell -$ JMX_PORT=0 kafka-topics.sh --bootstrap-server your-external-domain:9094 --list -FINE_test -__consumer_offsets -.... - -$ JMX_PORT=0 kafka-console-consumer.sh --bootstrap-server your-external-domain:9094 --topic spot_robot_order_entrust --from-beginning -message1 -message2 -... -``` - -#### 2. For **advertised.listeners** via HostIP -```yaml -externalAccess: - enabled: true - service: - type: NodePort - nodePorts: - - 30001 - - 30002 - - 30003 - useHostIPs: true - domain: "" - autoDiscovery: - enabled: false -metrics: - kafka: - enabled: true - jmx: - enabled: true - serviceMonitor: - enabled: true -serviceAccount: - create: true -rbac: - create: false -zookeeper: - enabled: true -``` -Finally you can use like this: (used by shell script) -```shell -$ JMX_PORT=0 kafka-topics.sh --bootstrap-server hostIP1:30001 --list -FINE_test -__consumer_offsets -.... - -$ JMX_PORT=0 kafka-console-consumer.sh --bootstrap-server hostIP1:30002 --topic spot_robot_order_entrust --from-beginning -message1 -message2 -... -``` - ## Configuration and installation details ### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) @@ -563,6 +554,7 @@ If, for some reason (like using Cert-Manager) you can not use the default JKS se - `auth.tls.jksTruststoreSecret` to define additional secret, where the `kafka.truststore.jks` is being kept. The truststore password **must** be the same as in `auth.tls.password` - `auth.tls.jksTruststore` to overwrite the default value of the truststore key (`kafka.truststore.jks`). 
- `auth.tls.jksKeystoreSAN` if you want to use a SAN certificate for your brokers. Setting this parameter would mean that the chart expects a existing key in the `auth.tls.jksTruststoreSecret` with the `auth.tls.jksKeystoreSAN` value and use this as a keystore for **all** brokers
+> **Note**: If you are using cert-manager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that cert-manager creates. To handle this, the `auth.tls.pemChainIncluded` property can be set to `true` and the initContainer created by this Chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain).

> **Note**: The truststore/keystore from above **must** be protected with the same password as in `auth.tls.password`

@@ -630,7 +622,7 @@ You have two alternatives to use LoadBalancer services:

```console
externalAccess.enabled=true
externalAccess.service.type=LoadBalancer
-externalAccess.service.port=9094
+externalAccess.service.ports.external=9094
externalAccess.autoDiscovery.enabled=true
serviceAccount.create=true
rbac.create=true
@@ -643,7 +635,7 @@ Note: This option requires creating RBAC rules on clusters where RBAC policies a

```console
externalAccess.enabled=true
externalAccess.service.type=LoadBalancer
-externalAccess.service.port=9094
+externalAccess.service.ports.external=9094
externalAccess.service.loadBalancerIPs[0]='external-ip-1'
externalAccess.service.loadBalancerIPs[1]='external-ip-2'}
```
@@ -691,6 +683,7 @@ externalAccess:
    annotations:
      external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com"
```
+

### Sidecars

If you have a need for additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
@@ -808,10 +801,27 @@ You can enable this initContainer by setting `volumePermissions.enabled` to `tru

## Troubleshooting

-Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).

## Upgrading

+### To 16.0.0
+
+This major version updates the ZooKeeper subchart to its newest major, 9.0.0. For more information on this subchart's major version, please refer to the [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/master/bitnami/zookeeper#to-900).
+
+### To 15.0.0
+
+This major release bumps the Kafka major version to the `3.x` series.
+It also renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. Some affected values are:
+
+- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map.
+- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map.
+- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- Several parameters marked as deprecated `14.x.x` are not supported anymore.
+
+It additionally updates the ZooKeeper subchart to its newest major, `8.0.0`, which contains similar changes. 
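+
+As a migration aid, the following is a minimal `values.yaml` sketch of the regrouping. The new key names are taken from this chart's templates and the renames listed above (for instance, `service.port` becomes `service.ports.client`, and `externalAccess.service.port` becomes `externalAccess.service.ports.external` as shown in the examples earlier in this README); the port numbers are only illustrative:
+
+```yaml
+# Before (chart 14.x and earlier)
+service:
+  port: 9092
+metrics:
+  jmx:
+    service:
+      port: 5556
+
+# After (chart 15.0.0 and later)
+service:
+  ports:
+    client: 9092
+metrics:
+  jmx:
+    service:
+      ports:
+        metrics: 5556
+```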
+ ### To 14.0.0 In this version, the `image` block is defined once and is used in the different templates, while in the previous version, the `image` block was duplicated for the main container and the provisioning one @@ -822,7 +832,9 @@ image: repository: bitnami/kafka tag: 2.8.0 ``` + VS + ```yaml image: registry: docker.io @@ -939,8 +951,8 @@ Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka: ```console -helm upgrade kafka ${REPO_NAME}/kafka --version 6.1.8 --set metrics.kafka.enabled=false -helm upgrade kafka ${REPO_NAME}/kafka --version 7.0.0 --set metrics.kafka.enabled=true +helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false +helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true ``` ### To 2.0.0 @@ -977,4 +989,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. +limitations under the License. \ No newline at end of file diff --git a/charts/kafka/templates/NOTES.txt b/charts/kafka/templates/NOTES.txt index ed93cdf..d4d94b7 100644 --- a/charts/kafka/templates/NOTES.txt +++ b/charts/kafka/templates/NOTES.txt @@ -25,46 +25,16 @@ In order to replicate the container startup scripts execute this command: {{- $replicaCount := int .Values.replicaCount -}} {{- $releaseNamespace := .Release.Namespace -}} {{- $clusterDomain := .Values.clusterDomain -}} -{{- $fullname := include "kafka.fullname" . -}} +{{- $fullname := include "common.names.fullname" . -}} {{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} -{{- $saslMechanisms := coalesce .Values.auth.sasl.mechanisms .Values.auth.saslMechanisms -}} -{{- $tlsEndpointIdentificationAlgorithm := default "" (coalesce .Values.auth.tls.endpointIdentificationAlgorithm .Values.auth.tlsEndpointIdentificationAlgorithm) -}} -{{- $tlsPassword := coalesce .Values.auth.tls.password .Values.auth.jksPassword -}} -{{- $servicePort := int .Values.service.port -}} -{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}} -{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }} - -############################################################################### -### ERROR: You enabled external access to Kafka brokers without specifying ### -### the array of load balancer IPs for Kafka brokers. ### -############################################################################### - -This deployment will be incomplete until you configure the array of load balancer -IPs for Kafka brokers. To complete your deployment follow the steps below: - -1. Wait for the load balancer IPs (it may take a few minutes for them to be available): - - kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w - -2. 
Obtain the load balancer IPs and upgrade your chart: - - {{- range $i, $e := until $replicaCount }} - LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" - {{- end }} - -3. Upgrade you chart: - - helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/{{ .Chart.Name }} \ - --set replicaCount={{ $replicaCount }} \ - --set externalAccess.enabled=true \ - {{- range $i, $e := until $replicaCount }} - --set externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \ - {{- end }} - --set externalAccess.service.type=LoadBalancer - -{{- else }} - -{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $clientProtocol "PLAINTEXT") }} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms -}} +{{- $tlsEndpointIdentificationAlgorithm := default "" .Values.auth.tls.endpointIdentificationAlgorithm -}} +{{- $tlsPasswordSecret := printf "$(kubectl get secret %s --namespace %s -o jsonpath='{.data.password}' | base64 --decode | cut -d , -f 1)" .Values.auth.tls.existingSecret $releaseNamespace -}} +{{- $tlsPassword := ternary .Values.auth.tls.password $tlsPasswordSecret (eq .Values.auth.tls.existingSecret "") -}} +{{- $servicePort := int .Values.service.ports.client -}} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $externalClientProtocol "PLAINTEXT") }} --------------------------------------------------------------------------------------------- WARNING @@ -103,7 +73,7 @@ org.apache.kafka.common.security.scram.ScramLoginModule required {{- else }} org.apache.kafka.common.security.plain.PlainLoginModule required {{- end }} -username="{{ index (coalesce .Values.auth.sasl.jaas.clientUsers .Values.auth.jaas.clientUsers) 0 }}" +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 --decode | cut -d , -f 1)"; }; @@ -201,7 +171,7 @@ To create a pod that you can use as a Kafka client run the following commands: CONSUMER: kafka-console-consumer.sh \ {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--consumer.config /tmp/client.properties \{{ end }} - --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.port }} \ + --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \ --topic test \ --from-beginning @@ -241,16 +211,94 @@ To connect to your Kafka server from outside the cluster, follow the instruction echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" - Kafka Brokers port: {{ .Values.externalAccess.service.port }} + Kafka Brokers port: {{ .Values.externalAccess.service.ports.external }} + +{{- end }} + +{{- if not (eq $clientProtocol $externalClientProtocol) }} +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication. 
To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: + + - kafka_jaas.conf: + +KafkaClient { +{{- if $saslMechanisms | regexFind "scram" }} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else }} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- end }} +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" +password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 --decode | cut -d , -f 1)"; +}; + + - client.properties: + +security.protocol={{ $externalClientProtocol }} +{{- if $saslMechanisms | regexFind "scram-sha-256" }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if $saslMechanisms | regexFind "scram-sha-512" }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- if eq $externalClientProtocol "SASL_SSL" }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.jks + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- + {{- end }} + {{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= + {{- end }} +{{- end }} + +{{- else if (include "kafka.externalClient.tlsEncryption" .) }} + +You need to configure your Kafka client to access using TLS authentication. To do so, you need to create the 'client.properties' configuration file with the content below: + +security.protocol={{ $externalClientProtocol }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} +{{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }} + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} +{{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if eq .Values.auth.externalClientProtocol "mtls" }} +ssl.keystore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.keystore.location=/tmp/client.keystore.jks + {{- if not (empty $tlsPassword) }} +ssl.keystore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... \ +-----END ENCRYPTED PRIVATE KEY----- + {{- end }} +{{- end }} +{{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} {{- end }} {{- end }} {{- end }} {{- end }} -{{- include "common.warnings.rollingTag" .Values.image }} -{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} -{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }} -{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }} -{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "kafka.checkRollingTags" . }} {{- include "kafka.validateValues" . 
}} diff --git a/charts/kafka/templates/_helpers.tpl b/charts/kafka/templates/_helpers.tpl index 926eb3a..059dab6 100644 --- a/charts/kafka/templates/_helpers.tpl +++ b/charts/kafka/templates/_helpers.tpl @@ -1,4 +1,5 @@ {{/* vim: set filetype=mustache: */}} + {{/* Expand the name of the chart. */}} @@ -6,15 +7,6 @@ Expand the name of the chart. {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "kafka.fullname" -}} -{{- include "common.names.fullname" . -}} -{{- end -}} - {{/* Create a default fully qualified zookeeper name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). @@ -33,7 +25,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this */}} {{- define "kafka.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} - {{ default (include "kafka.fullname" .) .Values.serviceAccount.name }} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} @@ -65,11 +57,11 @@ Create a default fully qualified Kafka exporter name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). */}} {{- define "kafka.metrics.kafka.fullname" -}} - {{- printf "%s-exporter" (include "kafka.fullname" .) | trunc 63 | trimSuffix "-" }} + {{- printf "%s-exporter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} {{- end -}} {{/* - Create the name of the service account to use for Kafka exporer pods + Create the name of the service account to use for Kafka exporter pods */}} {{- define "kafka.metrics.kafka.serviceAccountName" -}} {{- if .Values.metrics.kafka.serviceAccount.create -}} @@ -165,6 +157,23 @@ Return true if encryption via TLS for client connections should be configured {{- end -}} {{- end -}} +{{/* +Return the configured value for the external client protocol, defaults to the same value as clientProtocol +*/}} +{{- define "kafka.externalClientProtocol" -}} + {{- coalesce .Values.auth.externalClientProtocol .Values.auth.clientProtocol -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for external client connections should be configured +*/}} +{{- define "kafka.externalClient.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has (include "kafka.externalClientProtocol" . ) $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + {{/* Return true if encryption via TLS for inter broker communication connections should be configured */}} @@ -179,7 +188,7 @@ Return true if encryption via TLS for inter broker communication connections sho Return true if encryption via TLS should be configured */}} {{- define "kafka.tlsEncryption" -}} -{{- if or (include "kafka.client.tlsEncryption" .) (include "kafka.interBroker.tlsEncryption" .) -}} +{{- if or (include "kafka.client.tlsEncryption" .) (include "kafka.interBroker.tlsEncryption" .) (include "kafka.externalClient.tlsEncryption" .) 
-}} {{- true -}} {{- end -}} {{- end -}} @@ -201,15 +210,30 @@ SASL_PLAINTEXT {{- end -}} {{- end -}} +{{/* +Return the protocol used with zookeeper +*/}} +{{- define "kafka.zookeeper.protocol" -}} +{{- if and .Values.auth.zookeeper.tls.enabled .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser -}} +SASL_SSL +{{- else if and .Values.auth.zookeeper.tls.enabled -}} +SSL +{{- else if and .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser -}} +SASL +{{- else -}} +PLAINTEXT +{{- end -}} +{{- end -}} + {{/* Return the Kafka JAAS credentials secret */}} {{- define "kafka.jaasSecretName" -}} -{{- $secretName := coalesce .Values.auth.sasl.jaas.existingSecret .Values.auth.jaas.existingSecret -}} +{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}} {{- if $secretName -}} {{- printf "%s" (tpl $secretName $) -}} {{- else -}} - {{- printf "%s-jaas" (include "kafka.fullname" .) -}} + {{- printf "%s-jaas" (include "common.names.fullname" .) -}} {{- end -}} {{- end -}} @@ -217,8 +241,8 @@ Return the Kafka JAAS credentials secret Return true if a JAAS credentials secret object should be created */}} {{- define "kafka.createJaasSecret" -}} -{{- $secretName := coalesce .Values.auth.sasl.jaas.existingSecret .Values.auth.jaas.existingSecret -}} -{{- if and (or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) (and .Values.zookeeper.auth.enabled .Values.auth.jaas.zookeeperUser)) (empty $secretName) -}} +{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}} +{{- if and (or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) (and .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser)) (empty $secretName) -}} {{- true -}} {{- end -}} {{- end -}} @@ -227,10 +251,7 @@ Return true if a JAAS credentials secret object should be created Return true if a TLS credentials secret object should be created */}} {{- define "kafka.createTlsSecret" -}} -{{- $secretName := coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret -}} -{{- if and (include "kafka.tlsEncryption" .) (empty .Values.auth.tls.existingSecrets) (empty $secretName) (eq .Values.auth.tls.type "jks") (.Files.Glob "files/tls/*.jks") }} - {{- true -}} -{{- else if and (include "kafka.tlsEncryption" .) (empty .Values.auth.tls.existingSecrets) (empty $secretName) (eq .Values.auth.tls.type "pem") (or (.Files.Glob "files/tls/*.{crt,pem}") .Values.auth.tls.autoGenerated) }} +{{- if and (include "kafka.tlsEncryption" .) (empty .Values.auth.tls.existingSecrets) (eq .Values.auth.tls.type "pem") .Values.auth.tls.autoGenerated }} {{- true -}} {{- end -}} {{- end -}} @@ -242,7 +263,19 @@ Return the Kafka configuration configmap {{- if .Values.existingConfigmap -}} {{- printf "%s" (tpl .Values.existingConfigmap $) -}} {{- else -}} - {{- printf "%s-configuration" (include "kafka.fullname" .) -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the secret name for the Kafka Provisioning client +*/}} +{{- define "kafka.client.passwordsSecretName" -}} +{{- if .Values.provisioning.auth.tls.passwordsSecret -}} + {{- printf "%s" (tpl .Values.provisioning.auth.tls.passwordsSecret $) -}} +{{- else -}} + {{- printf "%s-client-secret" (include "common.names.fullname" .) -}} {{- end -}} {{- end -}} @@ -262,7 +295,7 @@ Return the Kafka log4j ConfigMap name. 
{{- if .Values.existingLog4jConfigMap -}} {{- printf "%s" (tpl .Values.existingLog4jConfigMap $) -}} {{- else -}} - {{- printf "%s-log4j-configuration" (include "kafka.fullname" .) -}} + {{- printf "%s-log4j-configuration" (include "common.names.fullname" .) -}} {{- end -}} {{- end -}} @@ -280,13 +313,13 @@ Return the SASL mechanism to use for the Kafka exporter to access Kafka The exporter uses a different nomenclature so we need to do this hack */}} {{- define "kafka.metrics.kafka.saslMechanism" -}} -{{- $saslMechanisms := coalesce .Values.auth.sasl.mechanisms .Values.auth.saslMechanisms }} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms }} {{- if contains "scram-sha-512" $saslMechanisms }} - {{- printf "scram-sha512" -}} + {{- print "scram-sha512" -}} {{- else if contains "scram-sha-256" $saslMechanisms }} - {{- printf "scram-sha256" -}} + {{- print "scram-sha256" -}} {{- else -}} - {{- printf "plain" -}} + {{- print "plain" -}} {{- end -}} {{- end -}} @@ -297,7 +330,7 @@ Return the Kafka configuration configmap {{- if .Values.metrics.jmx.existingConfigmap -}} {{- printf "%s" (tpl .Values.metrics.jmx.existingConfigmap $) -}} {{- else -}} - {{- printf "%s-jmx-configuration" (include "kafka.fullname" .) -}} + {{- printf "%s-jmx-configuration" (include "common.names.fullname" .) -}} {{- end -}} {{- end -}} @@ -310,6 +343,17 @@ Return true if a configmap object should be created {{- end -}} {{- end -}} +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "kafka.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + {{/* Compile all warnings into a single message, and call fail. */}} @@ -319,9 +363,14 @@ Compile all warnings into a single message, and call fail. {{- $messages := append $messages (include "kafka.validateValues.nodePortListLength" .) -}} {{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceType" .) -}} {{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerIPs" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerNames" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerAnnotations" "context" . )) -}} {{- $messages := append $messages (include "kafka.validateValues.saslMechanisms" .) -}} {{- $messages := append $messages (include "kafka.validateValues.tlsSecrets" .) -}} {{- $messages := append $messages (include "kafka.validateValues.tlsSecrets.length" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsPasswords" .) -}} {{- $messages := without $messages "" -}} {{- $message := join "\n" $messages -}} @@ -333,13 +382,13 @@ Compile all warnings into a single message, and call fail. 
{{/* Validate values of Kafka - Authentication protocols for Kafka */}} {{- define "kafka.validateValues.authProtocols" -}} {{- $authProtocols := list "plaintext" "tls" "mtls" "sasl" "sasl_tls" -}} -{{- if or (not (has .Values.auth.clientProtocol $authProtocols)) (not (has .Values.auth.interBrokerProtocol $authProtocols)) -}} -kafka: auth.clientProtocol auth.interBrokerProtocol +{{- if or (not (has .Values.auth.clientProtocol $authProtocols)) (not (has .Values.auth.interBrokerProtocol $authProtocols)) (not (has (include "kafka.externalClientProtocol" . ) $authProtocols)) -}} +kafka: auth.clientProtocol auth.externalClientProtocol auth.interBrokerProtocol Available authentication protocols are "plaintext", "tls", "mtls", "sasl" and "sasl_tls" {{- end -}} {{- end -}} -{{/* Validate values of Kafka - number of replicas must be the same than NodePort list */}} +{{/* Validate values of Kafka - number of replicas must be the same as NodePort list */}} {{- define "kafka.validateValues.nodePortListLength" -}} {{- $replicaCount := int .Values.replicaCount }} {{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }} @@ -368,27 +417,49 @@ kafka: rbac.create {{- end -}} {{- end -}} +{{/* Validate values of Kafka - LoadBalancerIPs or LoadBalancerNames should be set when autoDiscovery is disabled */}} +{{- define "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" -}} +{{- $loadBalancerNameListLength := len .Values.externalAccess.service.loadBalancerNames -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}} +{{- if and .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "LoadBalancer") (not .Values.externalAccess.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }} +kafka: externalAccess.service.loadBalancerNames or externalAccess.service.loadBalancerIPs + By specifying "externalAccess.enabled=true", "externalAccess.autoDiscovery.enabled=false" and + "externalAccess.service.type=LoadBalancer" at least one of externalAccess.service.loadBalancerNames + or externalAccess.service.loadBalancerIPs must be set and the length of those arrays must be equal + to the number of replicas. +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of replicas must be the same as loadBalancerIPs list */}} +{{- define "kafka.validateValues.externalAccessServiceList" -}} +{{- $replicaCount := int .context.Values.replicaCount }} +{{- $listLength := len (get .context.Values.externalAccess.service .element) -}} +{{- if and .context.Values.externalAccess.enabled (not .context.Values.externalAccess.autoDiscovery.enabled) (eq .context.Values.externalAccess.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }} +kafka: externalAccess.service.{{ .element }} + Number of replicas and {{ .element }} array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }}
+{{- end -}}
+{{- end -}}
+
{{/* Validate values of Kafka - SASL mechanisms must be provided when using SASL */}}
{{- define "kafka.validateValues.saslMechanisms" -}}
-{{- if and (or (.Values.auth.clientProtocol | regexFind "sasl") (.Values.auth.interBrokerProtocol | regexFind "sasl") (and .Values.zookeeper.auth.enabled .Values.auth.jaas.zookeeperUser)) (not .Values.auth.saslMechanisms) }}
-kafka: auth.saslMechanisms
+{{- if and (or (.Values.auth.clientProtocol | regexFind "sasl") (.Values.auth.interBrokerProtocol | regexFind "sasl") (and .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser)) (not .Values.auth.sasl.mechanisms) }}
+kafka: auth.sasl.mechanisms
    The SASL mechanisms are required when either auth.clientProtocol or auth.interBrokerProtocol use SASL or Zookeeper user is provided.
{{- end }}
-{{- if not (contains .Values.auth.saslInterBrokerMechanism .Values.auth.saslMechanisms) }}
-kafka: auth.saslMechanisms
-  auth.saslInterBrokerMechanism must be provided and it should be one of the specified mechanisms at auth.saslMechanisms
+{{- if not (contains .Values.auth.sasl.interBrokerMechanism .Values.auth.sasl.mechanisms) }}
+kafka: auth.sasl.mechanisms
+  auth.sasl.interBrokerMechanism must be provided and it should be one of the specified mechanisms at auth.sasl.mechanisms
{{- end -}}
{{- end -}}

{{/* Validate values of Kafka - Secrets containing TLS certs must be provided when TLS authentication is enabled */}}
{{- define "kafka.validateValues.tlsSecrets" -}}
-{{- $secretName := coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret -}}
-{{- if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "jks") (empty .Values.auth.tls.existingSecrets) (empty $secretName) (not (.Files.Glob "files/tls/*.jks}")) }}
-kafka: auth.tls.existingSecret
+{{- if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "jks") (empty .Values.auth.tls.existingSecrets) }}
+kafka: auth.tls.existingSecrets
    A secret containing the Kafka JKS keystores and truststore is required
    when TLS encryption in enabled and TLS format is "JKS"
-{{- else if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "pem") (empty .Values.auth.tls.existingSecrets) (empty $secretName) (not (.Files.Glob "files/tls/*.{crt,pem}")) (not .Values.auth.tls.autoGenerated) }}
-kafka: auth.tls.existingSecret
+{{- else if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "pem") (empty .Values.auth.tls.existingSecrets) (not .Values.auth.tls.autoGenerated) }}
+kafka: auth.tls.existingSecrets
    A secret containing the Kafka TLS certificates and keys is required
    when TLS encryption in enabled and TLS format is "PEM"
{{- end -}}
{{- end -}}
@@ -405,3 +476,14 @@ kafka: .Values.auth.tls.existingSecrets
{{- end -}}
{{- end -}}
{{- end -}}
+
+{{/* Validate values of Kafka provisioning - keyPasswordSecretKey, keystorePasswordSecretKey or truststorePasswordSecretKey must not be used without passwordsSecret */}}
+{{- define "kafka.validateValues.tlsPasswords" -}}
+{{- if and (include "kafka.client.tlsEncryption" .) 
(not .Values.auth.tls.passwordsSecret) }}
+{{- if or .Values.auth.tls.keyPasswordSecretKey .Values.auth.tls.keystorePasswordSecretKey .Values.auth.tls.truststorePasswordSecretKey }}
+kafka: auth.tls.keyPasswordSecretKey,auth.tls.keystorePasswordSecretKey,auth.tls.truststorePasswordSecretKey
+    auth.tls.keyPasswordSecretKey,auth.tls.keystorePasswordSecretKey,auth.tls.truststorePasswordSecretKey
+    must not be used without passwordsSecret set.
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/kafka/templates/configmap.yaml b/charts/kafka/templates/configmap.yaml
index 717c0c7..509fd1c 100644
--- a/charts/kafka/templates/configmap.yaml
+++ b/charts/kafka/templates/configmap.yaml
@@ -2,7 +2,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
-  name: {{ template "kafka.fullname" . }}-configuration
+  name: {{ printf "%s-configuration" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.commonLabels }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
diff --git a/charts/kafka/templates/jaas-secret.yaml b/charts/kafka/templates/jaas-secret.yaml
index 7fa3a92..968d486 100644
--- a/charts/kafka/templates/jaas-secret.yaml
+++ b/charts/kafka/templates/jaas-secret.yaml
@@ -2,7 +2,8 @@
apiVersion: v1
kind: Secret
metadata:
-  name: {{ template "kafka.fullname" . }}-jaas
+  name: {{ printf "%s-jaas" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.commonLabels }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
@@ -13,25 +14,27 @@ metadata:
type: Opaque
data:
  {{- if (include "kafka.client.saslAuthentication" .) }}
-  {{- $clientUsers := coalesce .Values.auth.sasl.jaas.clientUsers .Values.auth.jaas.clientUsers }}
-  {{- $clientPasswords := coalesce .Values.auth.sasl.jaas.clientPasswords .Values.auth.jaas.clientPasswords }}
+  {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }}
+  {{- $clientPasswords := .Values.auth.sasl.jaas.clientPasswords }}
  {{- if $clientPasswords }}
  client-passwords: {{ join "," $clientPasswords | b64enc | quote }}
+  system-user-password: {{ index $clientPasswords 0 | b64enc | quote }}
  {{- else }}
  {{- $passwords := list }}
  {{- range $clientUsers }}
  {{- $passwords = append $passwords (randAlphaNum 10) }}
  {{- end }}
  client-passwords: {{ join "," $passwords | b64enc | quote }}
+  system-user-password: {{ index $passwords 0 | b64enc | quote }}
  {{- end }}
  {{- end }}
-  {{- $zookeeperUser := coalesce .Values.auth.sasl.jaas.zookeeperUser .Values.auth.jaas.zookeeperUser }}
+  {{- $zookeeperUser := .Values.auth.sasl.jaas.zookeeperUser }}
  {{- if and .Values.zookeeper.auth.enabled $zookeeperUser }}
-  {{- $zookeeperPassword := coalesce .Values.auth.sasl.jaas.zookeeperPassword .Values.auth.jaas.zookeeperPassword }}
+  {{- $zookeeperPassword := .Values.auth.sasl.jaas.zookeeperPassword }}
  zookeeper-password: {{ default (randAlphaNum 10) $zookeeperPassword | b64enc | quote }}
  {{- end }}
  {{- if (include "kafka.interBroker.saslAuthentication" .) 
}} - {{- $interBrokerPassword := coalesce .Values.auth.sasl.jaas.interBrokerPassword .Values.auth.jaas.interBrokerPassword }} + {{- $interBrokerPassword := .Values.auth.sasl.jaas.interBrokerPassword }} inter-broker-password: {{ default (randAlphaNum 10) $interBrokerPassword | b64enc | quote }} {{- end }} {{- end }} diff --git a/charts/kafka/templates/jmx-configmap.yaml b/charts/kafka/templates/jmx-configmap.yaml index 9a24a94..4650580 100644 --- a/charts/kafka/templates/jmx-configmap.yaml +++ b/charts/kafka/templates/jmx-configmap.yaml @@ -2,7 +2,8 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ template "kafka.fullname" . }}-jmx-configuration + name: {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} diff --git a/charts/kafka/templates/jmx-metrics-svc.yaml b/charts/kafka/templates/jmx-metrics-svc.yaml index 9f70d66..35c79f4 100644 --- a/charts/kafka/templates/jmx-metrics-svc.yaml +++ b/charts/kafka/templates/jmx-metrics-svc.yaml @@ -2,9 +2,10 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }}-jmx-metrics + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: kafka + app.kubernetes.io/component: metrics {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} @@ -18,28 +19,16 @@ metadata: {{- end }} {{- end }} spec: - type: {{ .Values.metrics.jmx.service.type }} - {{- if eq .Values.metrics.jmx.service.type "LoadBalancer" }} - {{- if .Values.metrics.jmx.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.metrics.jmx.service.loadBalancerIP }} - {{- end }} - {{- if .Values.metrics.jmx.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- toYaml .Values.metrics.jmx.service.loadBalancerSourceRanges | nindent 4 }} - {{- end }} - {{- end }} - {{- if and (eq .Values.metrics.jmx.service.type "ClusterIP") .Values.metrics.jmx.service.clusterIP }} + type: ClusterIP + sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }} + {{- if .Values.metrics.jmx.service.clusterIP }} clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} {{- end }} ports: - name: http-metrics - port: {{ .Values.metrics.jmx.service.port }} + port: {{ .Values.metrics.jmx.service.ports.metrics }} protocol: TCP targetPort: metrics - {{- if and (or (eq .Values.metrics.jmx.service.type "NodePort") (eq .Values.metrics.jmx.service.type "LoadBalancer")) (not (empty .Values.metrics.jmx.service.nodePort)) }} - nodePort: {{ .Values.metrics.jmx.service.nodePort }} - {{- else if eq .Values.metrics.jmx.service.type "ClusterIP" }} - nodePort: null - {{- end }} selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} app.kubernetes.io/component: kafka {{- end }} diff --git a/charts/kafka/templates/kafka-metrics-deployment.yaml b/charts/kafka/templates/kafka-metrics-deployment.yaml index 3a79bf1..1b1b3e4 100644 --- a/charts/kafka/templates/kafka-metrics-deployment.yaml +++ b/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -2,12 +2,13 @@ {{- $replicaCount := int .Values.replicaCount -}} {{- $releaseNamespace := .Release.Namespace -}} {{- $clusterDomain := .Values.clusterDomain -}} -{{- $fullname := include "kafka.fullname" . -}} -{{- $servicePort := int .Values.service.port -}} +{{- $fullname := include "common.names.fullname" . -}} +{{- $servicePort := int .Values.service.ports.client -}} apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} kind: Deployment metadata: - name: {{ template "kafka.metrics.kafka.fullname" . }} + name: {{ include "kafka.metrics.kafka.fullname" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: metrics {{- if .Values.commonLabels }} @@ -34,8 +35,34 @@ spec: {{- end }} spec: {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.kafka.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.kafka.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.priorityClassName }} + priorityClassName: {{ .Values.metrics.kafka.priorityClassName }} + {{- end }} {{- if .Values.metrics.kafka.schedulerName }} - schedulerName: {{ .Values.metrics.kafka.schedulerName | quote }} + schedulerName: {{ .Values.metrics.kafka.schedulerName }} + {{- end }} + {{- if .Values.metrics.kafka.podSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.podSecurityContext "enabled" | toYaml | nindent 8 }} {{- end }} serviceAccountName: {{ template "kafka.metrics.kafka.serviceAccountName" . 
}} {{- if .Values.metrics.kafka.initContainers }} @@ -50,11 +77,19 @@ spec: {{- end }} {{- if .Values.diagnosticMode.enabled }} command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} - args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.command "context" $) | nindent 12 }} {{- else }} command: - - /bin/bash - - -ec + - bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ce - | kafka_exporter \ {{- range $i, $e := until $replicaCount }} @@ -62,28 +97,29 @@ spec: {{- end }} {{- if (include "kafka.client.saslAuthentication" .) }} --sasl.enabled \ - --sasl.username="$SASL_USERNAME" \ - --sasl.password="${SASL_USER_PASSWORD%%,*}" \ - --sasl.mechanism="{{ include "kafka.metrics.kafka.saslMechanism" . }}" \ + --sasl.username=$SASL_USERNAME \ + --sasl.password=$SASL_USER_PASSWORD \ + --sasl.mechanism={{ include "kafka.metrics.kafka.saslMechanism" . }} \ {{- end }} {{- if (include "kafka.client.tlsEncryption" .) }} --tls.enabled \ {{- if .Values.metrics.kafka.certificatesSecret }} - --tls.key-file="/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }}" \ - --tls.cert-file="/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }}" \ + --tls.key-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }} \ + --tls.cert-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }} \ {{- if .Values.metrics.kafka.tlsCaSecret }} - --tls.ca-file="/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }}" \ + --tls.ca-file=/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }} \ {{- else }} - --tls.ca-file="/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }}" \ + --tls.ca-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }} \ {{- end }} {{- end }} {{- end }} {{- range $key, $value := .Values.metrics.kafka.extraFlags }} --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ {{- end }} - --web.listen-address=:9308 + --web.listen-address=:{{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} {{- if (include "kafka.client.saslAuthentication" .) }} - {{- $clientUsers := coalesce .Values.auth.sasl.jaas.clientUsers .Values.auth.jaas.clientUsers }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} env: - name: SASL_USERNAME value: {{ index $clientUsers 0 | quote }} @@ -91,47 +127,45 @@ spec: valueFrom: secretKeyRef: name: {{ include "kafka.jaasSecretName" . }} - key: client-passwords - {{- end }} + key: system-user-password {{- end }} ports: - name: metrics - containerPort: 9308 + containerPort: {{ .Values.metrics.kafka.containerPorts.metrics }} {{- if .Values.metrics.kafka.resources }} resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} {{- end }} - {{- if and (include "kafka.client.tlsEncryption" .) 
.Values.metrics.kafka.certificatesSecret }} volumeMounts: + {{- if .Values.metrics.kafka.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} - name: kafka-exporter-certificates mountPath: /opt/bitnami/kafka-exporter/certs/ readOnly: true - {{- if .Values.metrics.kafka.tlsCaSecret }} + {{- if .Values.metrics.kafka.tlsCaSecret }} - name: kafka-exporter-ca-certificate mountPath: /opt/bitnami/kafka-exporter/cacert/ readOnly: true - {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.sidecars "context" $) | nindent 8 }} + {{- end }} volumes: + {{- if .Values.metrics.kafka.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} - name: kafka-exporter-certificates secret: secretName: {{ .Values.metrics.kafka.certificatesSecret }} defaultMode: 0440 - {{- if .Values.metrics.kafka.tlsCaSecret }} + {{- if .Values.metrics.kafka.tlsCaSecret }} - name: kafka-exporter-ca-certificate secret: secretName: {{ .Values.metrics.kafka.tlsCaSecret }} defaultMode: 0440 - {{- end }} - {{- end }} - {{- if .Values.metrics.kafka.affinity }} - affinity: - {{ toYaml .Values.metrics.kafka.affinity | nindent 8 }} - {{- end }} - {{- if .Values.metrics.kafka.tolerations }} - tolerations: - {{ toYaml .Values.metrics.kafka.tolerations | nindent 8 }} - {{- end }} - {{- if .Values.metrics.kafka.nodeSelector }} - nodeSelector: - {{ toYaml .Values.metrics.kafka.nodeSelector | nindent 8 }} - {{- end }} + {{- end }} + {{- end }} {{- end }} diff --git a/charts/kafka/templates/kafka-metrics-serviceaccount.yaml b/charts/kafka/templates/kafka-metrics-serviceaccount.yaml index 7dc4e20..8dba0aa 100644 --- a/charts/kafka/templates/kafka-metrics-serviceaccount.yaml +++ b/charts/kafka/templates/kafka-metrics-serviceaccount.yaml @@ -3,6 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: metrics {{- if .Values.commonLabels }} diff --git a/charts/kafka/templates/kafka-metrics-svc.yaml b/charts/kafka/templates/kafka-metrics-svc.yaml index b123d43..4f0da59 100644 --- a/charts/kafka/templates/kafka-metrics-svc.yaml +++ b/charts/kafka/templates/kafka-metrics-svc.yaml @@ -2,7 +2,8 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }}-metrics + name: {{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . 
| nindent 4 }} app.kubernetes.io/component: metrics {{- if .Values.commonLabels }} @@ -18,28 +19,16 @@ metadata: {{- end }} {{- end }} spec: - type: {{ .Values.metrics.kafka.service.type }} - {{- if eq .Values.metrics.kafka.service.type "LoadBalancer" }} - {{- if .Values.metrics.kafka.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.metrics.kafka.service.loadBalancerIP }} - {{- end }} - {{- if .Values.metrics.kafka.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- toYaml .Values.metrics.kafka.service.loadBalancerSourceRanges | nindent 4 }} - {{- end }} - {{- end }} - {{- if and (eq .Values.metrics.kafka.service.type "ClusterIP") .Values.metrics.kafka.service.clusterIP }} + type: ClusterIP + sessionAffinity: {{ .Values.metrics.kafka.service.sessionAffinity }} + {{- if .Values.metrics.kafka.service.clusterIP }} clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} {{- end }} ports: - name: http-metrics - port: {{ .Values.metrics.kafka.service.port }} + port: {{ .Values.metrics.kafka.service.ports.metrics }} protocol: TCP targetPort: metrics - {{- if and (or (eq .Values.metrics.kafka.service.type "NodePort") (eq .Values.metrics.kafka.service.type "LoadBalancer")) (not (empty .Values.metrics.kafka.service.nodePort)) }} - nodePort: {{ .Values.metrics.kafka.service.nodePort }} - {{- else if eq .Values.metrics.kafka.service.type "ClusterIP" }} - nodePort: null - {{- end }} selector: {{- include "common.labels.matchLabels" . | nindent 4 }} app.kubernetes.io/component: metrics {{- end }} diff --git a/charts/kafka/templates/kafka-provisioning.yaml b/charts/kafka/templates/kafka-provisioning.yaml index 24337c3..212f22e 100644 --- a/charts/kafka/templates/kafka-provisioning.yaml +++ b/charts/kafka/templates/kafka-provisioning.yaml @@ -3,7 +3,8 @@ kind: Job apiVersion: batch/v1 metadata: - name: {{ include "kafka.fullname" . }}-provisioning + name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka-provisioning {{- if .Values.commonLabels }} @@ -11,7 +12,7 @@ metadata: {{- end }} annotations: helm.sh/hook: post-install,post-upgrade - helm.sh/hook-delete-policy: before-hook-creation + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded {{- if .Values.commonAnnotations }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} @@ -20,8 +21,8 @@ spec: metadata: labels: {{- include "common.labels.standard" . | nindent 8 }} app.kubernetes.io/component: kafka-provisioning - {{- if .Values.podLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- if .Values.provisioning.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podLabels "context" $) | nindent 8 }} {{- end }} annotations: {{- if .Values.provisioning.podAnnotations }} @@ -32,133 +33,224 @@ spec: {{- if .Values.provisioning.schedulerName }} schedulerName: {{ .Values.provisioning.schedulerName | quote }} {{- end }} + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} restartPolicy: OnFailure terminationGracePeriodSeconds: 0 + {{- if .Values.provisioning.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.tolerations "context" .) 
| nindent 8 }} + {{- end }} + {{- if or .Values.provisioning.initContainers .Values.provisioning.waitForKafka }} initContainers: + {{- if .Values.provisioning.waitForKafka }} - name: wait-for-available-kafka image: {{ include "kafka.image" . }} imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} command: - /bin/bash - - -c - - >- - set -e; + args: + - -ec + - | wait-for-port \ - --host={{ include "kafka.fullname" . }} \ + --host={{ include "common.names.fullname" . }} \ --state=inuse \ --timeout=120 \ - {{ .Values.service.port | int64 }}; + {{ .Values.service.ports.client | int64 }}; echo "Kafka is available"; {{- if .Values.provisioning.resources }} resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} {{- end }} + {{- end }} + {{- if .Values.provisioning.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} containers: - name: kafka-provisioning image: {{ include "kafka.image" . }} imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} {{- if .Values.diagnosticMode.enabled }} command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} {{- else }} command: - {{- if .Values.provisioning.command }} - {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} - {{- else }} - /bin/bash - {{- end }} {{- end }} {{- if .Values.diagnosticMode.enabled }} args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} {{- else }} args: - {{- if .Values.provisioning.args }} - {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} - {{- else }} - -ec - | - {{- $bootstrapServer := printf "%s:%d" (include "kafka.fullname" .) (.Values.service.port | int64) }} - {{- range $topic := .Values.provisioning.topics }} - echo "Ensure topic '{{ $topic.name }}' exists" - /opt/bitnami/kafka/bin/kafka-topics.sh \ - --create \ - --if-not-exists \ - --bootstrap-server {{ $bootstrapServer }} \ - --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \ - --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \ - {{- range $name, $value := $topic.config }} - --config {{ $name }}={{ $value }} \ + echo "Configuring environment" + . /opt/bitnami/scripts/libkafka.sh + export CLIENT_CONF="${CLIENT_CONF:-/opt/bitnami/kafka/config/client.properties}" + if [ ! -f "$CLIENT_CONF" ]; then + touch $CLIENT_CONF + + kafka_common_conf_set "$CLIENT_CONF" security.protocol {{ include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) 
}}
+              kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }}
+              kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }}
+              ! is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD"
+              {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }}
+              file_to_multiline_property() {
+                  awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}"
+              }
+              kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.key "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.key }}")"
+              kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")"
+              kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")"
+              {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }}
+              kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}"
+              kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}"
+              ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD"
+              ! is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD"
+              {{- end }}
+              {{- end }}
+              {{- if (include "kafka.client.saslAuthentication" .) }}
+              {{- if contains "plain" .Values.auth.sasl.mechanisms }}
+              kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism PLAIN
+              kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+              {{- else if contains "scram-sha-256" .Values.auth.sasl.mechanisms }}
+              kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-256
+              kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+              {{- else if contains "scram-sha-512" .Values.auth.sasl.mechanisms }}
+              kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-512
+              kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
               {{- end }}
-              --topic {{ $topic.name }}
+              {{- end }}
+              fi
+
+              echo "Running pre-provisioning script if any was given"
+              {{ .Values.provisioning.preScript | nindent 14 }}
+
+              kafka_provisioning_commands=(
+              {{- range $topic := .Values.provisioning.topics }}
+                "/opt/bitnami/kafka/bin/kafka-topics.sh \
+                    --create \
+                    --if-not-exists \
+                    --bootstrap-server ${KAFKA_SERVICE} \
+                    --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \
+                    --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \
+                    {{- range $name, $value := $topic.config }}
+                    --config {{ $name }}={{ $value }} \
+                    {{- end }}
+                    --command-config ${CLIENT_CONF} \
+                    --topic {{ $topic.name }}"
+              {{- end }}
+              {{- range $command := .Values.provisioning.extraProvisioningCommands }}
+                {{- $command | quote | nindent 16 }}
+              {{- end }}
+              )
+
+              echo "Starting provisioning"
+              for ((index=0; index < ${#kafka_provisioning_commands[@]}; index+={{ .Values.provisioning.parallel }}))
+              do
+                for j in $(seq ${index} $((${index}+{{ .Values.provisioning.parallel }}-1)))
+                do
+                  ${kafka_provisioning_commands[j]} & # Async command
+                done
+                wait # Wait for the jobs in this batch to finish
+              done
+
+              echo "Running post-provisioning script if any was given"
+              {{ .Values.provisioning.postScript | nindent 14 }}
+
               echo "Provisioning succeeded"
-            {{- end }}
             {{- end }}
           env:
             - name: BITNAMI_DEBUG
               value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            {{- if (include "kafka.client.tlsEncryption" .) }}
+            - name: KAFKA_CLIENT_KEY_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "kafka.client.passwordsSecretName" . }}
+                  key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }}
+            - name: KAFKA_CLIENT_KEYSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "kafka.client.passwordsSecretName" . }}
+                  key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }}
+            - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "kafka.client.passwordsSecretName" . }}
+                  key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }}
+            {{- end }}
+            - name: KAFKA_SERVICE
+              value: {{ printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }}
+            {{- if (include "kafka.client.saslAuthentication" .) }}
+            {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }}
+            - name: SASL_USERNAME
+              value: {{ index $clientUsers 0 | quote }}
+            - name: SASL_USER_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ include "kafka.jaasSecretName" . }}
+                  key: system-user-password
+            {{- end }}
+            {{- if .Values.provisioning.extraEnvVars }}
+            {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          {{- if or .Values.provisioning.extraEnvVarsCM .Values.provisioning.extraEnvVarsSecret }}
+          envFrom:
+            {{- if .Values.provisioning.extraEnvVarsCM }}
+            - configMapRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsCM "context" $) }}
+            {{- end }}
+            {{- if .Values.provisioning.extraEnvVarsSecret }}
+            - secretRef:
+                name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsSecret "context" $) }}
+            {{- end }}
+          {{- end }}
           {{- if .Values.provisioning.resources }}
           resources: {{- toYaml .Values.provisioning.resources | nindent 12 }}
           {{- end }}
           volumeMounts:
-            {{- if or .Values.config .Values.existingConfigmap }}
-            - name: kafka-config
-              mountPath: {{ .Values.persistence.mountPath }}/config/server.properties
-              subPath: server.properties
-            {{- end }}
             {{- if or .Values.log4j .Values.existingLog4jConfigMap }}
             - name: log4j-config
               mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties
               subPath: log4j.properties
             {{- end }}
-            {{- if (include "kafka.tlsEncryption" .) }}
-            {{- if not (empty .Values.auth.tls.existingSecrets) }}
-            {{- range $index := .Values.auth.tls.existingSecrets }}
-            - name: kafka-certs-{{ $index }}
-              mountPath: /certs-{{ $index }}
-              readOnly: true
-            {{- end }}
-            {{- else if or (not (empty (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret))) (.Files.Glob "files/tls/*.{crt,pem}") }}
-            - name: kafka-certs
+            {{- if (include "kafka.client.tlsEncryption" .
}} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs mountPath: /certs readOnly: true - {{- else if .Values.auth.tls.autoGenerated }} - {{- range $index := until $replicaCount }} - - name: kafka-certs-{{ $index }} - mountPath: /certs-{{ $index }} - readOnly: true {{- end }} {{- end }} + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} {{- end }} - volumes: - {{- if or .Values.config .Values.existingConfigmap }} - - name: kafka-config - configMap: - name: {{ include "kafka.configmapName" . }} + {{- if .Values.provisioning.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }} {{- end }} + volumes: {{- if or .Values.log4j .Values.existingLog4jConfigMap }} - name: log4j-config configMap: name: {{ include "kafka.log4j.configMapName" . }} {{ end }} - {{- if (include "kafka.tlsEncryption" .) }} - {{- if not (empty .Values.auth.tls.existingSecrets) }} - {{- range $index, $secret := .Values.auth.tls.existingSecrets }} - - name: kafka-certs-{{ $index }} - secret: - secretName: {{ tpl $secret $ }} - defaultMode: 256 - {{- end }} - {{- else if or (not (empty (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret))) (.Files.Glob "files/tls/*.{crt,pem}") }} - - name: kafka-certs - secret: - secretName: {{ if not (empty (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret)) }}{{ tpl (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret) . }}{{ else }}{{ printf "%s-tls" (include "kafka.fullname" .) }}{{ end }} - defaultMode: 256 - {{- else if .Values.auth.tls.autoGenerated }} - {{- range $index := until $replicaCount }} - - name: kafka-certs-{{ $index }} + {{- if (include "kafka.client.tlsEncryption" .) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs secret: - secretName: {{ printf "%s-%d-tls" (include "kafka.fullname" $) $index }} + secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }} defaultMode: 256 {{- end }} {{- end }} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} {{- end }} {{- end }} diff --git a/charts/kafka/templates/log4j-configmap.yaml b/charts/kafka/templates/log4j-configmap.yaml index 90eccde..8f7bc6c 100644 --- a/charts/kafka/templates/log4j-configmap.yaml +++ b/charts/kafka/templates/log4j-configmap.yaml @@ -3,6 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "kafka.log4j.configMapName" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} diff --git a/charts/kafka/templates/networkpolicy-egress.yaml b/charts/kafka/templates/networkpolicy-egress.yaml index 16f66a1..068024a 100644 --- a/charts/kafka/templates/networkpolicy-egress.yaml +++ b/charts/kafka/templates/networkpolicy-egress.yaml @@ -3,6 +3,7 @@ kind: NetworkPolicy apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} metadata: name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . 
| nindent 4 }} {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} @@ -10,7 +11,6 @@ metadata: {{- if .Values.commonAnnotations }} annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} - namespace: {{ .Release.Namespace }} spec: podSelector: matchLabels: diff --git a/charts/kafka/templates/networkpolicy-ingress.yaml b/charts/kafka/templates/networkpolicy-ingress.yaml index 56e9417..258dcab 100644 --- a/charts/kafka/templates/networkpolicy-ingress.yaml +++ b/charts/kafka/templates/networkpolicy-ingress.yaml @@ -3,6 +3,7 @@ kind: NetworkPolicy apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} metadata: name: {{ printf "%s-ingress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} @@ -10,7 +11,6 @@ metadata: {{- if .Values.commonAnnotations }} annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} - namespace: {{ .Release.Namespace }} spec: podSelector: matchLabels: @@ -20,20 +20,19 @@ spec: ingress: # Allow client connections - ports: - - port: {{ .Values.service.port }} + - port: {{ .Values.containerPorts.client }} {{- if not .Values.networkPolicy.allowExternal }} from: - podSelector: matchLabels: {{ template "common.names.fullname" . }}-client: "true" {{- if .Values.networkPolicy.explicitNamespacesSelector }} - namespaceSelector: -{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }} {{- end }} {{- end }} # Allow communication inter-broker - ports: - - port: {{ .Values.service.internalPort }} + - port: {{ .Values.containerPorts.internal }} from: - podSelector: matchLabels: @@ -41,7 +40,7 @@ spec: # Allow External connection {{- if .Values.externalAccess.enabled }} - ports: - - port: {{ .Values.service.externalPort }} + - port: {{ .Values.containerPorts.external }} {{- if .Values.externalAccess.from }} from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }} {{- end }} @@ -49,6 +48,6 @@ spec: {{- if .Values.metrics.kafka.enabled }} # Allow prometheus scrapes - ports: - - port: {{ .Values.metrics.kafka.service.port }} + - port: {{ .Values.metrics.kafka.containerPorts.metrics }} {{- end }} {{- end }} diff --git a/charts/kafka/templates/poddisruptionbudget.yaml b/charts/kafka/templates/poddisruptionbudget.yaml index 972f82a..e0a6015 100644 --- a/charts/kafka/templates/poddisruptionbudget.yaml +++ b/charts/kafka/templates/poddisruptionbudget.yaml @@ -3,7 +3,8 @@ apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} kind: PodDisruptionBudget metadata: - name: {{ template "kafka.fullname" . }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . 
| nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} diff --git a/charts/kafka/templates/role.yaml b/charts/kafka/templates/role.yaml index 08e2784..63215b3 100644 --- a/charts/kafka/templates/role.yaml +++ b/charts/kafka/templates/role.yaml @@ -2,7 +2,8 @@ apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} kind: Role metadata: - name: {{ template "kafka.fullname" . }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} diff --git a/charts/kafka/templates/rolebinding.yaml b/charts/kafka/templates/rolebinding.yaml index 7c69df0..5b6ae26 100644 --- a/charts/kafka/templates/rolebinding.yaml +++ b/charts/kafka/templates/rolebinding.yaml @@ -2,7 +2,8 @@ apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} kind: RoleBinding metadata: - name: {{ template "kafka.fullname" . }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} @@ -13,7 +14,7 @@ metadata: {{- end }} roleRef: kind: Role - name: {{ template "kafka.fullname" . }} + name: {{ include "common.names.fullname" . }} apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount diff --git a/charts/kafka/templates/scripts-configmap.yaml b/charts/kafka/templates/scripts-configmap.yaml index fad9595..1fc3ee0 100644 --- a/charts/kafka/templates/scripts-configmap.yaml +++ b/charts/kafka/templates/scripts-configmap.yaml @@ -1,7 +1,8 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ template "kafka.fullname" . }}-scripts + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} @@ -10,14 +11,14 @@ metadata: annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} data: - {{- $fullname := include "kafka.fullname" . }} + {{- $fullname := include "common.names.fullname" . 
}} {{- $releaseNamespace := .Release.Namespace }} {{- $clusterDomain := .Values.clusterDomain }} - {{- $interBrokerPort := .Values.service.internalPort }} - {{- $clientPort := .Values.service.port }} - {{- $jksTruststoreSecret := coalesce .Values.auth.tls.jksTruststoreSecret .Values.auth.jksTruststoreSecret -}} - {{- $jksTruststore := coalesce .Values.auth.tls.jksTruststore .Values.auth.jksTruststore -}} - {{- $jksKeystoreSAN := coalesce .Values.auth.tls.jksKeystoreSAN .Values.auth.jksKeystoreSAN -}} + {{- $interBrokerPort := .Values.service.ports.internal }} + {{- $clientPort := .Values.service.ports.client }} + {{- $jksTruststoreSecret := .Values.auth.tls.jksTruststoreSecret -}} + {{- $jksTruststore := .Values.auth.tls.jksTruststore -}} + {{- $jksKeystoreSAN := .Values.auth.tls.jksKeystoreSAN -}} {{- if .Values.externalAccess.autoDiscovery.enabled }} auto-discovery.sh: |- #!/bin/bash @@ -85,20 +86,20 @@ data: # Configure external ip and port {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} {{- if .Values.externalAccess.autoDiscovery.enabled }} - export EXTERNAL_ACCESS_IP="$(<${SHARED_FILE})" + export EXTERNAL_ACCESS_HOST="$(<${SHARED_FILE})" {{- else }} - export EXTERNAL_ACCESS_IP=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + export EXTERNAL_ACCESS_HOST=$(echo '{{ .Values.externalAccess.service.loadBalancerNames | default .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") {{- end }} - export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.port }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.ports.external }} {{- else if eq .Values.externalAccess.service.type "NodePort" }} {{- if and .Values.externalAccess.service.usePodIPs .Values.externalAccess.autoDiscovery.enabled }} - export EXTERNAL_ACCESS_IP="${MY_POD_IP}" + export EXTERNAL_ACCESS_HOST="${MY_POD_IP}" {{- else if or .Values.externalAccess.service.useHostIPs .Values.externalAccess.autoDiscovery.enabled }} - export EXTERNAL_ACCESS_IP="${HOST_IP}" + export EXTERNAL_ACCESS_HOST="${HOST_IP}" {{- else if .Values.externalAccess.service.domain }} - export EXTERNAL_ACCESS_IP={{ .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} {{- else }} - export EXTERNAL_ACCESS_IP=$(curl -s https://ipinfo.io/ip) + export EXTERNAL_ACCESS_HOST=$(curl -s https://ipinfo.io/ip) {{- end }} {{- if .Values.externalAccess.autoDiscovery.enabled }} export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" @@ -111,7 +112,7 @@ data: {{- if .Values.advertisedListeners }} export KAFKA_CFG_ADVERTISED_LISTENERS={{ join "," .Values.advertisedListeners }} {{- else }} - export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_IP}:${EXTERNAL_ACCESS_PORT}" + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_HOST}:${EXTERNAL_ACCESS_PORT}" {{- end }} {{- end }} @@ -120,7 +121,7 @@ data: {{- if eq .Values.auth.tls.type "jks" }} {{- if not (empty .Values.auth.tls.existingSecrets) }} 
JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs-${ID}" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }}
-          JKS_KEYSTORE="/certs-${ID}/kafka-keystore.jks"
+          JKS_KEYSTORE={{ printf "/certs-${ID}/%s" (default "kafka.keystore.jks" $jksKeystoreSAN) | quote }}
           {{- else }}
           JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }}
           JKS_KEYSTORE={{ printf "/certs/%s" (default "kafka-${ID}.keystore.jks" $jksKeystoreSAN) | quote }}
@@ -135,7 +136,8 @@ data:
           export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.jks"

         {{- else if eq .Values.auth.tls.type "pem" }}
-          {{- if or (not (empty .Values.auth.tls.existingSecrets)) (and .Values.auth.tls.autoGenerated (empty (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret)) (not (.Files.Glob "files/tls/*.{crt,pem}"))) }}
+
+          {{- if or (not (empty .Values.auth.tls.existingSecrets)) .Values.auth.tls.autoGenerated }}
           PEM_CA="/certs-${ID}/ca.crt"
           PEM_CERT="/certs-${ID}/tls.crt"
           PEM_KEY="/certs-${ID}/tls.key"
@@ -144,9 +146,29 @@ data:
           PEM_CERT="/certs/kafka-${ID}.keystore.pem"
           PEM_KEY="/certs/kafka-${ID}.keystore.key"
           {{- end }}
-          if [[ -f "$PEM_CA" ]] && [[ -f "$PEM_CERT" ]] && [[ -f "$PEM_KEY" ]]; then
-            cp "$PEM_CA" "/opt/bitnami/kafka/config/certs/kafka.truststore.pem"
-            cp "$PEM_CERT" "/opt/bitnami/kafka/config/certs/kafka.keystore.pem"
+          if [[ -f "$PEM_CERT" ]] && [[ -f "$PEM_KEY" ]]; then
+            CERT_DIR="/opt/bitnami/kafka/config/certs"
+            PEM_CA_LOCATION="${CERT_DIR}/kafka.truststore.pem"
+            PEM_CERT_LOCATION="${CERT_DIR}/kafka.keystore.pem"
+            {{- if .Values.auth.tls.pemChainIncluded }}
+            cat $PEM_CERT | csplit - -s -z '/\-*END CERTIFICATE\-*/+1' '{*}' -f ${CERT_DIR}/xx
+            FIND_CA_RESULT=$(find ${CERT_DIR} -not -name 'xx00' -name 'xx*')
+            if [[ -z "$FIND_CA_RESULT" ]]; then
+              echo "auth.tls.pemChainIncluded was set, but PEM chain only contained 1 cert"
+              exit 1
+            fi
+            echo "$FIND_CA_RESULT" | sort | xargs cat >> "$PEM_CA_LOCATION"
+            cat ${CERT_DIR}/xx00 > "$PEM_CERT_LOCATION"
+            {{- else }}
+            if [[ -f "$PEM_CA" ]]; then
+              cp "$PEM_CA" "$PEM_CA_LOCATION"
+              cp "$PEM_CERT" "$PEM_CERT_LOCATION"
+            else
+              echo "PEM_CA not provided, and auth.tls.pemChainIncluded was not true. One of these values must be set when using PEM type for TLS."
+              exit 1
+            fi
+            {{- end }}
+            # Ensure the key is in PEM format with PKCS#8
             openssl pkcs8 -topk8 -nocrypt -in "$PEM_KEY" > "/opt/bitnami/kafka/config/certs/kafka.keystore.key"
           else
@@ -157,4 +179,17 @@ data:
           {{- end }}
         {{- end }}

+        # Configure zookeeper client
+        {{- if and (not (empty .Values.auth.zookeeper.tls.existingSecret)) .Values.auth.zookeeper.tls.enabled }}
+        JKS_TRUSTSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretTruststoreKey) | quote }}
+        JKS_KEYSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretKeystoreKey) | quote }}
+        if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then
+          CERT_DIR="/opt/bitnami/kafka/config/certs"
+          TRUSTSTORE_LOCATION="${CERT_DIR}/zookeeper.truststore.jks"
+          cp "$JKS_TRUSTSTORE" "$TRUSTSTORE_LOCATION"
+          cp "$JKS_KEYSTORE" "${CERT_DIR}/zookeeper.keystore.jks"
+          export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE="${TRUSTSTORE_LOCATION}"
+        fi
+        {{- end }}
+
         exec /entrypoint.sh /run.sh
diff --git a/charts/kafka/templates/serviceaccount.yaml b/charts/kafka/templates/serviceaccount.yaml
index 14dc4fa..73091f5 100644
--- a/charts/kafka/templates/serviceaccount.yaml
+++ b/charts/kafka/templates/serviceaccount.yaml
@@ -3,13 +3,18 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ template "kafka.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace | quote }}
   labels: {{- include "common.labels.standard" . | nindent 4 }}
     app.kubernetes.io/component: kafka
     {{- if .Values.commonLabels }}
     {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
     {{- end }}
-  {{- if .Values.commonAnnotations }}
-  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
-  {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.serviceAccount.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
 automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
 {{- end }}
diff --git a/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/charts/kafka/templates/servicemonitor-jmx-metrics.yaml
index 53ede60..eae8e70 100644
--- a/charts/kafka/templates/servicemonitor-jmx-metrics.yaml
+++ b/charts/kafka/templates/servicemonitor-jmx-metrics.yaml
@@ -2,12 +2,17 @@
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
-  name: {{ template "kafka.fullname" . }}-jmx-metrics
+  name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }}
   {{- if .Values.metrics.serviceMonitor.namespace }}
   namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- else }}
+  namespace: {{ .Release.Namespace | quote }}
   {{- end }}
   labels: {{- include "common.labels.standard" .
| nindent 4 }} app.kubernetes.io/component: kafka + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} @@ -15,12 +20,15 @@ metadata: annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} selector: matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} {{- if .Values.metrics.serviceMonitor.selector }} {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} {{- end }} - app.kubernetes.io/component: kafka + app.kubernetes.io/component: metrics endpoints: - port: http-metrics path: "/" @@ -31,10 +39,13 @@ spec: scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} {{- end }} {{- if .Values.metrics.serviceMonitor.relabelings }} - relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} {{- end }} {{- if .Values.metrics.serviceMonitor.metricRelabelings }} - metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} {{- end }} namespaceSelector: matchNames: diff --git a/charts/kafka/templates/servicemonitor-metrics.yaml b/charts/kafka/templates/servicemonitor-metrics.yaml index e5dddb4..1e428c6 100644 --- a/charts/kafka/templates/servicemonitor-metrics.yaml +++ b/charts/kafka/templates/servicemonitor-metrics.yaml @@ -2,12 +2,17 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "kafka.fullname" . }}-metrics + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} {{- if .Values.metrics.serviceMonitor.namespace }} namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} {{- end }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} {{- if .Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} @@ -15,6 +20,9 @@ metadata: annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} selector: matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} {{- if .Values.metrics.serviceMonitor.selector }} @@ -31,10 +39,13 @@ spec: scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} {{- end }} {{- if .Values.metrics.serviceMonitor.relabelings }} - relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} {{- end }} {{- if .Values.metrics.serviceMonitor.metricRelabelings }} - metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} {{- end }} namespaceSelector: matchNames: diff --git a/charts/kafka/templates/statefulset.yaml b/charts/kafka/templates/statefulset.yaml index e8b13f4..4affb5d 100644 --- a/charts/kafka/templates/statefulset.yaml +++ b/charts/kafka/templates/statefulset.yaml @@ -1,17 +1,17 @@ {{- $replicaCount := int .Values.replicaCount }} -{{- $fullname := include "kafka.fullname" . }} +{{- $fullname := include "common.names.fullname" . }} {{- $releaseNamespace := .Release.Namespace }} {{- $clusterDomain := .Values.clusterDomain }} -{{- $interBrokerPort := .Values.service.internalPort }} -{{- $clientPort := .Values.service.port }} +{{- $interBrokerPort := .Values.service.ports.internal }} +{{- $clientPort := .Values.service.ports.client }} {{- $interBrokerProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.interBrokerProtocol) -}} {{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} -{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} -{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} kind: StatefulSet metadata: - name: {{ include "kafka.fullname" . }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} @@ -26,15 +26,8 @@ spec: selector: matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} app.kubernetes.io/component: kafka - serviceName: {{ template "kafka.fullname" . }}-headless - updateStrategy: - type: {{ .Values.updateStrategy | quote }} - {{- if (eq "OnDelete" .Values.updateStrategy) }} - rollingUpdate: null - {{- else if .Values.rollingUpdatePartition }} - rollingUpdate: - partition: {{ .Values.rollingUpdatePartition }} - {{- end }} + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} template: metadata: labels: {{- include "common.labels.standard" . 
| nindent 8 }} @@ -42,7 +35,6 @@ spec: {{- if .Values.podLabels }} {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} {{- end }} - {{- if or (include "kafka.createConfigmap" .) (include "kafka.createJaasSecret" .) .Values.externalAccess.enabled (include "kafka.metrics.jmx.createConfigmap" .) .Values.podAnnotations }} annotations: {{- if (include "kafka.createConfigmap" .) }} checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} @@ -51,7 +43,7 @@ spec: checksum/jaas-secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} {{- end }} {{- if (include "kafka.createTlsSecret" .) }} - checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} {{- end }} {{- if .Values.externalAccess.enabled }} checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} @@ -62,12 +54,13 @@ spec: {{- if .Values.podAnnotations }} {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} {{- end }} - {{- end }} spec: {{- include "kafka.imagePullSecrets" . | nindent 6 }} {{- if .Values.hostAliases }} hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} {{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostIPC: {{ .Values.hostIPC }} {{- if .Values.schedulerName }} schedulerName: {{ .Values.schedulerName | quote }} {{- end }} @@ -86,7 +79,7 @@ spec: tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} {{- end }} {{- if .Values.topologySpreadConstraints }} - topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} {{- end }} {{- if .Values.terminationGracePeriodSeconds }} terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} @@ -97,7 +90,7 @@ spec: {{- if .Values.podSecurityContext.enabled }} securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} {{- end }} - serviceAccountName: {{ template "kafka.serviceAccountName" . }} + serviceAccountName: {{ include "kafka.serviceAccountName" . 
}} {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.initContainers }} initContainers: {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} @@ -109,12 +102,14 @@ spec: args: - -ec - | - chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" - chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.logPersistence.mountPath }}" - {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} - securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + mkdir -p "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + find "{{ .Values.logPersistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} {{- else }} - securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} {{- end }} {{- if .Values.volumePermissions.resources }} resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} @@ -186,7 +181,7 @@ spec: {{- if .Values.zookeeper.enabled }} value: {{ printf "%s%s" (include "kafka.zookeeper.fullname" .) (tpl .Values.zookeeperChrootPath .) 
| quote }} {{- else }} - value: {{ include "common.tplvalues.render" (dict "value" (join "," .Values.externalZookeeper.servers) "context" $) }} + value: {{ include "common.tplvalues.render" (dict "value" (printf "%s%s" (join "," .Values.externalZookeeper.servers) (tpl .Values.zookeeperChrootPath .)) "context" $) }} {{- end }} - name: KAFKA_INTER_BROKER_LISTENER_NAME value: {{ .Values.interBrokerListenerName | quote }} @@ -194,15 +189,15 @@ spec: {{- if .Values.listenerSecurityProtocolMap }} value: {{ .Values.listenerSecurityProtocolMap | quote }} {{- else if .Values.externalAccess.enabled }} - value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $clientProtocol }}" + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $externalClientProtocol }}" {{- else }} value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" {{- end }} - {{- if or ($clientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") (coalesce .Values.auth.sasl.jaas.zookeeperUser .Values.auth.jaas.zookeeperUser) }} + {{- if or ($clientProtocol | regexFind "SASL") ($externalClientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.sasl.jaas.zookeeperUser }} - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS - value: {{ upper (coalesce .Values.auth.sasl.mechanisms .Values.auth.saslMechanisms) | quote }} + value: {{ upper .Values.auth.sasl.mechanisms | quote }} - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL - value: {{ upper (coalesce .Values.auth.sasl.interBrokerMechanism .Values.auth.saslInterBrokerMechanism) | quote }} + value: {{ upper .Values.auth.sasl.interBrokerMechanism | quote }} {{- end }} - name: KAFKA_CFG_LISTENERS {{- if .Values.listeners }} @@ -238,7 +233,7 @@ spec: value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" {{- if (include "kafka.client.saslAuthentication" .) }} - name: KAFKA_CLIENT_USERS - value: {{ join "," (coalesce .Values.auth.sasl.jaas.clientUsers .Values.auth.jaas.clientUsers) | quote }} + value: {{ join "," .Values.auth.sasl.jaas.clientUsers | quote }} - name: KAFKA_CLIENT_PASSWORDS valueFrom: secretKeyRef: @@ -247,7 +242,7 @@ spec: {{- end }} {{- if (include "kafka.interBroker.saslAuthentication" .) }} - name: KAFKA_INTER_BROKER_USER - value: {{ coalesce .Values.auth.sasl.jaas.interBrokerUser .Values.auth.jaas.interBrokerUser | quote }} + value: {{ .Values.auth.sasl.jaas.interBrokerUser | quote }} - name: KAFKA_INTER_BROKER_PASSWORD valueFrom: secretKeyRef: @@ -255,27 +250,51 @@ spec: key: inter-broker-password {{- end }} {{- end }} - {{- if and .Values.zookeeper.auth.enabled (coalesce .Values.auth.sasl.jaas.zookeeperUser .Values.auth.jaas.zookeeperUser) }} - - name: KAFKA_ZOOKEEPER_PROTOCOL - value: "SASL" + {{- if and .Values.zookeeper.auth.enabled .Values.auth.sasl.jaas.zookeeperUser }} - name: KAFKA_ZOOKEEPER_USER - value: {{ coalesce .Values.auth.sasl.jaas.zookeeperUser .Values.auth.jaas.zookeeperUser | quote }} + value: {{ .Values.auth.sasl.jaas.zookeeperUser | quote }} - name: KAFKA_ZOOKEEPER_PASSWORD valueFrom: secretKeyRef: name: {{ include "kafka.jaasSecretName" . }} key: zookeeper-password {{- end }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: {{ include "kafka.zookeeper.protocol" . 
}} + {{- if .Values.auth.zookeeper.tls.enabled }} + - name: KAFKA_ZOOKEEPER_TLS_TYPE + value: {{ upper .Values.auth.zookeeper.tls.type | quote }} + - name: KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME + value: {{ .Values.auth.zookeeper.tls.verifyHostname | quote }} + {{- if .Values.auth.zookeeper.tls.passwordsSecret }} + - name: KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretKeystoreKey | quote }} + - name: KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretTruststoreKey | quote }} + {{- end }} + {{- end }} {{- if (include "kafka.tlsEncryption" .) }} - name: KAFKA_TLS_TYPE value: {{ upper .Values.auth.tls.type | quote }} - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM - value: {{ default "" (coalesce .Values.auth.tls.endpointIdentificationAlgorithm .Values.auth.tlsEndpointIdentificationAlgorithm) | quote }} + value: {{ default "" .Values.auth.tls.endpointIdentificationAlgorithm | quote }} - name: KAFKA_TLS_CLIENT_AUTH - value: {{ ternary "required" "none" (eq .Values.auth.clientProtocol "mtls") | quote }} - {{- $tlsPassword := coalesce .Values.auth.tls.password .Values.auth.jksPassword }} + value: {{ ternary "required" "none" (or (eq (include "kafka.externalClientProtocol" . ) "mtls") (eq .Values.auth.clientProtocol "mtls")) | quote }} - name: KAFKA_CERTIFICATE_PASSWORD - value: {{ default "" $tlsPassword | quote }} + {{- if .Values.auth.tls.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.auth.tls.existingSecret }} + key: password + {{- else }} + value: {{ default "" .Values.auth.tls.password | quote }} + {{- end }} {{- end }} {{- if .Values.metrics.jmx.enabled }} - name: JMX_PORT @@ -338,42 +357,53 @@ spec: - name: KAFKA_CFG_SUPER_USERS value: {{ .Values.superUsers | quote }} {{- if .Values.extraEnvVars }} - {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} {{- end }} + {{- end }} ports: - name: kafka-client - containerPort: 9092 + containerPort: {{ .Values.containerPorts.client }} - name: kafka-internal - containerPort: {{ $interBrokerPort }} + containerPort: {{ .Values.containerPorts.internal }} {{- if .Values.externalAccess.enabled }} - name: kafka-external - containerPort: 9094 + containerPort: {{ .Values.containerPorts.external }} {{- end }} {{- if not .Values.diagnosticMode.enabled }} {{- if .Values.livenessProbe.enabled }} - livenessProbe: + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} tcpSocket: port: kafka-client - initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} - timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.livenessProbe.failureThreshold }} - periodSeconds: {{ .Values.livenessProbe.periodSeconds }} - 
successThreshold: {{ .Values.livenessProbe.successThreshold }} {{- else if .Values.customLivenessProbe }} livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} {{- end }} {{- if .Values.readinessProbe.enabled }} - readinessProbe: + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} tcpSocket: port: kafka-client - initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} - timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.readinessProbe.failureThreshold }} - periodSeconds: {{ .Values.readinessProbe.periodSeconds }} - successThreshold: {{ .Values.readinessProbe.successThreshold }} {{- else if .Values.customReadinessProbe }} readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} {{- end }} + {{- if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- else if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} {{- end }} {{- if .Values.resources }} resources: {{- toYaml .Values.resources | nindent 12 }} @@ -407,10 +437,6 @@ spec: mountPath: /certs-{{ $index }} readOnly: true {{- end }} - {{- else if or (not (empty (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret))) (.Files.Glob "files/tls/*.{crt,pem}") }} - - name: kafka-certs - mountPath: /certs - readOnly: true {{- else if .Values.auth.tls.autoGenerated }} {{- range $index := until $replicaCount }} - name: kafka-certs-{{ $index }} @@ -418,7 +444,12 @@ spec: readOnly: true {{- end }} {{- end }} - {{- if (coalesce .Values.auth.tls.jksTruststoreSecret .Values.auth.jksTruststoreSecret) }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + mountPath: /kafka-zookeeper-cert + readOnly: true + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} - name: kafka-truststore mountPath: /truststore readOnly: true @@ -429,7 +460,7 @@ spec: {{- end }} {{- if .Values.metrics.jmx.enabled }} - name: jmx-exporter - image: {{ template "kafka.metrics.jmx.image" . }} + image: {{ include "kafka.metrics.jmx.image" . }} imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} securityContext: {{- omit .Values.metrics.jmx.containerSecurityContext "enabled" | toYaml | nindent 12 }} @@ -440,6 +471,7 @@ spec: {{- else }} command: - java + args: - -XX:+UnlockExperimentalVMOptions - -XX:+UseCGroupMemoryLimitForHeap - -XX:MaxRAMFraction=1 @@ -451,7 +483,7 @@ spec: {{- end }} ports: - name: metrics - containerPort: 5556 + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} {{- if .Values.metrics.jmx.resources }} resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} {{- end }} @@ -475,7 +507,7 @@ spec: {{ end }} - name: scripts configMap: - name: {{ include "kafka.fullname" . }}-scripts + name: {{ include "common.names.fullname" . 
}}-scripts defaultMode: 0755 {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} - name: shared @@ -494,23 +526,24 @@ spec: secretName: {{ tpl $secret $ }} defaultMode: 256 {{- end }} - {{- else if or (not (empty (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret))) (.Files.Glob "files/tls/*.{crt,pem}") }} - - name: kafka-certs - secret: - secretName: {{ if not (empty (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret)) }}{{ tpl (coalesce .Values.auth.tls.existingSecret .Values.auth.jksSecret) . }}{{ else }}{{ printf "%s-tls" (include "kafka.fullname" .) }}{{ end }} - defaultMode: 256 {{- else if .Values.auth.tls.autoGenerated }} {{- range $index := until $replicaCount }} - name: kafka-certs-{{ $index }} secret: - secretName: {{ printf "%s-%d-tls" (include "kafka.fullname" $) $index }} + secretName: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $index }} defaultMode: 256 {{- end }} {{- end }} - {{- if (coalesce .Values.auth.tls.jksTruststoreSecret .Values.auth.jksTruststoreSecret) }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + secret: + secretName: {{ .Values.auth.zookeeper.tls.existingSecret }} + defaultMode: 256 + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} - name: kafka-truststore secret: - secretName: {{ coalesce .Values.auth.tls.jksTruststoreSecret .Values.auth.jksTruststoreSecret }} + secretName: {{ .Values.auth.tls.jksTruststoreSecret }} defaultMode: 256 {{- end }} {{- end }} @@ -574,4 +607,3 @@ spec: selector: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.selector "context" $) | nindent 10 }} {{- end -}} {{- end }} -{{- end }} diff --git a/charts/kafka/templates/svc-external-access.yaml b/charts/kafka/templates/svc-external-access.yaml index 9169a65..a4a88e8 100644 --- a/charts/kafka/templates/svc-external-access.yaml +++ b/charts/kafka/templates/svc-external-access.yaml @@ -1,26 +1,29 @@ {{- if .Values.externalAccess.enabled }} -{{- $fullName := include "kafka.fullname" . }} +{{- $fullName := include "common.names.fullname" . }} {{- $replicaCount := .Values.replicaCount | int }} {{- $root := . 
}} {{- range $i, $e := until $replicaCount }} {{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} {{- $_ := set $ "targetPod" $targetPod }} ---- apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" $ }}-{{ $i }}-external + name: {{ printf "%s-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + namespace: {{ $root.Release.Namespace | quote }} labels: {{- include "common.labels.standard" $ | nindent 4 }} app.kubernetes.io/component: kafka pod: {{ $targetPod }} {{- if $root.Values.commonLabels }} {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} - {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations $root.Values.externalAccess.service.loadBalancerAnnotations }} annotations: + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $root.Values.externalAccess.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $root.Values.externalAccess.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }} + {{- end }} {{- if $root.Values.externalAccess.service.annotations }} - {{ include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} {{- end }} {{- if $root.Values.commonAnnotations }} {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} @@ -29,7 +32,7 @@ metadata: spec: type: {{ $root.Values.externalAccess.service.type }} {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} - {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerIPs)) (eq (len $root.Values.externalAccess.service.loadBalancerIPs) $replicaCount) }} loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} {{- end }} {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} @@ -38,13 +41,16 @@ spec: {{- end }} ports: - name: tcp-kafka - port: {{ $root.Values.externalAccess.service.port }} + port: {{ $root.Values.externalAccess.service.ports.external }} {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} {{- else }} nodePort: null {{- end }} targetPort: kafka-external + {{- if $root.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} app.kubernetes.io/component: kafka statefulset.kubernetes.io/pod-name: {{ $targetPod }} diff --git a/charts/kafka/templates/svc-headless.yaml b/charts/kafka/templates/svc-headless.yaml index ad2303a..72f9960 100644 --- a/charts/kafka/templates/svc-headless.yaml +++ b/charts/kafka/templates/svc-headless.yaml @@ -1,7 +1,8 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }}-headless + name: {{ printf "%s-headless" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} @@ -15,11 +16,11 @@ spec: clusterIP: None ports: - name: tcp-client - port: {{ .Values.service.port }} + port: {{ .Values.service.ports.client }} protocol: TCP targetPort: kafka-client - name: tcp-internal - port: {{ .Values.service.internalPort }} + port: {{ .Values.service.ports.internal }} protocol: TCP targetPort: kafka-internal selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/kafka/templates/svc.yaml b/charts/kafka/templates/svc.yaml index 70b24dd..97947b7 100644 --- a/charts/kafka/templates/svc.yaml +++ b/charts/kafka/templates/svc.yaml @@ -1,7 +1,8 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }} + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} @@ -18,17 +19,27 @@ metadata: {{- end }} spec: type: {{ .Values.service.type }} - {{- if eq .Values.service.type "LoadBalancer" }} - {{- if .Values.service.loadBalancerIP }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} loadBalancerIP: {{ .Values.service.loadBalancerIP }} {{- end }} - {{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} {{- end }} ports: - name: tcp-client - port: {{ .Values.service.port }} + port: {{ .Values.service.ports.client }} protocol: TCP targetPort: kafka-client {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} @@ -38,12 +49,15 @@ spec: {{- end }} {{- if and .Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} - name: tcp-external - port: {{ .Values.service.externalPort }} + port: {{ .Values.service.ports.external }} protocol: TCP targetPort: kafka-external {{- if (not (empty .Values.service.nodePorts.external)) }} nodePort: {{ .Values.service.nodePorts.external }} {{- end }} {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} app.kubernetes.io/component: kafka diff --git a/charts/kafka/values.yaml b/charts/kafka/values.yaml index 51669d0..8f8646e 100644 --- a/charts/kafka/values.yaml +++ b/charts/kafka/values.yaml @@ -18,10 +18,13 @@ global: ## @section Common parameters -## @param nameOverride String to partially override kafka.fullname +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname ## nameOverride: "" -## @param fullnameOverride String to fully override kafka.fullname +## @param fullnameOverride String to fully override common.names.fullname ## fullnameOverride: "" ## @param clusterDomain Default Kubernetes cluster domain @@ -36,18 +39,17 @@ commonAnnotations: {} ## @param extraDeploy Array of extra objects to deploy with the release ## extraDeploy: [] - -## Enable diagnostic mode in the deployment +## Enable diagnostic mode in the statefulset ## diagnosticMode: ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) ## enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment + ## @param diagnosticMode.command Command to override all containers in the statefulset ## command: - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment + ## @param diagnosticMode.args Args to override all containers in the statefulset ## args: - infinity @@ -61,20 +63,21 @@ diagnosticMode: ## @param image.tag Kafka image tag (immutable tags are recommended) ## @param image.pullPolicy Kafka image pull policy ## @param image.pullSecrets Specify docker-registry secret names as an array -## @param image.debug Set to true if you would like to see extra information on logs +## @param image.debug Specify if debug values should be set ## image: registry: docker.io repository: bitnami/kafka - tag: 2.8.1-debian-10-r99 + tag: 3.2.0-debian-10-r4 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: + ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## @@ -82,12 +85,11 @@ image: ## Set to true if you would like to see extra information on logs ## debug: false -## @param config Configuration file for Kafka. Auto-generated based on other parameters when not specified (see [below]( +## @param config Configuration file for Kafka. Auto-generated based on other parameters when not specified ## Specify content for server.properties ## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart) ## The server.properties is auto-generated based on other parameters when this parameter is not specified -## -## Example: +## e.g: ## config: |- ## broker.id=-1 ## listeners=PLAINTEXT://:9092 @@ -115,21 +117,20 @@ image: ## config: "" ## @param existingConfigmap ConfigMap with Kafka Configuration -## NOTE: This will override config AND any KAFKA_CFG_ environment variables. 
+## NOTE: This will override `config` AND any KAFKA_CFG_ environment variables ## existingConfigmap: "" -## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers. -## An optional log4j.properties file to overwrite the default of the Kafka brokers. -## See an example log4j.properties at: -## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers +## An optional log4j.properties file to overwrite the default of the Kafka brokers +## ref: https://github.com/apache/kafka/blob/trunk/config/log4j.properties ## log4j: "" -## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file. -## The name of an existing ConfigMap containing a log4j.properties file. -## NOTE: this will override log4j. +## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file +## The name of an existing ConfigMap containing a log4j.properties file +## NOTE: this will override `log4j` ## existingLog4jConfigMap: "" -## @param heapOpts Kafka's Java Heap size +## @param heapOpts Kafka Java Heap size ## heapOpts: -Xmx1024m -Xms1024m ## @param deleteTopicEnable Switch to enable topic deletion or not @@ -195,51 +196,23 @@ socketRequestMaxBytes: _104857600 ## @param socketSendBufferBytes The send buffer (SO_SNDBUF) used by the socket server ## socketSendBufferBytes: 102400 -## @param zookeeperConnectionTimeoutMs Timeout in ms for connecting to Zookeeper +## @param zookeeperConnectionTimeoutMs Timeout in ms for connecting to ZooKeeper ## zookeeperConnectionTimeoutMs: 6000 ## @param zookeeperChrootPath Path which puts data under some path in the global ZooKeeper namespace ## ref: https://kafka.apache.org/documentation/#brokerconfigs_zookeeper.connect ## zookeeperChrootPath: "" -## @param authorizerClassName The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties. +## @param authorizerClassName The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties ## authorizerClassName: "" -## @param allowEveryoneIfNoAclFound By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users. 
+## @param allowEveryoneIfNoAclFound By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users ## allowEveryoneIfNoAclFound: true ## @param superUsers You can add super users in server.properties ## superUsers: User:admin -## @param command Override kafka container command -## -command: - - /scripts/setup.sh -## @param args Override kafka container arguments -## -args: [] -## @param extraEnvVars Extra environment variables to add to kafka pods (see [below]({KEY} -## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration -## Example: -## extraEnvVars: -## - name: KAFKA_CFG_BACKGROUND_THREADS -## value: "10" -## -extraEnvVars: [] -## @param extraVolumes Extra volume(s) to add to Kafka statefulset -## Examples: -## extraVolumes: -## - name: kafka-jaas -## secret: -## secretName: kafka-jaas -extraVolumes: [] -## @param extraVolumeMounts Extra volumeMount(s) to add to Kafka containers -## extraVolumeMounts: -## - name: kafka-jaas -## mountPath: /bitnami/kafka/config/kafka_jaas.conf -## subPath: kafka_jaas.conf -extraVolumeMounts: [] -## Authentication parameteres +## Authentication parameters ## https://github.com/bitnami/bitnami-docker-kafka#security ## auth: @@ -252,14 +225,18 @@ auth: ## | sasl | Yes (via SASL) | No | ## | sasl_tls | Yes (via SASL) | Yes | ## @param auth.clientProtocol Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.externalClientProtocol Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` ## @param auth.interBrokerProtocol Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` ## clientProtocol: plaintext + # Note: empty by default for backwards compatibility reasons, find more information at + # https://github.com/bitnami/charts/pull/8902/ + externalClientProtocol: "" interBrokerProtocol: plaintext ## SASL configuration ## sasl: - ## @param auth.sasl.mechanisms SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` + ## @param auth.sasl.mechanisms SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` ## mechanisms: plain,scram-sha-256,scram-sha-512 ## @param auth.sasl.interBrokerMechanism SASL mechanism for inter broker communication. 
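For orientation, a minimal values override wiring these SASL parameters together might look like the following (a sketch only: the mechanism choice, user name and password below are placeholders, not chart defaults):

```yaml
auth:
  clientProtocol: sasl
  interBrokerProtocol: sasl
  sasl:
    mechanisms: scram-sha-256
    interBrokerMechanism: scram-sha-256
    jaas:
      clientUsers:
        - user1
      clientPasswords:
        - change-me
```

Brokers would then require SCRAM-SHA-256 credentials from clients; alternatively, `auth.sasl.jaas.existingSecret` (below) can supply the passwords instead of plain values.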
@@ -289,10 +266,10 @@ auth:
     ## @param auth.sasl.jaas.interBrokerPassword Kafka inter broker communication password for SASL authentication
     ##
     interBrokerPassword: ""
-    ## @param auth.sasl.jaas.zookeeperUser Kafka Zookeeper user for SASL authentication
+    ## @param auth.sasl.jaas.zookeeperUser Kafka ZooKeeper user for SASL authentication
     ##
     zookeeperUser: ""
-    ## @param auth.sasl.jaas.zookeeperPassword Kafka Zookeeper password for SASL authentication
+    ## @param auth.sasl.jaas.zookeeperPassword Kafka ZooKeeper password for SASL authentication
     ##
     zookeeperPassword: ""
     ## @param auth.sasl.jaas.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser
@@ -300,30 +277,16 @@ auth:
     ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD
     ##
     existingSecret: ""
-  ## @param auth.saslMechanisms DEPRECATED: use `auth.sasl.mechanisms` instead.
-  ##
-  saslMechanisms: plain,scram-sha-256,scram-sha-512
-  ## @param auth.saslInterBrokerMechanism DEPRECATED: use `auth.sasl.interBrokerMechanism` instead.
-  ##
-  saslInterBrokerMechanism: plain
-  ## @param auth.jaas [object] DEPRECATED: use `auth.sasl.jaas` instead.
-  ## @skip auth.jaas.clientUsers
-  ##
-  jaas:
-    clientUsers:
-      - user
-    clientPasswords: []
-    interBrokerUser: admin
-    interBrokerPassword: ""
-    zookeeperUser: ""
-    zookeeperPassword: ""
-    existingSecret: ""
   ## TLS configuration
   ##
   tls:
     ## @param auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`
     ##
     type: jks
+    ## @param auth.tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert.
+    ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA.
+    ##
+    pemChainIncluded: false
     ## @param auth.tls.existingSecrets Array existing secrets containing the TLS certificates for the Kafka brokers
     ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore.
     ## Create these secrets following the steps below:
@@ -347,21 +310,22 @@ auth:
     ##     ...
     ##
     existingSecrets: []
-    ## @param auth.tls.existingSecret DEPRECATED: use `auth.tls.existingSecrets` instead.
-    ##
-    existingSecret: ""
     ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem`
     ## Note: ignored when using 'jks' format or `auth.tls.existingSecrets` is not empty
     ##
     autoGenerated: false
     ## @param auth.tls.password Password to access the JKS files or PEM key when they are password-protected.
+    ## Note: ignored when using 'existingSecret'.
     ##
     password: ""
+    ## @param auth.tls.existingSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`)
+    ##
+    existingSecret: ""
     ## @param auth.tls.jksTruststoreSecret Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets`
-    ## Note: ignored when using 'pem' format for certificates .
+    ## Note: ignored when using 'pem' format for certificates.
     ##
     jksTruststoreSecret: ""
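Since `auth.tls.autoGenerated` is only honoured with the `pem` format, the smallest TLS-enabled setup is presumably along these lines (a sketch; production deployments would point `auth.tls.existingSecrets` at real certificates instead):

```yaml
auth:
  clientProtocol: tls
  interBrokerProtocol: tls
  tls:
    type: pem
    autoGenerated: true
```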
-    ## @param auth.tls.jksKeystoreSAN The secret key from the `auth.tls.existingSecret` containing the keystore with a SAN certificate
+    ## @param auth.tls.jksKeystoreSAN The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate
     ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services:
     ##   - kafka-0.kafka-headless.kafka.svc.cluster.local
     ##   - kafka-1.kafka-headless.kafka.svc.cluster.local
@@ -369,7 +333,7 @@ auth:
     ## Note: ignored when using 'pem' format for certificates.
     ##
     jksKeystoreSAN: ""
-    ## @param auth.tls.jksTruststore The secret key from the `auth.tls.existingSecret` or `auth.tls.jksTruststoreSecret` containing the truststore
+    ## @param auth.tls.jksTruststore The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore
     ## Note: ignored when using 'pem' format for certificates.
     ##
     jksTruststore: ""
@@ -378,36 +342,51 @@ auth:
     ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings
     ##
     endpointIdentificationAlgorithm: https
-  ## @param auth.jksSecret DEPRECATED: use `auth.tls.existingSecrets` instead.
-  ##
-  jksSecret: ""
-  ## @param auth.jksTruststoreSecret DEPRECATED: use `auth.tls.jksTruststoreSecret` instead.
+  ## ZooKeeper client configuration for Kafka brokers
   ##
-  jksTruststoreSecret: ""
-  ## @param auth.jksKeystoreSAN DEPRECATED: use `auth.tls.jksKeystoreSAN` instead.
-  ##
-  jksKeystoreSAN: ""
-  ## @param auth.jksTruststore DEPRECATED: use `auth.tls.jksTruststore` instead.
-  ##
-  jksTruststore: ""
-  ## @param auth.jksPassword DEPRECATED: use `auth.tls.password` instead.
-  ##
-  jksPassword: ""
-  ## @param auth.tlsEndpointIdentificationAlgorithm DEPRECATED: use `auth.tls.endpointIdentificationAlgorithm` instead.
-  ##
-  tlsEndpointIdentificationAlgorithm: https
+  zookeeper:
+    ## TLS configuration
+    ##
+    tls:
+      ## @param auth.zookeeper.tls.enabled Enable TLS for ZooKeeper client connections.
+      ##
+      enabled: false
+      ## @param auth.zookeeper.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`.
+      ##
+      type: jks
+      ## @param auth.zookeeper.tls.verifyHostname Hostname validation.
+      ##
+      verifyHostname: true
+      ## @param auth.zookeeper.tls.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications.
+      ##
+      existingSecret: ""
+      ## @param auth.zookeeper.tls.existingSecretKeystoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore.
+      ##
+      existingSecretKeystoreKey: zookeeper.keystore.jks
+      ## @param auth.zookeeper.tls.existingSecretTruststoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore.
+      ##
+      existingSecretTruststoreKey: zookeeper.truststore.jks
+      ## @param auth.zookeeper.tls.passwordsSecret Existing secret containing Keystore and Truststore passwords.
+      ##
+      passwordsSecret: ""
+      ## @param auth.zookeeper.tls.passwordsSecretKeystoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore.
+      ##
+      passwordsSecretKeystoreKey: keystore-password
+      ## @param auth.zookeeper.tls.passwordsSecretTruststoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore.
+      ##
+      passwordsSecretTruststoreKey: truststore-password
 ## @param listeners The address(es) the socket server listens on. 
Auto-calculated it's set to an empty array ## When it's set to an empty array, the listeners will be configured -## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) ## listeners: [] ## @param advertisedListeners The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array ## When it's set to an empty array, the advertised listeners will be configured -## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) ## advertisedListeners: [] ## @param listenerSecurityProtocolMap The protocol->listener mapping. Auto-calculated it's set to nil -## When it's nil, the listeners will be configured based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## When it's nil, the listeners will be configured based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) ## listenerSecurityProtocolMap: "" ## @param allowPlaintextListener Allow to use the PLAINTEXT listener @@ -416,6 +395,27 @@ allowPlaintextListener: true ## @param interBrokerListenerName The listener that the brokers should communicate on ## interBrokerListenerName: INTERNAL +## @param command Override Kafka container command +## +command: + - /scripts/setup.sh +## @param args Override Kafka container arguments +## +args: [] +## @param extraEnvVars Extra environment variables to add to Kafka pods +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## e.g: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables +## +extraEnvVarsSecret: "" ## @section Statefulset parameters @@ -427,38 +427,120 @@ replicaCount: 1 ## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively. 
## minBrokerId: 0 -## @param updateStrategy Update strategy for the stateful set -## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## @param containerPorts.client Kafka client container port +## @param containerPorts.internal Kafka inter-broker container port +## @param containerPorts.external Kafka external container port +## +containerPorts: + client: 9092 + internal: 9093 + external: 9094 +## Configure extra options for Kafka containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on Kafka containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Kafka containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 +## @param startupProbe.enabled Enable startupProbe on Kafka containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup +## +lifecycleHooks: {} +## Kafka resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the container +## @param resources.requests The requested resources for the container +## +resources: + limits: {} + requests: {} +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable security context for the pods +## @param podSecurityContext.fsGroup Set Kafka pod's Security Context 
fsGroup ## -updateStrategy: RollingUpdate -## @param rollingUpdatePartition Partition update strategy -## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +podSecurityContext: + enabled: true + fsGroup: 1001 +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable Kafka containers' Security Context +## @param containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot +## e.g: +## containerSecurityContext: +## enabled: true +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true ## -rollingUpdatePartition: "" -## @param hostAliases Add deployment host aliases +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true +## @param hostAliases Kafka pods host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## hostAliases: [] -## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel -## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## @param hostNetwork Specify if host network should be enabled for Kafka pods ## -podManagementPolicy: Parallel -## @param schedulerName Name of the k8s scheduler (other than default) -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +hostNetwork: false +## @param hostIPC Specify if host IPC should be enabled for Kafka pods ## -schedulerName: "" -## @param podLabels Kafka pod labels +hostIPC: false +## @param podLabels Extra labels for Kafka pods ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ ## podLabels: {} -## @param podAnnotations Kafka Pod annotations +## @param podAnnotations Extra annotations for Kafka pods ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ ## podAnnotations: {} -## @param priorityClassName Name of the existing priority class to be used by kafka pods -## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -## -priorityClassName: "" ## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard`
 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
 ##
@@ -507,102 +589,42 @@ topologySpreadConstraints: {}
 ## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution
 ##
 terminationGracePeriodSeconds: ""
-## Kafka pods' Security Context
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-## @param podSecurityContext.enabled Enable security context for the pods
-## @param podSecurityContext.fsGroup Group ID for the filesystem used by the containers
-## @param podSecurityContext.runAsUser User ID for the service user running the pod
-##
-podSecurityContext:
-  enabled: true
-  fsGroup: 1001
-  runAsUser: 1001
-## Kafka containers' Security Context
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-## @param containerSecurityContext.enabled Enable Kafka containers' Security Context
-## Example:
-## containerSecurityContext:
-##   enabled: true
-##   capabilities:
-##     drop: ["NET_RAW"]
-##   readOnlyRootFilesystem: true
-##
-containerSecurityContext:
-  enabled: false
-## Kafka containers' resource requests and limits
-## ref: https://kubernetes.io/docs/user-guide/compute-resources/
-## We usually recommend not to specify default resources and to leave this as a conscious
-## choice for the user. This also increases chances charts run on environments with little
-## resources, such as Minikube. If you do want to specify resources, uncomment the following
-## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-## @param resources.limits The resources limits for Kafka containers
-## @param resources.requests The requested resources for Kafka containers
-##
-resources:
-  ## Example:
-  ## limits:
-  ##    cpu: 250m
-  ##    memory: 1Gi
-  limits: {}
-  ## Examples:
-  ## requests:
-  ##    cpu: 250m
-  ##    memory: 256Mi
-  requests: {}
-## Kafka containers' liveness probe. Evaluated as a template.
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-## @param livenessProbe.enabled Enable livenessProbe
-## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
-## @param livenessProbe.periodSeconds Period seconds for livenessProbe
-## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
-## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
-## @param livenessProbe.successThreshold Success threshold for livenessProbe
+## @param podManagementPolicy StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
 ##
-livenessProbe:
-  enabled: true
-  initialDelaySeconds: 10
-  timeoutSeconds: 5
-  failureThreshold: 3
-  periodSeconds: 10
-  successThreshold: 1
-## Kafka containers' readiness probe. Evaluated as a template. 
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
-## @param readinessProbe.enabled Enable readinessProbe
-## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
-## @param readinessProbe.periodSeconds Period seconds for readinessProbe
-## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
-## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
-## @param readinessProbe.successThreshold Success threshold for readinessProbe
+podManagementPolicy: Parallel
+## @param priorityClassName Name of the existing priority class to be used by Kafka pods
+## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
 ##
-readinessProbe:
-  enabled: true
-  initialDelaySeconds: 5
-  failureThreshold: 6
-  timeoutSeconds: 5
-  periodSeconds: 10
-  successThreshold: 1
-## @param customLivenessProbe Custom Liveness probe configuration for Kafka
+priorityClassName: ""
+## @param schedulerName Name of the k8s scheduler (other than default)
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
 ##
-customLivenessProbe: {}
-## @param customReadinessProbe Custom Readiness probe configuration for Kafka
+schedulerName: ""
+## @param updateStrategy.type Kafka statefulset strategy type
+## @param updateStrategy.rollingUpdate Kafka statefulset rolling update configuration parameters
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate: {}
+## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s)
+## e.g:
+## extraVolumes:
+##   - name: kafka-jaas
+##     secret:
+##       secretName: kafka-jaas
 ##
-customReadinessProbe: {}
-## Pod Disruption Budget configuration
-## The PDB will only be created if replicaCount is greater than 1
-## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s)
+## extraVolumeMounts:
+##   - name: kafka-jaas
+##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+##     subPath: kafka_jaas.conf
 ##
-pdb:
-  ## @param pdb.create Enable/disable a Pod Disruption Budget creation
-  ##
-  create: false
-  ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled
-  ##
-  minAvailable: ""
-  ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable
-  ##
-  maxUnavailable: 1
-## @param sidecars Attach additional sidecar containers to the Kafka pod
-## Example:
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the Kafka pod(s)
+## e.g:
 ## sidecars:
 ##   - name: your-image-name
 ##     image: your-image
@@ -612,91 +634,88 @@ pdb:
 ##       containerPort: 1234
 ##
 sidecars: []
-## @param initContainers Add extra init containers
+## @param initContainers Add additional init containers to the Kafka pod(s)
+## e.g:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
 ##
 initContainers: []
+## Kafka Pod Disruption Budget
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+## @param pdb.create Deploy a pdb object for the Kafka pod
+## @param pdb.minAvailable Minimum number/percentage of available Kafka replicas
+## @param pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas
+##
+pdb:
+  create: false
+  minAvailable: ""
+  maxUnavailable: 1
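Note that `updateStrategy` is now an object rather than the old plain string, with the removed `rollingUpdatePartition` value superseded by the nested `rollingUpdate` map. Under the new shape, a staged rollout might be declared as follows (a sketch; the partition value is arbitrary):

```yaml
updateStrategy:
  type: RollingUpdate
  rollingUpdate:
    partition: 1
```

With a partition set, the StatefulSet controller only updates brokers whose ordinal is greater than or equal to that value, the usual canary pattern.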
+
+## @section Traffic Exposure parameters
-## @section Exposure parameters
-networkPolicy:
-  ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources.
-  ##
-  enabled: false
-  ## @param networkPolicy.allowExternal Don't require client label for connections
-  ## The Policy model to apply. When set to false, only pods with the correct
-  ## client label will have network access to Kafka port for client connections (service.port).
-  ## When true, kafka will accept connections from any source
-  ## (with the correct destination port).
-  ##
-  allowExternal: true
-  ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
-  ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
-  ## and that match other criteria, the ones that have the good label, can reach the kafka.
-  ## But sometimes, we want the kafka to be accessible to clients from other namespaces, in this case, we can use this
-  ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
-  ##
-  ## Example:
-  ## explicitNamespacesSelector:
-  ##   matchLabels:
-  ##     role: frontend
-  ##   matchExpressions:
-  ##     - {key: role, operator: In, values: [frontend]}
-  ##
-  explicitNamespacesSelector: {}
-  ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port
-  ## Example:
-  ## - ipBlock:
-  ##     cidr: 172.9.0.0/16
-  ##     except:
-  ##       - 172.9.1.0/24
-  ##
-  externalAccess:
-    from: []
-  ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
-  ##
-  egressRules:
-    ## Additional custom egress rules
-    ## e.g:
-    ## customRules:
-    ##   - to:
-    ##       - namespaceSelector:
-    ##           matchLabels:
-    ##             label: example
-    customRules: []
 ## Service parameters
 ##
 service:
   ## @param service.type Kubernetes Service type
   ##
   type: ClusterIP
-  ## @param service.port Kafka port for client connections
-  ##
-  port: 9092
-  ## @param service.internalPort Kafka port for inter-broker connections
-  ##
-  internalPort: 9093
-  ## @param service.externalPort Kafka port for external connections
-  ##
-  externalPort: 9094
-  ## @param service.nodePorts [object] Specify the nodePort value for the LoadBalancer and NodePort service types. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## @param service.ports.client Kafka svc port for client connections + ## @param service.ports.internal Kafka svc port for inter-broker connections + ## @param service.ports.external Kafka svc port for external connections + ## + ports: + client: 9092 + internal: 9093 + external: 9094 + ## @param service.nodePorts.client Node port for the Kafka client connections + ## @param service.nodePorts.external Node port for the Kafka external connections + ## NOTE: choose port between <30000-32767> ## nodePorts: client: "" external: "" - ## @param service.loadBalancerIP loadBalancerIP for Kafka Service - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP Kafka service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Kafka service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer ## loadBalancerIP: "" - ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: + ## e.g: ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 + ## - 10.10.10.0/24 ## loadBalancerSourceRanges: [] - ## @param service.annotations Service annotations + ## @param service.externalTrafficPolicy Kafka service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Kafka service ## annotations: {} + ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value) + ## + extraPorts: [] ## External Access to Kafka brokers configuration ## externalAccess: @@ -722,7 +741,7 @@ externalAccess: image: registry: docker.io repository: bitnami/kubectl - tag: 1.23.1-debian-10-r26 + tag: 1.24.0-debian-10-r5 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -730,57 +749,60 @@ externalAccess: pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: + ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] ## Init Container resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following
-  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  ## @param externalAccess.autoDiscovery.resources.limits Init container auto-discovery resource limits
-  ## @param externalAccess.autoDiscovery.resources.requests Init container auto-discovery resource requests
+  ## @param externalAccess.autoDiscovery.resources.limits The resources limits for the auto-discovery init container
+  ## @param externalAccess.autoDiscovery.resources.requests The requested resources for the auto-discovery init container
   ##
   resources:
-      ## Example:
-      ## limits:
-      ##    cpu: 100m
-      ##    memory: 128Mi
       limits: {}
-      ## Examples:
-      ## requests:
-      ##    cpu: 100m
-      ##    memory: 128Mi
       requests: {}
   ## Parameters to configure K8s service(s) used to externally access Kafka brokers
-  ## A new service per broker will be created
+  ## Note: A new service per broker will be created
   ##
   service:
     ## @param externalAccess.service.type Kubernetes Service type for external access. It can be NodePort or LoadBalancer
     ##
     type: LoadBalancer
-    ## @param externalAccess.service.port Kafka port used for external access when service type is LoadBalancer
+    ## @param externalAccess.service.ports.external Kafka port used for external access when service type is LoadBalancer
     ##
-    port: 9094
+    ports:
+      external: 9094
     ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
-    ## Example:
+    ## e.g:
     ## loadBalancerIPs:
     ##   - X.X.X.X
     ##   - Y.Y.Y.Y
     ##
     loadBalancerIPs: []
+    ## @param externalAccess.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount
+    ## e.g:
+    ## loadBalancerNames:
+    ##   - broker1.external.example.com
+    ##   - broker2.external.example.com
+    ##
+    loadBalancerNames: []
+    ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount
+    ## e.g:
+    ## loadBalancerAnnotations:
+    ##   - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com.
+    ##   - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com.
+    ##
+    loadBalancerAnnotations: []
     ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer
     ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## Example:
+    ## e.g:
     ## loadBalancerSourceRanges:
     ##   - 10.10.10.0/24
     ##
     loadBalancerSourceRanges: []
     ## @param externalAccess.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount
-    ## Example:
+    ## e.g:
     ## nodePorts:
     ##   - 30001
     ##   - 30002
@@ -789,6 +811,9 @@ externalAccess:
     ## @param externalAccess.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort
     ##
     useHostIPs: false
+    ## @param externalAccess.service.usePodIPs Use the MY_POD_IP address for external access.
+    ##
+    usePodIPs: false
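The three per-broker arrays above (`loadBalancerIPs`, `loadBalancerNames`, `loadBalancerAnnotations`) are indexed by broker ordinal, and the external service template only applies them when their length equals `replicaCount`. A two-broker sketch (hostnames are placeholders):

```yaml
replicaCount: 2
externalAccess:
  enabled: true
  service:
    type: LoadBalancer
    ports:
      external: 9094
    loadBalancerAnnotations:
      - external-dns.alpha.kubernetes.io/hostname: broker-0.kafka.example.com.
      - external-dns.alpha.kubernetes.io/hostname: broker-1.kafka.example.com.
```

This should render one `<fullname>-<i>-external` Service per broker, each carrying the annotation at its own index.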
    ## @param externalAccess.service.domain Domain or external ip used to configure Kafka external listener when service type is NodePort
    ## If not specified, the container will try to get the kubernetes node external IP
    ##
@@ -796,19 +821,66 @@ externalAccess:
    ## @param externalAccess.service.annotations Service annotations for external access
    ##
    annotations: {}
-    ## @param externalAccess.service.usePodIPs using the MY_POD_IP address for external access.
+    ## @param externalAccess.service.extraPorts Extra ports to expose in the Kafka external service
    ##
-    usePodIPs: false
+    extraPorts: []
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## When set to false, only pods with the correct client label will have network access to the port Kafka is
+  ## listening on. When true, Kafka accepts connections from any source (with the correct destination port).
+  ##
+  allowExternal: true
+  ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
+  ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
+  ## and that match the other criteria (i.e. carry the correct label) can reach Kafka.
+  ## But sometimes we want Kafka to be accessible to clients from other namespaces; in that case, we can use this
+  ## LabelSelector to select those namespaces. Note that the networkPolicy's namespace should also be explicitly added.
+  ##
+  ## e.g:
+  ## explicitNamespacesSelector:
+  ##   matchLabels:
+  ##     role: frontend
+  ##   matchExpressions:
+  ##     - {key: role, operator: In, values: [frontend]}
+  ##
+  explicitNamespacesSelector: {}
+  ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port
+  ## e.g:
+  ## - ipBlock:
+  ##     cidr: 172.9.0.0/16
+  ##     except:
+  ##       - 172.9.1.0/24
+  ##
+  externalAccess:
+    from: []
+  ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule
+  ##
+  egressRules:
+    ## Additional custom egress rules
+    ## e.g:
+    ## customRules:
+    ##   - to:
+    ##       - namespaceSelector:
+    ##           matchLabels:
+    ##             label: example
+    customRules: []
 ## @section Persistence parameters
-## Persistence parameters
+## Enable persistence using Persistent Volume Claims
+## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
 ##
 persistence:
-  ## @param persistence.enabled Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected
+  ## @param persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected
   ##
   enabled: true
-  ## @param persistence.existingClaim Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template
+  ## @param persistence.existingClaim A manually managed Persistent Volume and Claim
   ## If defined, PVC must be created manually before volume will be bound
   ## The value is evaluated as a template
   ##
@@ -820,7 +892,7 @@ persistence:
   ## set, choosing the default provisioner.
   ##
   storageClass: ""
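Because `persistence.existingClaim` must reference a pre-created PVC and is evaluated as a template, the two usual modes of persisting data look roughly like this (names are placeholders):

```yaml
persistence:
  enabled: true
  ## Option A: bind to a PVC created beforehand
  existingClaim: "my-kafka-data"
  ## Option B: omit existingClaim and provision dynamically from a named class
  # storageClass: "standard"
```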
-  ## @param persistence.accessModes PV Access Mode
+  ## @param persistence.accessModes Persistent Volume Access Modes
   ##
   accessModes:
     - ReadWriteOnce
@@ -830,10 +902,11 @@ persistence:
   ## @param persistence.annotations Annotations for the PVC
   ##
   annotations: {}
-  ## @param persistence.selector Selector to match an existing Persistent Volume for Kafka's data PVC. If set, the PVC can't have a PV dynamically provisioned for it
+  ## @param persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it
   ## selector:
   ##   matchLabels:
   ##     app: my-app
+  ##
   selector: {}
   ## @param persistence.mountPath Mount path of the Kafka data volume
   ##
   mountPath: /bitnami/kafka/data
@@ -841,7 +914,7 @@
 ## Log Persistence parameters
 ##
 logPersistence:
-  ## @param logPersistence.enabled Enable Kafka logs persistence using PVC, note that Zookeeper persistence is unaffected
+  ## @param logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected
   ##
   enabled: false
   ## @param logPersistence.existingClaim A manually managed Persistent Volume and Claim
   ## If defined, PVC must be created manually before volume will be bound
   ## The value is evaluated as a template
   ##
   existingClaim: ""
-  ## @param logPersistence.existingLogClaim PV Storage Class
+  ## @param logPersistence.storageClass PVC Storage Class for Kafka logs volume
   ## If defined, storageClassName: <storageClass>
   ## If set to "-", storageClassName: "", which disables dynamic provisioning
   ## If undefined (the default) or set to null, no storageClassName spec is
   ## set, choosing the default provisioner.
-  existingLogClaim: ""
-  ## @param logPersistence.accessModes PV Access Mode
+  ##
+  storageClass: ""
+  ## @param logPersistence.accessModes Persistent Volume Access Modes
   ##
   accessModes:
     - ReadWriteOnce
@@ -865,107 +939,97 @@ logPersistence:
   ## @param logPersistence.annotations Annotations for the PVC
   ##
   annotations: {}
-  ## @param logPersistence.selector Selector to match an existing Persistent Volume for Kafka's log data PVC. If set, the PVC can't have a PV dynamically provisioned for it
+  ## @param logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it
   ## selector:
   ##   matchLabels:
   ##     app: my-app
+  ##
   selector: {}
   ## @param logPersistence.mountPath Mount path of the Kafka logs volume
   ##
   mountPath: /opt/bitnami/kafka/logs
-## @section RBAC parameters
-
-## Kafka pods ServiceAccount
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-##
-serviceAccount:
-  ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods
-  ##
-  create: true
-  ## @param serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated - ## If not set and create is true, a name is generated using the kafka.serviceAccountName template - ## - name: "" - ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## Can be set to false if pods using this serviceAccount do not need to use K8s API - ## - automountServiceAccountToken: true -## Role Based Access -## ref: https://kubernetes.io/docs/admin/authorization/rbac/ -## -rbac: - ## @param rbac.create Whether to create & use RBAC resources or not - ## binding Kafka ServiceAccount to a role - ## that allows Kafka pods querying the K8s API - ## - create: false - ## @section Volume Permissions parameters +## -## Init Container parameters -## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component -## values from the securityContext section of the component +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node ## volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume ## enabled: false - ## The security context for the volumePermissions init container - ## @param volumePermissions.securityContext.runAsUser User ID for the container. - ## Can be set to "auto". "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed). - ## - securityContext: - runAsUser: 0 ## @param volumePermissions.image.registry Init container volume-permissions image registry - ## @param volumePermissions.image.repository Init container volume-permissions image name - ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy - ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets ## image: registry: docker.io repository: bitnami/bitnami-shell - tag: 10-debian-10-r307 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## + tag: 10-debian-10-r434 pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## Example: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] - ## Init Container resource requests and limits + ## Init container resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. 
This also increases chances charts run on environments with little
-  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
-  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
-  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
   ##
   resources:
-    ## Example:
-    ## limits:
-    ##    cpu: 100m
-    ##    memory: 128Mi
     limits: {}
-    ## Examples:
-    ## requests:
-    ##    cpu: 100m
-    ##    memory: 128Mi
     requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## @section Other Parameters
+
+## ServiceAccount for Kafka
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods
+  ##
+  create: true
+  ## @param serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated
+  ## If not set and create is true, a name is generated using the kafka.serviceAccountName template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+## Role Based Access Control
+## ref: https://kubernetes.io/docs/admin/authorization/rbac/
+##
+rbac:
+  ## @param rbac.create Whether to create & use RBAC resources or not
+  ## binding Kafka ServiceAccount to a role
+  ## that allows Kafka pods querying the K8s API
+  ##
+  create: false
 ## @section Metrics parameters

 ## Prometheus Exporters / Metrics
 ##
 metrics:
-  ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter
+  ## Prometheus Kafka exporter: exposes complementary metrics to JMX exporter
   ##
   kafka:
     ## @param metrics.kafka.enabled Whether or not to create a standalone Kafka exporter to expose Kafka metrics
     ##
@@ -982,7 +1046,7 @@ metrics:
     image:
       registry: docker.io
       repository: bitnami/kafka-exporter
-      tag: 1.4.2-debian-10-r115
+      tag: 1.4.2-debian-10-r243
     ## Specify a imagePullPolicy
     ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
     ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
@@ -990,39 +1054,14 @@ metrics:
     pullPolicy: IfNotPresent
     ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
     ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## Example:
+    ## e.g:
     ## pullSecrets:
     ##   - myRegistryKeySecretName
     ##
     pullSecrets: []
-    ## Kafka exporter pods ServiceAccount
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-    ##
-    serviceAccount:
-      ## @param 
metrics.kafka.serviceAccount.create Enable creation of ServiceAccount for Kafka exporter pods - ## - create: true - ## @param metrics.kafka.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated - ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template - ## - name: "" - ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## Can be set to false if pods using this serviceAccount do not need to use K8s API - ## - automountServiceAccountToken: true - ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka Exporter - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter - ## Example: - ## extraFlags: - ## tls.insecure-skip-tls-verify: "" - ## web.telemetry-path: "/metrics" - ## - extraFlags: {} + ## @param metrics.kafka.certificatesSecret Name of the existing secret containing the optional certificate and key files - ## for Kafka Exporter client authentication + ## for Kafka exporter client authentication ## certificatesSecret: "" ## @param metrics.kafka.tlsCert The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) @@ -1031,24 +1070,51 @@ metrics: ## @param metrics.kafka.tlsKey The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) ## tlsKey: key-file - ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka Exporter client authentication + ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication ## tlsCaSecret: "" ## @param metrics.kafka.tlsCaCert The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) ## tlsCaCert: ca-file - ## @param metrics.kafka.podLabels Kafka exporter pod labels - ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter + ## e.g: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" ## - podLabels: {} - ## @param metrics.kafka.podAnnotations Kafka exporter pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + extraFlags: {} + ## @param metrics.kafka.command Override Kafka exporter container command ## - podAnnotations: {} - ## Prometheus Kafka Exporter containers' Security Context - ## @param metrics.kafka.containerSecurityContext.enabled Enable Prometheus Kafka Exporter containers' Security Context + command: [] + ## @param metrics.kafka.args Override Kafka exporter container arguments + ## + args: [] + ## @param metrics.kafka.containerPorts.metrics Kafka exporter metrics container port + ## + containerPorts: + metrics: 9308 + ## Kafka exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.kafka.resources.limits The resources limits for the container + ## @param metrics.kafka.resources.requests The requested resources for the container + ## + resources: + limits: {} + requests: {} + ## Kafka exporter pods' Security Context + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.kafka.podSecurityContext.enabled Enable security context for the pods + ## @param metrics.kafka.podSecurityContext.fsGroup Set Kafka exporter pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka exporter containers' Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: + ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context + ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser + ## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot + ## e.g: ## containerSecurityContext: ## enabled: true ## capabilities: @@ -1056,42 +1122,101 @@ metrics: ## readOnlyRootFilesystem: true ## containerSecurityContext: - enabled: false - ## Prometheus Kafka Exporter' resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. - ## @param metrics.kafka.resources.limits Kafka Exporter container resource limits - ## @param metrics.kafka.resources.requests Kafka Exporter container resource requests + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.kafka.hostAliases Kafka exporter pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param metrics.kafka.podLabels Extra labels for Kafka exporter pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ ## - resources: - ## Example: - ## limits: - ## cpu: 100m - ## memory: 128Mi - limits: {} - ## Examples: - ## requests: - ## cpu: 100m - ## memory: 128Mi - requests: {} - ## @param metrics.kafka.affinity Affinity for Kafka Exporter pod assignment + podLabels: {} + ## @param metrics.kafka.podAnnotations Extra annotations for Kafka exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param metrics.kafka.podAffinityPreset Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param metrics.kafka.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node metrics.kafka.affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param metrics.kafka.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param metrics.kafka.nodeAffinityPreset.key Node label key to match Ignored if `metrics.kafka.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param metrics.kafka.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.kafka.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.kafka.affinity Affinity for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## Note: metrics.kafka.podAffinityPreset, metrics.kafka.podAntiAffinityPreset, and metrics.kafka.nodeAffinityPreset will be ignored when it's set ## affinity: {} - ## @param metrics.kafka.nodeSelector Node labels for Kafka Exporter pod assignment + ## @param metrics.kafka.nodeSelector Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} - ## @param metrics.kafka.tolerations Tolerations for Kafka Exporter pod assignment + ## @param metrics.kafka.tolerations Tolerations for pod assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] + ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka exporter + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.kafka.priorityClassName Kafka exporter pods' priorityClassName + ## + priorityClassName: "" + ## @param metrics.kafka.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param metrics.kafka.extraVolumes Optionally specify extra list of additional volumes for the Kafka exporter pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param metrics.kafka.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param metrics.kafka.sidecars Add additional sidecar containers to the Kafka exporter pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] ## @param metrics.kafka.initContainers Add init containers to the Kafka exporter pods - ## Example: + ## e.g: ## initContainers: ## - name: your-image-name ## image: your-image @@ -1101,42 +1226,44 @@ metrics: ## containerPort: 1234 ## initContainers: [] - ## Service configuration + ## Kafka exporter service configuration ## service: - ## @param metrics.kafka.service.type Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter - ## - type: ClusterIP - ## @param metrics.kafka.service.port Kafka Exporter Prometheus port - ## - port: 9308 - ## @param metrics.kafka.service.nodePort Kubernetes HTTP node port - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: "" - ## @param 
metrics.kafka.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer` - ## Set the LoadBalancer service type to internal only - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## @param metrics.kafka.service.ports.metrics Kafka exporter metrics service port ## - loadBalancerIP: "" - ## @param metrics.kafka.service.loadBalancerSourceRanges Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] + ports: + metrics: 9308 ## @param metrics.kafka.service.clusterIP Static clusterIP or None for headless services ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address ## clusterIP: "" - ## @param metrics.kafka.service.annotations [object] Annotations for the Kafka Exporter Prometheus metrics service + ## @param metrics.kafka.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.kafka.service.annotations [object] Annotations for the Kafka exporter service ## annotations: prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/port: "{{ .Values.metrics.kafka.service.ports.metrics }}" prometheus.io/path: "/metrics" - ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## Kafka exporter pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param metrics.kafka.serviceAccount.create Enable creation of ServiceAccount for Kafka exporter pods + ## + create: true + ## @param metrics.kafka.serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template + ## + name: "" + ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Prometheus JMX exporter: exposes the majority of Kafkas metrics ## jmx: ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus @@ -1153,7 +1280,7 @@ metrics: image: registry: docker.io repository: bitnami/jmx-exporter - tag: 0.16.1-debian-10-r177 + tag: 0.16.1-debian-10-r306 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -1161,16 +1288,17 @@ metrics: pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: + ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] - - ## Prometheus JMX Exporter Containers' Security Context - ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX Exporter Containers' Security Context + ## Prometheus JMX exporter containers' Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## Example: + ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context + ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot + ## e.g: ## containerSecurityContext: ## enabled: true ## capabilities: @@ -1178,63 +1306,45 @@ metrics: ## readOnlyRootFilesystem: true ## containerSecurityContext: - enabled: false - ## Prometheus JMX Exporter' resource requests and limits + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port + ## + containerPorts: + metrics: 5556 + ## Prometheus JMX exporter resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
-    ## @param metrics.jmx.resources.limits JMX Exporter container resource limits
-    ## @param metrics.jmx.resources.requests JMX Exporter container resource requests
+    ## @param metrics.jmx.resources.limits The resources limits for the JMX exporter container
+    ## @param metrics.jmx.resources.requests The requested resources for the JMX exporter container
     ##
     resources:
-      ## Example:
-      ## limits:
-      ##    cpu: 100m
-      ##    memory: 128Mi
       limits: {}
-      ## Examples:
-      ## requests:
-      ##    cpu: 100m
-      ##    memory: 128Mi
       requests: {}
-    ## Service configuration
+    ## Prometheus JMX exporter service configuration
     ##
     service:
-      ## @param metrics.jmx.service.type Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter
-      ##
-      type: ClusterIP
-      ## @param metrics.jmx.service.port JMX Exporter Prometheus port
-      ##
-      port: 5556
-      ## @param metrics.jmx.service.nodePort Kubernetes HTTP node port
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+      ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port
       ##
-      nodePort: ""
-      ## @param metrics.jmx.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer`
-      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-      ##
-      loadBalancerIP: ""
-      ## @param metrics.jmx.service.loadBalancerSourceRanges Load Balancer sources
-      ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-      ## Example:
-      ## loadBalancerSourceRanges:
-      ##   - 10.10.10.0/24
-      ##
-      loadBalancerSourceRanges: []
+      ports:
+        metrics: 5556
       ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services
       ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
       ##
       clusterIP: ""
-      ## @param metrics.jmx.service.annotations [object] Annotations for the JMX Exporter Prometheus metrics service
+      ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin
+      ## Values: ClientIP or None
+      ## ref: https://kubernetes.io/docs/user-guide/services/
+      ##
+      sessionAffinity: None
+      ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service
       ##
       annotations:
         prometheus.io/scrape: "true"
-        prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}"
+        prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}"
         prometheus.io/path: "/"
-    ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects you want to expose to via JMX stats to JMX Exporter
-    ## Only whitelisted values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics
+    ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects you want to expose via JMX stats to the JMX exporter
+    ## Only whitelisted values will be exposed via JMX exporter. They must also be exposed via Rules. To expose all metrics
     ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []`
     ## (2) commented out above `overrideConfig`.
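+    ## A minimal whitelist sketch (these object-name patterns are illustrative, not a recommended set):
+    ## e.g:
+    ## whitelistObjectNames:
+    ##   - kafka.controller:*
+    ##   - kafka.server:*
+    ##   - java.lang:*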
 ##
@@ -1279,19 +1389,25 @@ metrics:
     ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
     ##
     scrapeTimeout: ""
-    ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels
+    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    labels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
     ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
-    ## e.g:
-    ## selector:
-    ##   prometheus: my-prometheus
     ##
     selector: {}
-    ## @param metrics.serviceMonitor.relabelings Relabel configuration for the metrics
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
     ##
     relabelings: []
     ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
     ##
     metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter for the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
+    ##
+    jobLabel: ""

 ## @section Kafka provisioning parameters

@@ -1301,42 +1417,13 @@ provisioning:
   ## @param provisioning.enabled Enable kafka provisioning Job
   ##
   enabled: false
-  ## @param provisioning.numPartitions Default number of partitions for topics when unspecified.
-  numPartitions: 1
-  ## @param provisioning.replicationFactor Default replication factor for topics when unspecified.
-  replicationFactor: 1
-  ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning
-  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-  ##
-  schedulerName: ""
-  ## @param provisioning.podAnnotations Provisioning Pod annotations.
+  ## @param provisioning.numPartitions Default number of partitions for topics when unspecified
   ##
-  podAnnotations: {}
-  ## We usually recommend not to specify default resources and to leave this as a conscious
-  ## choice for the user. This also increases chances charts run on environments with little
-  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
-  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  ## @param provisioning.resources.limits The resources limits for the container
-  ## @param provisioning.resources.requests The requested resources for the container
-  ##
-  resources:
-    ## Example:
-    ## limits:
-    ##    cpu: 250m
-    ##    memory: 1Gi
-    limits: {}
-    ## Examples:
-    ## requests:
-    ##    cpu: 250m
-    ##    memory: 256Mi
-    requests: {}
-  ## @param provisioning.command Override provisioning container command
-  ##
-  command: []
-  ## @param provisioning.args Override provisioning container arguments
+  numPartitions: 1
+  ## @param provisioning.replicationFactor Default replication factor for topics when unspecified
   ##
-  args: []
-  ## @param provisioning.topics Kafka provisioning topics
+  replicationFactor: 1
+  ## @param provisioning.topics Kafka topics to provision
   ## - name: topic-name
   ##   partitions: 1
   ##   replicationFactor: 1
@@ -1346,24 +1433,214 @@ provisioning:
   ##     flush.messages: 1
   ##
   topics: []
+  ## @param provisioning.tolerations Tolerations for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## @param provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources
+  ## - echo "Allow user to consume from any topic"
+  ## - >-
+  ##     /opt/bitnami/kafka/bin/kafka-acls.sh
+  ##     --bootstrap-server $KAFKA_SERVICE
+  ##     --command-config $CLIENT_CONF
+  ##     --add
+  ##     --allow-principal User:user
+  ##     --consumer --topic '*'
+  ## - "/opt/bitnami/kafka/bin/kafka-acls.sh
+  ##     --bootstrap-server $KAFKA_SERVICE
+  ##     --command-config $CLIENT_CONF
+  ##     --list"
+  ##
+  extraProvisioningCommands: []
+  ## @param provisioning.parallel Number of provisioning commands to run at the same time
+  ##
+  parallel: 1
+  ## @param provisioning.preScript Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations
+  ##
+  preScript: ""
+  ## @param provisioning.postScript Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations
+  ##
+  postScript: ""
+  ## Auth Configuration for kafka provisioning Job
+  ##
+  auth:
+    ## TLS configuration for kafka provisioning Job
+    ##
+    tls:
+      ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`.
+      ## Note: ignored if auth.tls.clientProtocol is different from one of these values: "tls" "mtls" "sasl_tls".
+      ##
+      type: jks
+      ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job.
+      ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore.
+      ## When using 'pem' format for certificates, the secret should contain a public CA certificate, a public certificate and one private key.
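+      ## A hypothetical way to create such a secret for the 'jks' format (the secret and file names are illustrative):
+      ## kubectl create secret generic kafka-provisioning-certs \
+      ##   --from-file=keystore.jks --from-file=truststore.jks
+      ## e.g:
+      ## certificatesSecret: kafka-provisioning-certs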
+ ## + certificatesSecret: "" + ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) + ## + cert: tls.crt + ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key) + ## + key: tls.key + ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) + ## + caCert: ca.crt + ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) + ## + keystore: keystore.jks + ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) + ## + truststore: truststore.jks + ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. + ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key. + ## + passwordsSecret: "" + ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keyPasswordSecretKey: key-password + ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keystorePasswordSecretKey: keystore-password + ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + truststorePasswordSecretKey: truststore-password + ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. + ## + keyPassword: "" + ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. + ## + keystorePassword: "" + ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. 
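+      ## Instead of setting the JKS/PEM passwords above in plain text, the passwordsSecret
+      ## mentioned earlier could be created with the expected keys (a hypothetical sketch;
+      ## the secret name and passwords are illustrative):
+      ## kubectl create secret generic kafka-provisioning-jks-passwords \
+      ##   --from-literal=keystore-password=changeit \
+      ##   --from-literal=truststore-password=changeit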
+      ##
+      truststorePassword: ""
+  ## @param provisioning.command Override provisioning container command
+  ##
+  command: []
+  ## @param provisioning.args Override provisioning container arguments
+  ##
+  args: []
+  ## @param provisioning.extraEnvVars Extra environment variables to add to the provisioning pod
+  ## e.g:
+  ## extraEnvVars:
+  ##   - name: KAFKA_CFG_BACKGROUND_THREADS
+  ##     value: "10"
+  ##
+  extraEnvVars: []
+  ## @param provisioning.extraEnvVarsCM ConfigMap with extra environment variables
+  ##
+  extraEnvVarsCM: ""
+  ## @param provisioning.extraEnvVarsSecret Secret with extra environment variables
+  ##
+  extraEnvVarsSecret: ""
+  ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods
+  ##
+  podAnnotations: {}
+  ## @param provisioning.podLabels Extra labels for Kafka provisioning pods
+  ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## Kafka provisioning resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param provisioning.resources.limits The resources limits for the Kafka provisioning container
+  ## @param provisioning.resources.requests The requested resources for the Kafka provisioning container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Kafka provisioning pods' Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param provisioning.podSecurityContext.enabled Enable security context for the pods
+  ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup
+  ##
+  podSecurityContext:
+    enabled: true
+    fsGroup: 1001
+  ## Kafka provisioning containers' Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+  ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context
+  ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser
+  ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot
+  ## e.g:
+  ##   containerSecurityContext:
+  ##     enabled: true
+  ##     capabilities:
+  ##       drop: ["NET_RAW"]
+  ##     readOnlyRootFilesystem: true
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1001
+    runAsNonRoot: true
+  ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s)
+  ## e.g:
+  ## extraVolumes:
+  ##   - name: kafka-jaas
+  ##     secret:
+  ##       secretName: kafka-jaas
+  ##
+  extraVolumes: []
+  ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s)
+  ## extraVolumeMounts:
+  ##   - name: kafka-jaas
+  ##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+  ##     subPath: kafka_jaas.conf
+  ##
+  extraVolumeMounts: []
+  ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s)
+  ## e.g:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+  ## @param provisioning.initContainers Add additional init containers to the Kafka provisioning pod(s)
+  ## e.g:
+  ## initContainers:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  initContainers: []
+  ## @param provisioning.waitForKafka If true use an init container to wait until kafka is ready before starting provisioning
+  ##
+  waitForKafka: true

-## @section Zookeeper chart parameters
+## @section ZooKeeper chart parameters

-## Zookeeper chart configuration
+## ZooKeeper chart configuration
 ## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml
 ##
 zookeeper:
-  ## @param zookeeper.enabled Switch to enable or disable the Zookeeper helm chart
+  ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart
   ##
   enabled: true
+  ## @param zookeeper.replicaCount Number of ZooKeeper nodes
+  ##
+  replicaCount: 1
+  ## ZooKeeper authentication
+  ##
   auth:
-    ## @param zookeeper.auth.enabled Enable Zookeeper auth
+    ## @param zookeeper.auth.enabled Enable ZooKeeper auth
     ##
     enabled: false
-    ## @param zookeeper.auth.clientUser User that will use Zookeeper clients to auth
+    ## @param zookeeper.auth.clientUser User that ZooKeeper clients will use to authenticate
     ##
     clientUser: ""
-    ## @param zookeeper.auth.clientPassword Password that will use Zookeeper clients to auth
+    ## @param zookeeper.auth.clientPassword Password that ZooKeeper clients will use to authenticate
     ##
     clientPassword: ""
     ## @param zookeeper.auth.serverUsers Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin"
     ##
     serverUsers: ""
@@ -1372,9 +1649,24 @@ zookeeper:
     ## @param zookeeper.auth.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
     ##
     serverPasswords: ""
-## This value is only used when zookeeper.enabled is set to false
+  ## ZooKeeper Persistence parameters
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s)
+  ## @param zookeeper.persistence.storageClass Persistent Volume storage class
+  ## @param zookeeper.persistence.accessModes Persistent Volume access modes
+  ## @param zookeeper.persistence.size Persistent Volume size
+  ##
+  persistence:
+    enabled: true
+    storageClass: ""
+    accessModes:
+      - ReadWriteOnce
+    size: 8Gi
+
+## External ZooKeeper Configuration
+## All of these values are only used if `zookeeper.enabled=false`
 ##
 externalZookeeper:
-  ## @param externalZookeeper.servers Server or list of external Zookeeper servers to use
+  ## @param externalZookeeper.servers List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'.
   ##
   servers: []
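+  ## e.g. (hostnames and client port are illustrative):
+  ## servers:
+  ##   - zookeeper-0.internal:2181
+  ##   - zookeeper-1.internal:2181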