diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml
index 417418170..b735a995c 100644
--- a/charts/spark-operator-chart/Chart.yaml
+++ b/charts/spark-operator-chart/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.4.2
+version: 1.4.3
 appVersion: v1beta2-1.6.1-3.5.0
 keywords:
   - spark
diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md
index 8dfe591d0..a5a9c1bdc 100644
--- a/charts/spark-operator-chart/README.md
+++ b/charts/spark-operator-chart/README.md
@@ -77,67 +77,69 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command documentation.
 
 ## Values
 
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| affinity | object | `{}` | Affinity for pod assignment |
-| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
-| commonLabels | object | `{}` | Common labels to add to the resources |
-| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage |
-| envFrom | list | `[]` | Pod environment variable sources |
-| fullnameOverride | string | `""` | String to override release name |
-| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
-| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
-| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. |
-| imagePullSecrets | list | `[]` | Image pull secrets |
-| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
-| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
-| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. |
-| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
-| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
-| logLevel | int | `2` | Set higher levels for more verbose logging |
-| metrics.enable | bool | `true` | Enable prometheus metric scraping |
-| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
-| metrics.port | int | `10254` | Metrics port |
-| metrics.portName | string | `"metrics"` | Metrics port name |
-| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
-| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
-| nodeSelector | object | `{}` | Node labels for pod assignment |
-| podAnnotations | object | `{}` | Additional annotations to add to the pod |
-| podLabels | object | `{}` | Additional labels to add to the pod |
-| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
-| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
-| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
-| podMonitor.labels | object | `{}` | Pod monitor labels |
-| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
-| podSecurityContext | object | `{}` | Pod security context |
-| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
-| rbac.annotations | object | `{}` | Optional annotations for rbac |
-| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
-| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
-| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
-| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 |
-| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
-| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. |
-| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting |
-| securityContext | object | `{}` | Operator container security context |
-| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
-| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
-| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
-| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
-| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
-| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
-| sidecars | list | `[]` | Sidecar containers |
-| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs |
-| tolerations | list | `[]` | List of node taints to tolerate |
-| uiService.enable | bool | `true` | Enable UI service creation for Spark application |
-| volumeMounts | list | `[]` | |
-| volumes | list | `[]` | |
-| webhook.enable | bool | `false` | Enable webhook server |
-| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
-| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects |
-| webhook.port | int | `8080` | Webhook service port |
-| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
-| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade |
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | Affinity for pod assignment |
+| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
+| commonLabels | object | `{}` | Common labels to add to the resources |
+| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage |
+| envFrom | list | `[]` | Pod environment variable sources |
+| fullnameOverride | string | `""` | String to override release name |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
+| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
+| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. |
+| imagePullSecrets | list | `[]` | Image pull secrets |
+| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
+| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
+| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. |
+| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
+| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
+| logLevel | int | `2` | Set higher levels for more verbose logging |
+| metrics.enable | bool | `true` | Enable prometheus metric scraping |
+| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
+| metrics.port | int | `10254` | Metrics port |
+| metrics.portName | string | `"metrics"` | Metrics port name |
+| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
+| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
+| nodeSelector | object | `{}` | Node labels for pod assignment |
+| podAnnotations | object | `{}` | Additional annotations to add to the pod |
+| podDisruptionBudget.enable | bool | `false` | Specifies whether to enable pod disruption budget. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
+| podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Requires `replicaCount` to be greater than 1 |
+| podLabels | object | `{}` | Additional labels to add to the pod |
+| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
+| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
+| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
+| podMonitor.labels | object | `{}` | Pod monitor labels |
+| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
+| podSecurityContext | object | `{}` | Pod security context |
+| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
+| rbac.annotations | object | `{}` | Optional annotations for rbac |
+| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
+| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
+| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
+| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 |
+| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
+| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. |
+| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting |
+| securityContext | object | `{}` | Operator container security context |
+| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
+| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
+| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
+| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
+| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
+| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
+| sidecars | list | `[]` | Sidecar containers |
+| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs |
+| tolerations | list | `[]` | List of node taints to tolerate |
+| uiService.enable | bool | `true` | Enable UI service creation for Spark application |
+| volumeMounts | list | `[]` | |
+| volumes | list | `[]` | |
+| webhook.enable | bool | `false` | Enable webhook server |
+| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
+| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects |
+| webhook.port | int | `8080` | Webhook service port |
+| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
+| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade |
 
 ## Maintainers
 
diff --git a/charts/spark-operator-chart/templates/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/poddisruptionbudget.yaml
new file mode 100644
index 000000000..317f8bdb9
--- /dev/null
+++ b/charts/spark-operator-chart/templates/poddisruptionbudget.yaml
@@ -0,0 +1,17 @@
+{{- if $.Values.podDisruptionBudget.enable }}
+{{- if (gt (int $.Values.replicaCount) 1) }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "spark-operator.fullname" . }}-pdb
+  labels:
+    {{- include "spark-operator.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "spark-operator.selectorLabels" . | nindent 6 }}
+  minAvailable: {{ $.Values.podDisruptionBudget.minAvailable }}
+{{- else }}
+{{- fail "replicaCount must be greater than 1 to enable PodDisruptionBudget" }}
+{{- end }}
+{{- end }}
diff --git a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml
new file mode 100644
index 000000000..3f702fd10
--- /dev/null
+++ b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml
@@ -0,0 +1,39 @@
+suite: Test spark operator podDisruptionBudget
+
+templates:
+  - poddisruptionbudget.yaml
+
+release:
+  name: spark-operator
+
+tests:
+  - it: Should not render spark operator podDisruptionBudget if podDisruptionBudget.enable is false
+    set:
+      podDisruptionBudget:
+        enable: false
+    asserts:
+      - hasDocuments:
+          count: 0
+
+  - it: Should render spark operator podDisruptionBudget if podDisruptionBudget.enable is true
+    set:
+      replicaCount: 2
+      podDisruptionBudget:
+        enable: true
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: policy/v1
+          kind: PodDisruptionBudget
+          name: spark-operator-pdb
+
+  - it: Should set minAvailable from values
+    set:
+      replicaCount: 2
+      podDisruptionBudget:
+        enable: true
+        minAvailable: 3
+    asserts:
+      - equal:
+          path: spec.minAvailable
+          value: 3
diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml
index d9f63b645..6eefe666b 100644
--- a/charts/spark-operator-chart/values.yaml
+++ b/charts/spark-operator-chart/values.yaml
@@ -134,6 +134,15 @@ podMonitor:
     scheme: http
     interval: 5s
 
+# -- podDisruptionBudget to avoid service degradation
+podDisruptionBudget:
+  # -- Specifies whether to enable pod disruption budget.
+  # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
+  enable: false
+  # -- The number of pods that must be available.
+  # Requires `replicaCount` to be greater than 1
+  minAvailable: 1
+
 # nodeSelector -- Node labels for pod assignment
 nodeSelector: {}
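
For reviewers, a minimal sketch of how the new toggle is expected to behave end to end, assuming a local checkout of this repository and Helm 3; the release name and chart path below are illustrative, not part of the change:

    # Render only the new template with the PDB enabled.
    helm template spark-operator charts/spark-operator-chart \
      --set replicaCount=2 \
      --set podDisruptionBudget.enable=true \
      --set podDisruptionBudget.minAvailable=1 \
      --show-only templates/poddisruptionbudget.yaml

    # Expected: a policy/v1 PodDisruptionBudget named spark-operator-pdb
    # with spec.minAvailable: 1. Rerunning with the default replicaCount=1
    # should instead abort rendering with "replicaCount must be greater
    # than 1 to enable PodDisruptionBudget", per the fail guard above.

    # The new unit tests run via the helm-unittest plugin:
    helm unittest charts/spark-operator-chart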