From 3802141bcaf97fc6102cd7b40b19fe0c8caa4abe Mon Sep 17 00:00:00 2001
From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com>
Date: Mon, 25 Sep 2023 12:38:15 +0200
Subject: [PATCH] Use the CommonConfiguration structure for roles. (#277)

* wip

* Re-add pod overrides for the submit pod.

* Unify driver and executor configurations.

* Successful smoke tests.

* Rename config structs.

* smoke tests pass on kind.

* Refactor, cleanup and split configuration between driver and executor again.

* Update docs, examples and changelog.

* fix typo

* Move replicas under spec.executor.config in tests and examples.

* Clean up tests.

* Apply suggestions

* Remove old node selector struct

* Added module doc for roles.rs

* Use RoleGroup for executors to make replicas on the same level as executor configuration.

* Update tests with "replicas" directly under "executor".

* Update docs/examples with "replicas" directly under "executor".

* Update rust/crd/src/roles.rs

Co-authored-by: Andrew Kenworthy

* Implement review feedback.

---------

Co-authored-by: Andrew Kenworthy
---
 CHANGELOG.md                                  | 2 +
 Cargo.lock                                    | 7 +
 Cargo.toml                                    | 1 +
 deploy/config-spec/properties.yaml            | 12 +-
 .../configs/properties.yaml                   | 12 +-
 deploy/helm/spark-k8s-operator/crds/crds.yaml | 2099 +++++++++--------
 .../examples/example-history-app.yaml         | 2 +-
 .../examples/example-sparkapp-configmap.yaml  | 16 +-
 ...xample-sparkapp-external-dependencies.yaml | 16 +-
 .../examples/example-sparkapp-image.yaml      | 41 +-
 .../examples/example-sparkapp-pvc.yaml        | 16 +-
 .../examples/example-sparkapp-s3-private.yaml | 2 +-
 .../examples/example-sparkapp-streaming.yaml  | 28 +-
 .../pages/usage-guide/resources.adoc          | 40 +-
 .../ny-tlc-report-external-dependencies.yaml  | 16 +-
 examples/ny-tlc-report-image.yaml             | 2 +-
 examples/ny-tlc-report.yaml                   | 16 +-
 rust/crd/Cargo.toml                           | 1 +
 rust/crd/src/lib.rs                           | 891 +++----
 rust/crd/src/roles.rs                         | 273 +++
 .../operator-binary/src/history_controller.rs | 10 +-
 rust/operator-binary/src/main.rs              | 37 +-
 .../src/pod_driver_controller.rs              | 17 +-
 .../src/spark_k8s_controller.rs               | 212 +-
 ...loy-automatic-log-config-spark-app.yaml.j2 | 145 +-
 ...deploy-custom-log-config-spark-app.yaml.j2 | 41 +-
 ...y-automatic-log-config-pyspark-app.yaml.j2 | 145 +-
 ...ploy-custom-log-config-pyspark-app.yaml.j2 | 41 +-
 .../pod_overrides/10-deploy-spark-app.yaml.j2 | 2 +-
 .../10-deploy-spark-app.yaml.j2               | 17 +-
 .../10-deploy-spark-app.yaml.j2               | 17 +-
 .../resources/10-deploy-spark-app.yaml.j2     | 53 +-
 .../resources/12-deploy-spark-app.yaml.j2     | 15 +-
 .../kuttl/smoke/10-deploy-spark-app.yaml.j2   | 17 +-
 .../10-deploy-spark-app.yaml.j2               | 17 +-
 .../10-deploy-spark-app.yaml.j2               | 17 +-
 .../12-deploy-spark-app.yaml.j2               | 17 +-
 .../10-deploy-spark-app.yaml.j2               | 29 +-
 .../10-deploy-spark-app.yaml.j2               | 17 +-
 .../10-deploy-spark-app.yaml.j2               | 17 +-
 40 files changed, 2289 insertions(+), 2087 deletions(-)
 create mode 100644 rust/crd/src/roles.rs

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 210bc9fb..157c2fc1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,12 +15,14 @@ All notable changes to this project will be documented in this file.
 
 - `operator-rs` `0.44.0` -> `0.48.0` ([#267], [#275]).
 - Removed usages of SPARK_DAEMON_JAVA_OPTS since it's not a reliable way to pass extra JVM options ([#272]).
 - [BREAKING] use product image selection instead of version ([#275]).
+- [BREAKING] refactored application roles to use `CommonConfiguration` structures from the operator framework ([#277]).
[#267]: https://github.com/stackabletech/spark-k8s-operator/pull/267 [#268]: https://github.com/stackabletech/spark-k8s-operator/pull/268 [#269]: https://github.com/stackabletech/spark-k8s-operator/pull/269 [#272]: https://github.com/stackabletech/spark-k8s-operator/pull/272 [#275]: https://github.com/stackabletech/spark-k8s-operator/pull/275 +[#277]: https://github.com/stackabletech/spark-k8s-operator/pull/277 ## [23.7.0] - 2023-07-14 diff --git a/Cargo.lock b/Cargo.lock index 8359580f..9ce13e3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -860,6 +860,12 @@ dependencies = [ "hashbrown 0.14.0", ] +[[package]] +name = "indoc" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c785eefb63ebd0e33416dfcb8d6da0bf27ce752843a45632a67bf10d4d4b5c4" + [[package]] name = "instant" version = "0.1.12" @@ -1928,6 +1934,7 @@ dependencies = [ name = "stackable-spark-k8s-crd" version = "0.0.0-dev" dependencies = [ + "indoc", "rstest", "semver", "serde", diff --git a/Cargo.toml b/Cargo.toml index 9c7f39f2..b60d0cfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ strum = { version = "0.25", features = ["derive"] } tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-futures = { version = "0.2", features = ["futures-03"] } +indoc = "2" # [patch."https://github.com/stackabletech/operator-rs.git"] # stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" } diff --git a/deploy/config-spec/properties.yaml b/deploy/config-spec/properties.yaml index 8bb23059..f25e830e 100644 --- a/deploy/config-spec/properties.yaml +++ b/deploy/config-spec/properties.yaml @@ -17,7 +17,11 @@ properties: - fromVersion: "0.0.0" value: "30" roles: - - name: "node" + - name: "submit" + required: true + - name: "driver" + required: true + - name: "executor" required: true asOfVersion: "0.0.0" comment: "History server - TTL for successfully resolved domain names." @@ -36,7 +40,11 @@ properties: - fromVersion: "0.0.0" value: "0" roles: - - name: "node" + - name: "submit" + required: true + - name: "driver" + required: true + - name: "executor" required: true asOfVersion: "0.0.0" comment: "History server - TTL for domain names that cannot be resolved." diff --git a/deploy/helm/spark-k8s-operator/configs/properties.yaml b/deploy/helm/spark-k8s-operator/configs/properties.yaml index 8bb23059..f25e830e 100644 --- a/deploy/helm/spark-k8s-operator/configs/properties.yaml +++ b/deploy/helm/spark-k8s-operator/configs/properties.yaml @@ -17,7 +17,11 @@ properties: - fromVersion: "0.0.0" value: "30" roles: - - name: "node" + - name: "submit" + required: true + - name: "driver" + required: true + - name: "executor" required: true asOfVersion: "0.0.0" comment: "History server - TTL for successfully resolved domain names." @@ -36,7 +40,11 @@ properties: - fromVersion: "0.0.0" value: "0" roles: - - name: "node" + - name: "submit" + required: true + - name: "driver" + required: true + - name: "executor" required: true asOfVersion: "0.0.0" comment: "History server - TTL for domain names that cannot be resolved." 
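The properties above are now emitted for all three application roles (submit, driver, executor) instead of the former single "node" role. Together with the CRD changes that follow, the user-facing layout moves per-role settings under a `config` key, with the override maps (`cliOverrides`, `configOverrides`, `envOverrides`, `podOverrides`) as siblings, while executor `replicas` sits directly under `executor`. A minimal sketch of a SparkApplication fragment in the new layout (resource values are illustrative, not taken from this patch):

    ---
    apiVersion: spark.stackable.tech/v1alpha1
    kind: SparkApplication
    metadata:
      name: example-app
    spec:
      driver:
        config:
          resources:
            cpu:
              min: 500m
              max: "1"
            memory:
              limit: 1Gi
      executor:
        replicas: 3
        config:
          resources:
            memory:
              limit: 2Gi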
diff --git a/deploy/helm/spark-k8s-operator/crds/crds.yaml b/deploy/helm/spark-k8s-operator/crds/crds.yaml index 01c18cbd..0d863e08 100644 --- a/deploy/helm/spark-k8s-operator/crds/crds.yaml +++ b/deploy/helm/spark-k8s-operator/crds/crds.yaml @@ -56,147 +56,240 @@ spec: driver: nullable: true properties: - affinity: - default: - podAffinity: null - podAntiAffinity: null - nodeAffinity: null - nodeSelector: null + cliOverrides: + additionalProperties: + type: string + default: {} + type: object + config: + default: {} properties: - nodeAffinity: - description: Node affinity is a group of node affinity scheduling rules. - nullable: true + affinity: + default: + podAffinity: null + podAntiAffinity: null + nodeAffinity: null + nodeSelector: null properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. + nodeAffinity: + description: Node affinity is a group of node affinity scheduling rules. + nullable: true + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
- format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + nodeSelector: + nullable: true + type: object + podAffinity: + description: Pod affinity is a group of inter pod affinity scheduling rules. + nullable: true properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. 
+ preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight type: object type: array - required: - - nodeSelectorTerms - type: object - type: object - nodeSelector: - nullable: true - type: object - podAffinity: - description: Pod affinity is a group of inter pod affinity scheduling rules. - nullable: true - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running properties: labelSelector: description: A label query over a set of resources, in this case pods. @@ -258,115 +351,115 @@ spec: description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
- format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: array + type: object + podAntiAffinity: + description: Pod anti affinity is a group of inter pod anti affinity scheduling rules. + nullable: true + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Pod anti affinity is a group of inter pod anti affinity scheduling rules. - nullable: true - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running properties: labelSelector: description: A label query over a set of resources, in this case pods. @@ -439,175 +532,161 @@ spec: required: - topologyKey type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: array + type: object + type: object + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` properties: - labelSelector: - description: A label query over a set of resources, in this case pods. + console: + nullable: true properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + custom: + description: Custom log configuration provided in a ConfigMap properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object + type: object + default: {} type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey type: object - type: array + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object + resources: + default: + memory: + limit: null + runtimeLimits: {} + cpu: + min: null + max: null + storage: {} + properties: + cpu: + default: + min: null + max: null + properties: + max: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + min: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + type: object + memory: + properties: + limit: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + runtimeLimits: + type: object + type: object + storage: + type: object type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + nullable: true + type: array type: object - jvmSecurity: + configOverrides: additionalProperties: - nullable: true - type: string + additionalProperties: + type: string + type: object default: {} type: object - logging: - default: - enableVectorAgent: null - containers: {} - properties: - containers: - additionalProperties: - anyOf: - - required: - - custom - - {} - description: Fragment derived from `ContainerLogConfigChoice` - properties: - console: - nullable: true - properties: - level: - description: Log levels - enum: - - TRACE - - DEBUG - - INFO - - WARN - - ERROR - - FATAL - - NONE - nullable: true - type: string - type: object - custom: - description: Custom log configuration provided in a ConfigMap - properties: - configMap: - nullable: true - type: string - type: object - file: - nullable: true - properties: - level: - description: Log levels - enum: - - TRACE - - DEBUG - - INFO - - WARN - - ERROR - - FATAL - - NONE - nullable: true - type: string - type: object - loggers: - additionalProperties: - properties: - level: - description: Log levels - enum: - - TRACE - - DEBUG - - INFO - - WARN - - ERROR - - FATAL - - NONE - nullable: true - type: string - type: object - default: {} - type: object - type: object - type: object - enableVectorAgent: - nullable: true - type: boolean + envOverrides: + additionalProperties: + type: string + default: {} type: object podOverrides: default: {} @@ -3401,70 +3480,6 @@ spec: type: array type: object type: object - resources: - default: - memory: - limit: null - runtimeLimits: {} - cpu: - min: null - max: null - storage: {} - properties: - cpu: - default: - min: null - max: null - properties: - max: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - type: object - memory: - properties: - limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.)
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - runtimeLimits: - type: object - type: object - storage: - type: object - type: object - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume within a container. - properties: - mountPath: - description: Path within the container at which the volume should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - nullable: true - type: array type: object env: items: @@ -3545,147 +3560,240 @@ spec: executor: nullable: true properties: - affinity: - default: - podAffinity: null - podAntiAffinity: null - nodeAffinity: null - nodeSelector: null + cliOverrides: + additionalProperties: + type: string + default: {} + type: object + config: + default: {} properties: - nodeAffinity: - description: Node affinity is a group of node affinity scheduling rules. - nullable: true + affinity: + default: + podAffinity: null + podAntiAffinity: null + nodeAffinity: null + nodeSelector: null properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. + nodeAffinity: + description: Node affinity is a group of node affinity scheduling rules. + nullable: true + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. 
The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + nodeSelector: + nullable: true + type: object + podAffinity: + description: Pod affinity is a group of inter pod affinity scheduling rules. + nullable: true properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) properties: - matchExpressions: - description: A list of node selector requirements by node's labels. 
- items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight type: object type: array - required: - - nodeSelectorTerms - type: object - type: object - nodeSelector: - nullable: true - type: object - podAffinity: - description: Pod affinity is a group of inter pod affinity scheduling rules. - nullable: true - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. 
+ requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running properties: labelSelector: description: A label query over a set of resources, in this case pods. @@ -3758,104 +3866,104 @@ spec: required: - topologyKey type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: array + type: object + podAntiAffinity: + description: Pod anti affinity is a group of inter pod anti affinity scheduling rules. + nullable: true + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Pod anti affinity is a group of inter pod anti affinity scheduling rules. - nullable: true - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running properties: labelSelector: description: A label query over a set of resources, in this case pods. @@ -3928,185 +4036,161 @@ spec: required: - topologyKey type: object - weight: - description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: array + type: object + type: object + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` properties: - labelSelector: - description: A label query over a set of resources, in this case pods. + console: + nullable: true properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string type: object - namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + custom: + description: Custom log configuration provided in a ConfigMap properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object + type: object + default: {} type: object - namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey type: object - type: array + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object + resources: + default: + memory: + limit: null + runtimeLimits: {} + cpu: + min: null + max: null + storage: {} + properties: + cpu: + default: + min: null + max: null + properties: + max: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + min: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + type: object + memory: + properties: + limit: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + runtimeLimits: + type: object + type: object + storage: + type: object type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name type: object - type: object - enableVectorAgent: nullable: true - type: boolean + type: array + type: object + configOverrides: + additionalProperties: + additionalProperties: + type: string + type: object + default: {} type: object - nodeSelector: + envOverrides: additionalProperties: type: string - nullable: true + default: {} type: object podOverrides: default: {} @@ -6900,70 +6984,42 @@ spec: type: array type: object type: object - resources: - default: - memory: - limit: null - runtimeLimits: {} - cpu: - min: null - max: null - storage: {} + replicas: + format: uint16 + minimum: 0.0 + nullable: true + type: integer + selector: + description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + nullable: true properties: - cpu: - default: - min: null - max: null - properties: - max: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - type: object - memory: - properties: - limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.)
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - runtimeLimits: - type: object - type: object - storage: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume within a container. - properties: - mountPath: - description: Path within the container at which the volume should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - nullable: true - type: array type: object image: nullable: true @@ -6971,81 +7027,138 @@ spec: job: nullable: true properties: - logging: - default: - enableVectorAgent: null - containers: {} + cliOverrides: + additionalProperties: + type: string + default: {} + type: object + config: + default: {} properties: - containers: - additionalProperties: - anyOf: - - required: - - custom - - {} - description: Fragment derived from `ContainerLogConfigChoice` - properties: - console: - nullable: true - properties: - level: - description: Log levels - enum: - - TRACE - - DEBUG - - INFO - - WARN - - ERROR - - FATAL - - NONE - nullable: true - type: string - type: object - custom: - description: Custom log configuration provided in a ConfigMap + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` properties: - configMap: + console: nullable: true - type: string - type: object - file: - nullable: true - properties: - level: - description: Log levels - enum: - - TRACE - - DEBUG - - INFO - - WARN - - ERROR - - FATAL - - NONE + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: nullable: true - type: string + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object type: object - loggers: - additionalProperties: - properties: - level: - description: Log levels - enum: - - TRACE - - DEBUG - - INFO - - WARN - - ERROR - - FATAL - - NONE - nullable: true - type: string + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object + resources: + default: + memory: + limit: null + runtimeLimits: {} + cpu: + min: null + max: null + storage: {} + properties: + cpu: + default: + min: null + max: null + properties: + max: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + min: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." 
+ nullable: true + type: string + type: object + memory: + properties: + limit: + description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + nullable: true + type: string + runtimeLimits: type: object - default: {} - type: object - type: object + type: object + storage: + type: object type: object - enableVectorAgent: - nullable: true - type: boolean + type: object + configOverrides: + additionalProperties: + additionalProperties: + type: string + type: object + default: {} + type: object + envOverrides: + additionalProperties: + type: string + default: {} type: object podOverrides: default: {} @@ -9839,42 +9952,6 @@ spec: type: array type: object type: object - resources: - default: - memory: - limit: null - runtimeLimits: {} - cpu: - min: null - max: null - storage: {} - properties: - cpu: - default: - min: null - max: null - properties: - max: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." - nullable: true - type: string - type: object - memory: - properties: - limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." 
- nullable: true - type: string - runtimeLimits: - type: object - type: object - storage: - type: object - type: object type: object logFileDirectory: nullable: true diff --git a/docs/modules/spark-k8s/examples/example-history-app.yaml b/docs/modules/spark-k8s/examples/example-history-app.yaml index 2323ead9..825415b7 100644 --- a/docs/modules/spark-k8s/examples/example-history-app.yaml +++ b/docs/modules/spark-k8s/examples/example-history-app.yaml @@ -32,4 +32,4 @@ spec: credentials: secretClass: history-credentials-class # <6> executor: - instances: 1 + replicas: 1 diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml index 6948e67a..ee49c0db 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml @@ -20,11 +20,13 @@ spec: sparkConf: "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider" driver: - volumeMounts: - - name: cm-job-arguments # <6> - mountPath: /arguments # <7> + config: + volumeMounts: + - name: cm-job-arguments # <6> + mountPath: /arguments # <7> executor: - instances: 3 - volumeMounts: - - name: cm-job-arguments # <6> - mountPath: /arguments # <7> + replicas: 3 + config: + volumeMounts: + - name: cm-job-arguments # <6> + mountPath: /arguments # <7> diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml index 45838630..348b8a20 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml @@ -24,11 +24,13 @@ spec: persistentVolumeClaim: claimName: pvc-ksv driver: - volumeMounts: - - name: job-deps - mountPath: /dependencies # <6> + config: + volumeMounts: + - name: job-deps + mountPath: /dependencies # <6> executor: - instances: 3 - volumeMounts: - - name: job-deps - mountPath: /dependencies # <6> + replicas: 3 + config: + volumeMounts: + - name: job-deps + mountPath: /dependencies # <6> diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml index 0d74f282..2671d519 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml @@ -19,24 +19,27 @@ spec: sparkConf: # <5> "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider" job: - resources: - cpu: - min: "1" - max: "1" - memory: - limit: "1Gi" + config: + resources: + cpu: + min: "1" + max: "1" + memory: + limit: "1Gi" driver: - resources: - cpu: - min: "1" - max: "1500m" - memory: - limit: "1Gi" + config: + resources: + cpu: + min: "1" + max: "1500m" + memory: + limit: "1Gi" executor: - instances: 3 - resources: - cpu: - min: "1" - max: "4" - memory: - limit: "2Gi" + replicas: 3 + config: + resources: + cpu: + min: "1" + max: "4" + memory: + limit: "2Gi" diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml index 120e09b6..1beb37ff 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml @@ -22,11 +22,13 @@ spec: persistentVolumeClaim: claimName: pvc-ksv driver: - volumeMounts: - - name: job-deps - mountPath: 
/dependencies # <5> + config: + volumeMounts: + - name: job-deps + mountPath: /dependencies # <5> executor: - instances: 3 - volumeMounts: - - name: job-deps - mountPath: /dependencies # <5> + replicas: 3 + config: + volumeMounts: + - name: job-deps + mountPath: /dependencies # <5> diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml index e3c013e0..bf65af2e 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml @@ -22,4 +22,4 @@ spec: spark.driver.extraClassPath: "/dependencies/jars/hadoop-aws-3.2.0.jar:/dependencies/jars/aws-java-sdk-bundle-1.11.375.jar" spark.executor.extraClassPath: "/dependencies/jars/hadoop-aws-3.2.0.jar:/dependencies/jars/aws-java-sdk-bundle-1.11.375.jar" executor: - instances: 3 + replicas: 3 diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml index 2cf0d96a..06b4ab90 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml @@ -17,17 +17,19 @@ spec: spark.kubernetes.driver.pod.name: "pyspark-streaming-driver" spark.kubernetes.executor.podNamePrefix: "pyspark-streaming" driver: - resources: - cpu: - min: "1" - max: "2" - memory: - limit: "1Gi" + config: + resources: + cpu: + min: "1" + max: "2" + memory: + limit: "1Gi" executor: - instances: 1 - resources: - cpu: - min: "1700m" - max: "3" - memory: - limit: "2Gi" + replicas: 1 + config: + resources: + cpu: + min: "1700m" + max: "3" + memory: + limit: "2Gi" diff --git a/docs/modules/spark-k8s/pages/usage-guide/resources.adoc b/docs/modules/spark-k8s/pages/usage-guide/resources.adoc index 58f76e69..e1ecc929 100644 --- a/docs/modules/spark-k8s/pages/usage-guide/resources.adoc +++ b/docs/modules/spark-k8s/pages/usage-guide/resources.adoc @@ -7,26 +7,29 @@ If no resources are configured explicitly, the operator uses the following defau [source,yaml] ---- job: - resources: - cpu: - min: '100m' - max: "400m" - memory: - limit: '512Mi' + config: + resources: + cpu: + min: '100m' + max: "400m" + memory: + limit: '512Mi' driver: - resources: - cpu: - min: '250m' - max: "1" - memory: - limit: '1Gi' + config: + resources: + cpu: + min: '250m' + max: "1" + memory: + limit: '1Gi' executor: - resources: - cpu: - min: '250m' - max: "1" - memory: - limit: '4Gi' + config: + resources: + cpu: + min: '250m' + max: "1" + memory: + limit: '4Gi' ---- For `SparkHistoryServer`s the following defaults are used: @@ -48,4 +51,3 @@ For more details regarding Kubernetes CPU limits see: https://kubernetes.io/docs Spark allocates a default amount of non-heap memory based on the type of job (JVM or non-JVM). This is taken into account when defining memory settings based exclusively on the resource limits, so that the "declared" value is the actual total value (i.e. including memory overhead). This may result in minor deviations from the stated resource value due to rounding differences. NOTE: It is possible to define Spark resources either directly by setting configuration properties listed under `sparkConf`, or by using resource limits. If both are used, then `sparkConf` properties take precedence. It is recommended for the sake of clarity to use *_either_* one *_or_* the other. 
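A hypothetical fragment illustrating the note above (`spark.executor.memory` is a standard Spark property, chosen here only as an example of the overlap; this sketch is not part of the operator's shipped docs):

[source,yaml]
----
spec:
  sparkConf:
    spark.executor.memory: "3g" # if both are set, this sparkConf value takes precedence
  executor:
    replicas: 1
    config:
      resources:
        memory:
          limit: "2Gi" # used to derive executor memory only when sparkConf does not set it
----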
-
diff --git a/examples/ny-tlc-report-external-dependencies.yaml b/examples/ny-tlc-report-external-dependencies.yaml
index 824fa8cf..c2c9b516 100644
--- a/examples/ny-tlc-report-external-dependencies.yaml
+++ b/examples/ny-tlc-report-external-dependencies.yaml
@@ -30,11 +30,13 @@ spec:
       persistentVolumeClaim:
         claimName: pvc-ksv
   driver:
-    volumeMounts:
-      - name: job-deps
-        mountPath: /dependencies
+    config:
+      volumeMounts:
+        - name: job-deps
+          mountPath: /dependencies
   executor:
-    instances: 3
-    volumeMounts:
-      - name: job-deps
-        mountPath: /dependencies
+    replicas: 3
+    config:
+      volumeMounts:
+        - name: job-deps
+          mountPath: /dependencies
diff --git a/examples/ny-tlc-report-image.yaml b/examples/ny-tlc-report-image.yaml
index d5b74d23..a5c16feb 100644
--- a/examples/ny-tlc-report-image.yaml
+++ b/examples/ny-tlc-report-image.yaml
@@ -25,4 +25,4 @@ spec:
   sparkConf:
     spark.hadoop.fs.s3a.aws.credentials.provider: "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider"
   executor:
-    instances: 3
+    replicas: 3
diff --git a/examples/ny-tlc-report.yaml b/examples/ny-tlc-report.yaml
index c4c11cc4..c4fb8e44 100644
--- a/examples/ny-tlc-report.yaml
+++ b/examples/ny-tlc-report.yaml
@@ -32,11 +32,13 @@ spec:
   sparkConf:
     spark.hadoop.fs.s3a.aws.credentials.provider: "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider"
   driver:
-    volumeMounts:
-      - name: cm-job-arguments
-        mountPath: /arguments
+    config:
+      volumeMounts:
+        - name: cm-job-arguments
+          mountPath: /arguments
   executor:
-    instances: 3
-    volumeMounts:
-      - name: cm-job-arguments
-        mountPath: /arguments
+    replicas: 3
+    config:
+      volumeMounts:
+        - name: cm-job-arguments
+          mountPath: /arguments
diff --git a/rust/crd/Cargo.toml b/rust/crd/Cargo.toml
index 8153a6e6..8326aa55 100644
--- a/rust/crd/Cargo.toml
+++ b/rust/crd/Cargo.toml
@@ -20,3 +20,4 @@ tracing.workspace = true
 [dev-dependencies]
 rstest.workspace = true
 serde_yaml.workspace = true
+indoc.workspace = true
diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs
index 515df0b4..7485f7c9 100644
--- a/rust/crd/src/lib.rs
+++ b/rust/crd/src/lib.rs
@@ -3,38 +3,29 @@
 pub mod affinity;
 pub mod constants;
 pub mod history;
+pub mod roles;
 pub mod s3logdir;
 pub mod tlscerts;
 
-use std::{
-    cmp::max,
-    collections::{BTreeMap, HashMap},
-    slice,
-};
-
+pub use crate::roles::*;
 use constants::*;
 use history::LogFileDirectorySpec;
 use s3logdir::S3LogDir;
-
 use serde::{Deserialize, Serialize};
 use snafu::{OptionExt, ResultExt, Snafu};
+use stackable_operator::product_config::ProductConfigManager;
+use stackable_operator::product_config_utils::{
+    transform_all_roles_to_config, validate_all_roles_and_groups_config,
+    ValidatedRoleConfigByPropertyKind,
+};
 use stackable_operator::{
     builder::{SecretOperatorVolumeSourceBuilder, VolumeBuilder},
     commons::{
-        affinity::{StackableAffinity, StackableAffinityFragment},
-        product_image_selection::ProductImage,
-        resources::{
-            CpuLimits, CpuLimitsFragment, MemoryLimits, MemoryLimitsFragment, NoRuntimeLimits,
-            NoRuntimeLimitsFragment, Resources, ResourcesFragment,
-        },
+        product_image_selection::{ProductImage, ResolvedProductImage},
+        resources::{CpuLimits, MemoryLimits, Resources},
         s3::{S3AccessStyle, S3ConnectionDef, S3ConnectionSpec},
     },
-    config::{
-        fragment,
-        fragment::Fragment,
-        fragment::ValidationError,
-        merge::{Atomic, Merge},
-    },
+    config::{fragment, fragment::ValidationError, merge::Merge},
     k8s_openapi::{
         api::core::v1::{EmptyDirVolumeSource, EnvVar, PodTemplateSpec, Volume, VolumeMount},
         apimachinery::pkg::api::resource::Quantity,
@@ -42,11 +33,15
@@ use stackable_operator::{ kube::{CustomResource, ResourceExt}, labels::ObjectLabels, memory::{BinaryMultiple, MemoryQuantity}, - product_logging::{self, spec::Logging}, - role_utils::pod_overrides_schema, + product_config::types::PropertyNameKind, + product_logging, + role_utils::{CommonConfiguration, Role, RoleGroup}, schemars::{self, JsonSchema}, }; -use strum::{Display, EnumIter}; +use std::{ + cmp::max, + collections::{BTreeMap, HashMap}, +}; #[derive(Snafu, Debug)] pub enum Error { @@ -69,13 +64,14 @@ pub enum Error { FailedParseToFloatConversion, #[snafu(display("fragment validation failure"))] FragmentValidationFailure { source: ValidationError }, -} - -#[derive(Clone, Debug, Deserialize, Display, Eq, PartialEq, Serialize, JsonSchema)] -#[strum(serialize_all = "kebab-case")] -pub enum SparkApplicationRole { - Driver, - Executor, + #[snafu(display("failed to transform configs"))] + ProductConfigTransform { + source: stackable_operator::product_config_utils::ConfigError, + }, + #[snafu(display("invalid product config"))] + InvalidProductConfig { + source: stackable_operator::error::Error, + }, } #[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)] @@ -85,69 +81,6 @@ pub struct SparkApplicationStatus { pub phase: String, } -#[derive(Clone, Debug, Default, JsonSchema, PartialEq, Fragment)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[fragment_attrs( - derive( - Clone, - Debug, - Default, - Deserialize, - Merge, - JsonSchema, - PartialEq, - Serialize, - ), - allow(clippy::derive_partial_eq_without_eq), - serde(rename_all = "camelCase") -)] -pub struct SparkStorageConfig {} - -#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)] -#[fragment_attrs( - derive( - Clone, - Debug, - Default, - Deserialize, - Merge, - JsonSchema, - PartialEq, - Serialize - ), - serde(rename_all = "camelCase") -)] -pub struct SparkConfig { - #[fragment_attrs(serde(default))] - pub resources: Resources, - #[fragment_attrs(serde(default))] - pub logging: Logging, - #[fragment_attrs(serde(default))] - #[fragment_attrs(schemars(schema_with = "pod_overrides_schema"))] - pub pod_overrides: PodTemplateSpec, -} - -impl SparkConfig { - /// The resources requested here are applied to the spark-submit Pod. 
-    fn default_config() -> SparkConfigFragment {
-        SparkConfigFragment {
-            resources: ResourcesFragment {
-                cpu: CpuLimitsFragment {
-                    min: Some(Quantity("100m".to_owned())),
-                    max: Some(Quantity("400m".to_owned())),
-                },
-                memory: MemoryLimitsFragment {
-                    limit: Some(Quantity("512Mi".to_owned())),
-                    runtime_limits: NoRuntimeLimitsFragment {},
-                },
-                storage: SparkStorageConfigFragment {},
-            },
-            logging: product_logging::spec::default_logging(),
-            pod_overrides: PodTemplateSpec::default(),
-        }
-    }
-}
-
 #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)]
 #[kube(
     group = "spark.stackable.tech",
@@ -180,11 +113,11 @@ pub struct SparkApplicationSpec {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub vector_aggregator_config_map_name: Option<String>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub job: Option<SparkConfigFragment>,
+    pub job: Option<CommonConfiguration<SubmitConfigFragment>>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub driver: Option<DriverConfigFragment>,
+    pub driver: Option<CommonConfiguration<RoleConfigFragment>>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub executor: Option<ExecutorConfigFragment>,
+    pub executor: Option<RoleGroup<RoleConfigFragment>>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub stopped: Option<bool>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
@@ -347,26 +280,6 @@ impl SparkApplication {
         self.add_common_volume_mounts(volume_mounts, s3conn, s3logdir)
     }
 
-    pub fn executor_volume_mounts(
-        &self,
-        config: &ExecutorConfig,
-        s3conn: &Option<S3ConnectionSpec>,
-        s3logdir: &Option<S3LogDir>,
-    ) -> Vec<VolumeMount> {
-        let volume_mounts = config.volume_mounts.clone().unwrap_or_default().into();
-        self.add_common_volume_mounts(volume_mounts, s3conn, s3logdir)
-    }
-
-    pub fn driver_volume_mounts(
-        &self,
-        config: &DriverConfig,
-        s3conn: &Option<S3ConnectionSpec>,
-        s3logdir: &Option<S3LogDir>,
-    ) -> Vec<VolumeMount> {
-        let volume_mounts = config.volume_mounts.clone().unwrap_or_default().into();
-        self.add_common_volume_mounts(volume_mounts, s3conn, s3logdir)
-    }
-
     fn add_common_volume_mounts(
         &self,
         mut mounts: Vec<VolumeMount>,
@@ -573,13 +486,12 @@
             &mut submit_conf,
         )?;
 
-        if let Some(executors) = &self.spec.executor {
-            if let Some(instances) = executors.instances {
-                submit_conf.insert(
-                    "spark.executor.instances".to_string(),
-                    instances.to_string(),
-                );
-            }
+        if let Some(RoleGroup {
+            replicas: Some(replicas),
+            ..
+        }) = &self.spec.executor
+        {
+            submit_conf.insert("spark.executor.instances".to_string(), replicas.to_string());
         }
 
         if let Some(log_dir) = s3_log_dir {
@@ -640,22 +552,154 @@ impl SparkApplication {
         e
     }
 
-    pub fn job_config(&self) -> Result<SparkConfig, Error> {
-        let mut config = self.spec.job.clone().unwrap_or_default();
-        config.merge(&SparkConfig::default_config());
-        fragment::validate(config).context(FragmentValidationFailureSnafu)
+    pub fn submit_config(&self) -> Result<SubmitConfig, Error> {
+        if let Some(CommonConfiguration { mut config, .. }) = self.spec.job.clone() {
+            config.merge(&SubmitConfig::default_config());
+            fragment::validate(config).context(FragmentValidationFailureSnafu)
+        } else {
+            fragment::validate(SubmitConfig::default_config())
+                .context(FragmentValidationFailureSnafu)
+        }
     }
 
-    pub fn driver_config(&self) -> Result<DriverConfig, Error> {
-        let mut config = self.spec.driver.clone().unwrap_or_default();
-        config.merge(&DriverConfig::default_config());
-        fragment::validate(config).context(FragmentValidationFailureSnafu)
+    pub fn driver_config(&self) -> Result<RoleConfig, Error> {
+        if let Some(CommonConfiguration { mut config, .. }) = self.spec.driver.clone() {
+            config.merge(&RoleConfig::default_config());
+            fragment::validate(config).context(FragmentValidationFailureSnafu)
+        } else {
+            fragment::validate(RoleConfig::default_config()).context(FragmentValidationFailureSnafu)
+        }
     }
 
-    pub fn executor_config(&self) -> Result<ExecutorConfig, Error> {
-        let mut config = self.spec.executor.clone().unwrap_or_default();
-        config.merge(&ExecutorConfig::default_config());
-        fragment::validate(config).context(FragmentValidationFailureSnafu)
+    pub fn executor_config(&self) -> Result<RoleConfig, Error> {
+        if let Some(RoleGroup {
+            config: CommonConfiguration { mut config, .. },
+            ..
+        }) = self.spec.executor.clone()
+        {
+            config.merge(&RoleConfig::default_config());
+            fragment::validate(config).context(FragmentValidationFailureSnafu)
+        } else {
+            fragment::validate(RoleConfig::default_config()).context(FragmentValidationFailureSnafu)
+        }
+    }
+
+    pub fn pod_overrides(&self, role: SparkApplicationRole) -> Option<PodTemplateSpec> {
+        match role {
+            SparkApplicationRole::Submit => self.spec.job.clone().map(|j| j.pod_overrides),
+            SparkApplicationRole::Driver => self.spec.driver.clone().map(|d| d.pod_overrides),
+            SparkApplicationRole::Executor => {
+                self.spec.executor.clone().map(|r| r.config.pod_overrides)
+            }
+        }
+    }
+
+    pub fn validated_role_config(
+        &self,
+        resolved_product_image: &ResolvedProductImage,
+        product_config: &ProductConfigManager,
+    ) -> Result<ValidatedRoleConfigByPropertyKind, Error> {
+        let submit_conf = if self.spec.job.is_some() {
+            self.spec.job.as_ref().unwrap().clone()
+        } else {
+            CommonConfiguration {
+                config: SubmitConfig::default_config(),
+                ..CommonConfiguration::default()
+            }
+        };
+
+        let driver_conf = if self.spec.driver.is_some() {
+            self.spec.driver.as_ref().unwrap().clone()
+        } else {
+            CommonConfiguration {
+                config: RoleConfig::default_config(),
+                ..CommonConfiguration::default()
+            }
+        };
+
+        let executor_conf = if self.spec.executor.is_some() {
+            self.spec.executor.as_ref().unwrap().clone()
+        } else {
+            RoleGroup {
+                replicas: Some(1),
+                config: CommonConfiguration {
+                    config: RoleConfig::default_config(),
+                    ..CommonConfiguration::default()
+                },
+                selector: None,
+            }
+        };
+
+        let mut roles_to_validate = HashMap::new();
+        roles_to_validate.insert(
+            SparkApplicationRole::Submit.to_string(),
+            (
+                vec![
+                    PropertyNameKind::Env,
+                    PropertyNameKind::File(JVM_SECURITY_PROPERTIES_FILE.to_string()),
+                ],
+                Role {
+                    config: submit_conf.clone(),
+                    role_groups: [(
+                        "default".to_string(),
+                        RoleGroup {
+                            config: submit_conf,
+                            replicas: Some(1),
+                            selector: None,
+                        },
+                    )]
+                    .into(),
+                }
+                .erase(),
+            ),
+        );
+        roles_to_validate.insert(
+            SparkApplicationRole::Driver.to_string(),
+            (
+                vec![
+                    PropertyNameKind::Env,
+                    PropertyNameKind::File(JVM_SECURITY_PROPERTIES_FILE.to_string()),
+                ],
+                Role {
+                    config: driver_conf.clone(),
+                    role_groups: [(
+                        "default".to_string(),
+                        RoleGroup {
+                            config: driver_conf,
+                            replicas: Some(1),
+                            selector: None,
+                        },
+                    )]
+                    .into(),
+                }
+                .erase(),
+            ),
+        );
+        roles_to_validate.insert(
+            SparkApplicationRole::Executor.to_string(),
+            (
+                vec![
+                    PropertyNameKind::Env,
+                    PropertyNameKind::File(JVM_SECURITY_PROPERTIES_FILE.to_string()),
+                ],
+                Role {
+                    config: executor_conf.config.clone(),
+                    role_groups: [("default".to_string(), executor_conf)].into(),
+                }
+                .erase(),
+            ),
+        );
+
+        let role_config = transform_all_roles_to_config(self, roles_to_validate);
+
+        validate_all_roles_and_groups_config(
+            &resolved_product_image.product_version,
+            &role_config.context(ProductConfigTransformSnafu)?,
+            product_config,
+            false,
+            false,
+        )
+        .context(InvalidProductConfigSnafu)
+    }
} @@ -729,7 +773,7 @@ fn subtract_spark_memory_overhead(for_java: bool, limit: &Quantity) -> Result, ) -> Result<(), Error> { if let Resources { @@ -781,7 +825,7 @@ fn resources_to_driver_props( /// Spark will use these and *ignore* the resource limits in pod templates entirely. fn resources_to_executor_props( for_java: bool, - executor_config: &ExecutorConfig, + executor_config: &RoleConfig, props: &mut BTreeMap, ) -> Result<(), Error> { if let Resources { @@ -832,428 +876,38 @@ fn resources_to_executor_props( Ok(()) } -#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct VolumeMounts { - pub volume_mounts: Option>, -} - -impl Atomic for VolumeMounts {} - -impl<'a> IntoIterator for &'a VolumeMounts { - type Item = &'a VolumeMount; - type IntoIter = slice::Iter<'a, VolumeMount>; - - fn into_iter(self) -> Self::IntoIter { - self.volume_mounts.as_deref().unwrap_or_default().iter() - } -} - -impl From for Vec { - fn from(value: VolumeMounts) -> Self { - value.volume_mounts.unwrap_or_default() - } -} - -#[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct NodeSelector { - pub node_selector: Option>, -} - -impl Atomic for NodeSelector {} - -#[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct CommonConfig { - pub secret: Option, - pub log_dir: Option, - pub max_port_retries: Option, -} - -#[derive( - Clone, - Debug, - Deserialize, - Display, - Eq, - EnumIter, - JsonSchema, - Ord, - PartialEq, - PartialOrd, - Serialize, -)] -#[serde(rename_all = "kebab-case")] -#[strum(serialize_all = "kebab-case")] -pub enum SubmitJobContainer { - SparkSubmit, - Vector, -} - -#[derive( - Clone, - Debug, - Deserialize, - Display, - Eq, - EnumIter, - JsonSchema, - Ord, - PartialEq, - PartialOrd, - Serialize, -)] -#[serde(rename_all = "kebab-case")] -#[strum(serialize_all = "kebab-case")] -pub enum SparkContainer { - Job, - Requirements, - Spark, - Vector, - Tls, -} - -#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)] -#[fragment_attrs( - derive( - Clone, - Debug, - Default, - Deserialize, - Merge, - JsonSchema, - PartialEq, - Serialize - ), - serde(rename_all = "camelCase") -)] -pub struct DriverConfig { - #[fragment_attrs(serde(default))] - pub resources: Resources, - #[fragment_attrs(serde(default))] - pub logging: Logging, - #[fragment_attrs(serde(default, flatten))] - pub volume_mounts: Option, - #[fragment_attrs(serde(default))] - pub affinity: StackableAffinity, - #[fragment_attrs(serde(default))] - #[fragment_attrs(schemars(schema_with = "pod_overrides_schema"))] - pub pod_overrides: PodTemplateSpec, - #[fragment_attrs(serde(default))] - pub jvm_security: HashMap>, -} - -impl DriverConfig { - fn default_config() -> DriverConfigFragment { - DriverConfigFragment { - resources: ResourcesFragment { - cpu: CpuLimitsFragment { - min: Some(Quantity("250m".to_owned())), - max: Some(Quantity("1".to_owned())), - }, - memory: MemoryLimitsFragment { - limit: Some(Quantity("1Gi".to_owned())), - runtime_limits: NoRuntimeLimitsFragment {}, - }, - storage: SparkStorageConfigFragment {}, - }, - logging: product_logging::spec::default_logging(), - volume_mounts: Some(VolumeMounts::default()), - affinity: StackableAffinityFragment::default(), - pod_overrides: PodTemplateSpec::default(), - jvm_security: vec![ - ( - "networkaddress.cache.ttl".to_string(), - Some("30".to_string()), - 
), - ( - "networkaddress.cache.negative.ttl".to_string(), - Some("0".to_string()), - ), - ] - .into_iter() - .collect(), - } - } -} - -#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)] -#[fragment_attrs( - derive( - Clone, - Debug, - Default, - Deserialize, - Merge, - JsonSchema, - PartialEq, - Serialize - ), - serde(rename_all = "camelCase") -)] -pub struct ExecutorConfig { - #[fragment_attrs(serde(default))] - pub instances: Option, - #[fragment_attrs(serde(default))] - pub resources: Resources, - #[fragment_attrs(serde(default))] - pub logging: Logging, - #[fragment_attrs(serde(default, flatten))] - pub volume_mounts: Option, - #[fragment_attrs(serde(default, flatten))] - pub node_selector: Option, - #[fragment_attrs(serde(default))] - pub affinity: StackableAffinity, - #[fragment_attrs(serde(default))] - #[fragment_attrs(schemars(schema_with = "pod_overrides_schema"))] - pub pod_overrides: PodTemplateSpec, - #[fragment_attrs(serde(default))] - pub jvm_security: HashMap>, -} - -impl ExecutorConfig { - fn default_config() -> ExecutorConfigFragment { - ExecutorConfigFragment { - instances: None, - resources: ResourcesFragment { - cpu: CpuLimitsFragment { - min: Some(Quantity("250m".to_owned())), - max: Some(Quantity("1".to_owned())), - }, - memory: MemoryLimitsFragment { - limit: Some(Quantity("4Gi".to_owned())), - runtime_limits: NoRuntimeLimitsFragment {}, - }, - storage: SparkStorageConfigFragment {}, - }, - logging: product_logging::spec::default_logging(), - volume_mounts: Some(VolumeMounts::default()), - node_selector: Default::default(), - affinity: Default::default(), - pod_overrides: PodTemplateSpec::default(), - jvm_security: vec![ - ( - "networkaddress.cache.ttl".to_string(), - Some("30".to_string()), - ), - ( - "networkaddress.cache.negative.ttl".to_string(), - Some("0".to_string()), - ), - ] - .into_iter() - .collect(), - } - } -} - #[cfg(test)] mod tests { - use crate::DriverConfig; - use crate::{cores_from_quantity, resources_to_executor_props, ExecutorConfig}; + use crate::{cores_from_quantity, resources_to_executor_props, RoleConfig}; use crate::{resources_to_driver_props, SparkApplication}; use crate::{Quantity, SparkStorageConfig}; - use rstest::rstest; - use stackable_operator::builder::ObjectMetaBuilder; use stackable_operator::commons::affinity::StackableAffinity; use stackable_operator::commons::resources::{ CpuLimits, MemoryLimits, NoRuntimeLimits, Resources, }; - use stackable_operator::k8s_openapi::api::core::v1::PodTemplateSpec; + use stackable_operator::product_config::{types::PropertyNameKind, ProductConfigManager}; + use stackable_operator::product_config_utils::ValidatedRoleConfigByPropertyKind; use stackable_operator::product_logging::spec::Logging; - use std::collections::{BTreeMap, HashMap}; - #[test] - fn test_spark_examples_s3() { - let spark_application = serde_yaml::from_str::( - r#" ---- -apiVersion: spark.stackable.tech/v1alpha1 -kind: SparkApplication -metadata: - name: spark-examples-s3 -spec: - version: "1.0" - sparkImage: - productVersion: 3.4.0 - mode: cluster - mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: s3a://stackable-spark-k8s-jars/jobs/spark-examples.jar - sparkConf: - "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider" - driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" - executor: - cores: 1 - instances: 3 - memory: "512m" - config: - enableMonitoring: true - "#).unwrap(); - - assert_eq!("1.0", 
spark_application.spec.version.unwrap_or_default()); - assert_eq!( - Some("org.apache.spark.examples.SparkPi".to_string()), - spark_application.spec.main_class - ); - assert_eq!( - Some("s3a://stackable-spark-k8s-jars/jobs/spark-examples.jar".to_string()), - spark_application.spec.main_application_file - ); - assert_eq!( - Some(1), - spark_application.spec.spark_conf.map(|m| m.keys().len()) - ); - - assert!(spark_application.spec.mode.is_some()); - assert!(spark_application.spec.driver.is_some()); - assert!(spark_application.spec.executor.is_some()); - - assert!(spark_application.spec.args.is_none()); - assert!(spark_application.spec.deps.is_none()); - assert!(spark_application.spec.image.is_none()); - } - - #[test] - fn test_ny_tlc_report_image() { - let spark_application = serde_yaml::from_str::( - r#" ---- -apiVersion: spark.stackable.tech/v1alpha1 -kind: SparkApplication -metadata: - name: ny-tlc-report-image - namespace: my-ns -spec: - version: "1.0" - image: docker.stackable.tech/stackable/ny-tlc-report:0.1.0 - sparkImage: - productVersion: 3.2.1 - mode: cluster - mainApplicationFile: local:///stackable/spark/jobs/ny_tlc_report.py - args: - - "--input 's3a://nyc-tlc/trip data/yellow_tripdata_2021-07.csv'" - deps: - requirements: - - tabulate==0.8.9 - sparkConf: - "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider" - driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" - executor: - cores: 1 - instances: 3 - memory: "512m" - "#).unwrap(); - - assert_eq!("1.0", spark_application.spec.version.unwrap_or_default()); - assert_eq!( - Some("local:///stackable/spark/jobs/ny_tlc_report.py".to_string()), - spark_application.spec.main_application_file - ); - assert_eq!( - Some(1), - spark_application.spec.spark_conf.map(|m| m.keys().len()) - ); - - assert!(spark_application.spec.image.is_some()); - assert!(spark_application.spec.mode.is_some()); - assert!(spark_application.spec.args.is_some()); - assert!(spark_application.spec.deps.is_some()); - assert!(spark_application.spec.driver.is_some()); - assert!(spark_application.spec.executor.is_some()); - - assert!(spark_application.spec.main_class.is_none()); - } - - #[test] - fn test_ny_tlc_report_external_dependencies() { - let spark_application = serde_yaml::from_str::( - r#" ---- -apiVersion: spark.stackable.tech/v1alpha1 -kind: SparkApplication -metadata: - name: ny-tlc-report-external-dependencies - namespace: default - uid: 12345678asdfghj -spec: - version: "1.0" - sparkImage: - productVersion: 3.4.0 - mode: cluster - mainApplicationFile: s3a://stackable-spark-k8s-jars/jobs/ny_tlc_report.py - args: - - "--input 's3a://nyc-tlc/trip data/yellow_tripdata_2021-07.csv'" - deps: - requirements: - - tabulate==0.8.9 - sparkConf: - "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider" - driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" - executor: - cores: 1 - instances: 3 - memory: "512m" - "#).unwrap(); - - let meta = ObjectMetaBuilder::new() - .name_and_namespace(&spark_application) - .ownerreference_from_resource(&spark_application, None, Some(true)) - .unwrap() - .build(); - - assert_eq!("12345678asdfghj", meta.owner_references.unwrap()[0].uid); - - assert_eq!("1.0", spark_application.spec.version.unwrap_or_default()); - assert_eq!( - Some("s3a://stackable-spark-k8s-jars/jobs/ny_tlc_report.py".to_string()), - spark_application.spec.main_application_file - ); - assert_eq!( - Some(1), - spark_application.spec.spark_conf.map(|m| 
m.keys().len()) - ); - - assert!(spark_application.spec.mode.is_some()); - assert!(spark_application.spec.args.is_some()); - assert!(spark_application.spec.deps.is_some()); - assert!(spark_application.spec.driver.is_some()); - assert!(spark_application.spec.executor.is_some()); - - assert!(spark_application.spec.main_class.is_none()); - assert!(spark_application.spec.image.is_none()); - } + use indoc::indoc; + use rstest::rstest; + use std::collections::{BTreeMap, HashMap}; #[test] fn test_default_resource_limits() { - let spark_application = serde_yaml::from_str::( - r#" ---- -apiVersion: spark.stackable.tech/v1alpha1 -kind: SparkApplication -metadata: - name: spark-examples -spec: - sparkImage: - productVersion: 1.2.3 - executor: - instances: 1 - config: - enableMonitoring: true - "#, - ) + let spark_application = serde_yaml::from_str::(indoc! {" + --- + apiVersion: spark.stackable.tech/v1alpha1 + kind: SparkApplication + metadata: + name: spark-examples + spec: + sparkImage: + productVersion: 1.2.3 + "}) .unwrap(); - let job_resources = &spark_application.job_config().unwrap().resources; + let job_resources = &spark_application.submit_config().unwrap().resources; assert_eq!("100m", job_resources.cpu.min.as_ref().unwrap().0); assert_eq!("400m", job_resources.cpu.max.as_ref().unwrap().0); @@ -1268,44 +922,54 @@ spec: #[test] fn test_merged_resource_limits() { - let spark_application = serde_yaml::from_str::( - r#" ---- -apiVersion: spark.stackable.tech/v1alpha1 -kind: SparkApplication -metadata: - name: spark-examples -spec: - sparkImage: - productVersion: 1.2.3 - job: - resources: - cpu: - min: "100m" - max: "200m" - memory: - limit: "1G" - driver: - resources: - cpu: - min: "1" - max: "1300m" - memory: - limit: "512m" - executor: - instances: 1 - resources: - cpu: - min: "500m" - max: "1200m" - memory: - limit: "1Gi" - config: - enableMonitoring: true - "#, - ) + let spark_application = serde_yaml::from_str::(indoc! 
{r#" + --- + apiVersion: spark.stackable.tech/v1alpha1 + kind: SparkApplication + metadata: + name: spark-examples + spec: + sparkImage: + productVersion: 1.2.3 + job: + config: + resources: + cpu: + min: "100m" + max: "200m" + memory: + limit: "1G" + driver: + config: + resources: + cpu: + min: "1" + max: "1300m" + memory: + limit: "512m" + executor: + replicas: 10 + config: + resources: + cpu: + min: "500m" + max: "1200m" + memory: + limit: "1Gi" + "# }) .unwrap(); + assert_eq!( + "200m", + &spark_application + .submit_config() + .unwrap() + .resources + .cpu + .max + .unwrap() + .0 + ); assert_eq!( "1300m", &spark_application @@ -1343,7 +1007,7 @@ spec: #[test] fn test_resource_to_driver_props() { - let driver_config = DriverConfig { + let driver_config = RoleConfig { resources: Resources { memory: MemoryLimits { limit: Some(Quantity("128Mi".to_string())), @@ -1361,8 +1025,6 @@ spec: }, volume_mounts: None, affinity: StackableAffinity::default(), - pod_overrides: PodTemplateSpec::default(), - jvm_security: HashMap::new(), }; let mut props = BTreeMap::new(); @@ -1397,8 +1059,7 @@ spec: #[test] fn test_resource_to_executor_props() { - let executor_config = ExecutorConfig { - instances: Some(1), + let executor_config = RoleConfig { resources: Resources { memory: MemoryLimits { limit: Some(Quantity("512Mi".to_string())), @@ -1415,10 +1076,7 @@ spec: containers: BTreeMap::new(), }, volume_mounts: None, - node_selector: None, affinity: StackableAffinity::default(), - pod_overrides: PodTemplateSpec::default(), - jvm_security: HashMap::new(), }; let mut props = BTreeMap::new(); @@ -1450,4 +1108,63 @@ spec: assert_eq!(expected, props); } + + #[test] + fn test_validated_config() { + let spark_application = serde_yaml::from_str::(indoc! {r#" + --- + apiVersion: spark.stackable.tech/v1alpha1 + kind: SparkApplication + metadata: + name: spark-examples + spec: + sparkImage: + productVersion: 1.2.3 + "#}) + .unwrap(); + + let resolved_product_image = spark_application + .spec + .spark_image + .resolve("spark-k8s", "0.0.0-dev"); + + let product_config = + ProductConfigManager::from_yaml_file("../../deploy/config-spec/properties.yaml") + .unwrap(); + let validated_config = spark_application + .validated_role_config(&resolved_product_image, &product_config) + .unwrap(); + + let expected_role_groups: HashMap< + String, + HashMap>, + > = vec![( + "default".into(), + vec![ + (PropertyNameKind::Env, BTreeMap::new()), + ( + PropertyNameKind::File("security.properties".into()), + vec![ + ("networkaddress.cache.negative.ttl".into(), "0".into()), + ("networkaddress.cache.ttl".into(), "30".into()), + ] + .into_iter() + .collect(), + ), + ] + .into_iter() + .collect(), + )] + .into_iter() + .collect(); + let expected: ValidatedRoleConfigByPropertyKind = vec![ + ("submit".into(), expected_role_groups.clone()), + ("driver".into(), expected_role_groups.clone()), + ("executor".into(), expected_role_groups), + ] + .into_iter() + .collect(); + + assert_eq!(expected, validated_config); + } } diff --git a/rust/crd/src/roles.rs b/rust/crd/src/roles.rs new file mode 100644 index 00000000..42f0e275 --- /dev/null +++ b/rust/crd/src/roles.rs @@ -0,0 +1,273 @@ +//! Roles and configuration for SparkApplications. +//! +//! Spark applications have three roles described by the [`SparkApplicationRole`]. +//! +//! Unlike others, the Spark application controller doesn't create objects +//! like Pods, Services, etc. for these roles directly, but instead it delegates +//! this responsibility to the Submit job. +//! +//! 
The submit job only supports one group per role. For this reason, the +//! [`SparkApplication`] spec doesn't declare Role objects directly. Instead it +//! only declares [`stackable_operator::role_utils::CommonConfiguration`] objects for job, +//! driver and executor and constructs the Roles dynamically when needed. The only group under +//! each role is named "default". These roles are transparent to the user. +//! +//! The history server has its own role completely unrelated to this module. +use std::{collections::BTreeMap, slice}; + +use serde::{Deserialize, Serialize}; + +use crate::s3logdir::S3LogDir; +use crate::SparkApplication; +use stackable_operator::{ + commons::{ + affinity::StackableAffinity, + resources::{ + CpuLimitsFragment, MemoryLimitsFragment, NoRuntimeLimits, NoRuntimeLimitsFragment, + Resources, ResourcesFragment, + }, + s3::S3ConnectionSpec, + }, + config::{ + fragment::Fragment, + merge::{Atomic, Merge}, + }, + k8s_openapi::{api::core::v1::VolumeMount, apimachinery::pkg::api::resource::Quantity}, + product_config_utils::Configuration, + product_logging::{self, spec::Logging}, + schemars::{self, JsonSchema}, +}; +use strum::{Display, EnumIter}; + +#[derive(Clone, Debug, Deserialize, Display, Eq, PartialEq, Serialize, JsonSchema)] +#[strum(serialize_all = "kebab-case")] +pub enum SparkApplicationRole { + Submit, + Driver, + Executor, +} + +#[derive(Clone, Debug, Default, JsonSchema, PartialEq, Fragment)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[fragment_attrs( + derive( + Clone, + Debug, + Default, + Deserialize, + Merge, + JsonSchema, + PartialEq, + Serialize, + ), + allow(clippy::derive_partial_eq_without_eq), + serde(rename_all = "camelCase") +)] +pub struct SparkStorageConfig {} + +#[derive( + Clone, + Debug, + Deserialize, + Display, + Eq, + EnumIter, + JsonSchema, + Ord, + PartialEq, + PartialOrd, + Serialize, +)] +#[serde(rename_all = "kebab-case")] +#[strum(serialize_all = "kebab-case")] +pub enum SparkContainer { + SparkSubmit, + Job, + Requirements, + Spark, + Vector, + Tls, +} + +#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)] +#[fragment_attrs( + derive( + Clone, + Debug, + Default, + Deserialize, + Merge, + JsonSchema, + PartialEq, + Serialize + ), + serde(rename_all = "camelCase") +)] +pub struct RoleConfig { + #[fragment_attrs(serde(default))] + pub resources: Resources, + #[fragment_attrs(serde(default))] + pub logging: Logging, + #[fragment_attrs(serde(default, flatten))] + pub volume_mounts: Option, + #[fragment_attrs(serde(default))] + pub affinity: StackableAffinity, +} + +impl RoleConfig { + pub fn default_config() -> RoleConfigFragment { + RoleConfigFragment { + resources: ResourcesFragment { + cpu: CpuLimitsFragment { + min: Some(Quantity("250m".to_owned())), + max: Some(Quantity("1".to_owned())), + }, + memory: MemoryLimitsFragment { + limit: Some(Quantity("1Gi".to_owned())), + runtime_limits: NoRuntimeLimitsFragment {}, + }, + storage: SparkStorageConfigFragment {}, + }, + logging: product_logging::spec::default_logging(), + volume_mounts: Some(VolumeMounts::default()), + affinity: Default::default(), + } + } + pub fn volume_mounts( + &self, + spark_application: &SparkApplication, + s3conn: &Option, + s3logdir: &Option, + ) -> Vec { + let volume_mounts = self.volume_mounts.clone().unwrap_or_default().into(); + spark_application.add_common_volume_mounts(volume_mounts, s3conn, s3logdir) + } +} + +impl Configuration for RoleConfigFragment { + type Configurable = SparkApplication; + + fn compute_env( + &self, + 
+impl Configuration for RoleConfigFragment {
+    type Configurable = SparkApplication;
+
+    fn compute_env(
+        &self,
+        _resource: &Self::Configurable,
+        _role_name: &str,
+    ) -> stackable_operator::product_config_utils::ConfigResult<BTreeMap<String, Option<String>>>
+    {
+        Ok(BTreeMap::new())
+    }
+
+    fn compute_cli(
+        &self,
+        _resource: &Self::Configurable,
+        _role_name: &str,
+    ) -> stackable_operator::product_config_utils::ConfigResult<BTreeMap<String, Option<String>>>
+    {
+        Ok(BTreeMap::new())
+    }
+
+    fn compute_files(
+        &self,
+        _resource: &Self::Configurable,
+        _role_name: &str,
+        _file: &str,
+    ) -> stackable_operator::product_config_utils::ConfigResult<BTreeMap<String, Option<String>>>
+    {
+        Ok(BTreeMap::new())
+    }
+}
+
+#[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)]
+#[fragment_attrs(
+    derive(
+        Clone,
+        Debug,
+        Default,
+        Deserialize,
+        Merge,
+        JsonSchema,
+        PartialEq,
+        Serialize
+    ),
+    serde(rename_all = "camelCase")
+)]
+pub struct SubmitConfig {
+    #[fragment_attrs(serde(default))]
+    pub resources: Resources<SparkStorageConfig, NoRuntimeLimits>,
+    #[fragment_attrs(serde(default))]
+    pub logging: Logging<SparkContainer>,
+}
+
+impl SubmitConfig {
+    pub fn default_config() -> SubmitConfigFragment {
+        SubmitConfigFragment {
+            resources: ResourcesFragment {
+                cpu: CpuLimitsFragment {
+                    min: Some(Quantity("100m".to_owned())),
+                    max: Some(Quantity("400m".to_owned())),
+                },
+                memory: MemoryLimitsFragment {
+                    limit: Some(Quantity("512Mi".to_owned())),
+                    runtime_limits: NoRuntimeLimitsFragment {},
+                },
+                storage: SparkStorageConfigFragment {},
+            },
+            logging: product_logging::spec::default_logging(),
+        }
+    }
+}
+
+impl Configuration for SubmitConfigFragment {
+    type Configurable = SparkApplication;
+
+    fn compute_env(
+        &self,
+        _resource: &Self::Configurable,
+        _role_name: &str,
+    ) -> stackable_operator::product_config_utils::ConfigResult<BTreeMap<String, Option<String>>>
+    {
+        Ok(BTreeMap::new())
+    }
+
+    fn compute_cli(
+        &self,
+        _resource: &Self::Configurable,
+        _role_name: &str,
+    ) -> stackable_operator::product_config_utils::ConfigResult<BTreeMap<String, Option<String>>>
+    {
+        Ok(BTreeMap::new())
+    }
+
+    fn compute_files(
+        &self,
+        _resource: &Self::Configurable,
+        _role_name: &str,
+        _file: &str,
+    ) -> stackable_operator::product_config_utils::ConfigResult<BTreeMap<String, Option<String>>>
+    {
+        Ok(BTreeMap::new())
+    }
+}
+
+// TODO: remove this when switching to pod overrides
+#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct VolumeMounts {
+    pub volume_mounts: Option<Vec<VolumeMount>>,
+}
+
+impl Atomic for VolumeMounts {}
+
+impl<'a> IntoIterator for &'a VolumeMounts {
+    type Item = &'a VolumeMount;
+    type IntoIter = slice::Iter<'a, VolumeMount>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.volume_mounts.as_deref().unwrap_or_default().iter()
+    }
+}
+
+impl From<VolumeMounts> for Vec<VolumeMount> {
+    fn from(value: VolumeMounts) -> Self {
+        value.volume_mounts.unwrap_or_default()
+    }
+}
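The two conversions above let callers either borrow the mounts or take ownership of them. A small usage sketch (the mount values are taken from the spark-ny-public-s3 test template further below):

let mounts = VolumeMounts {
    volume_mounts: Some(vec![VolumeMount {
        name: "cm-job-arguments".to_string(),
        mount_path: "/arguments".to_string(),
        ..VolumeMount::default()
    }]),
};
for mount in &mounts {
    // borrowing iteration via the IntoIterator impl
    println!("{} -> {}", mount.name, mount.mount_path);
}
let owned: Vec<VolumeMount> = mounts.into(); // ownership via the From impl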
diff --git a/rust/operator-binary/src/history_controller.rs b/rust/operator-binary/src/history_controller.rs
index 0942dcc2..de8bc78b 100644
--- a/rust/operator-binary/src/history_controller.rs
+++ b/rust/operator-binary/src/history_controller.rs
@@ -1,4 +1,5 @@
 use crate::product_logging::{self, resolve_vector_aggregator_address};
+use crate::Ctx;
 use stackable_operator::{
     builder::{ConfigMapBuilder, ContainerBuilder, ObjectMetaBuilder, PodBuilder, VolumeBuilder},
     cluster_resources::{ClusterResourceApplyStrategy, ClusterResources},
@@ -19,9 +20,7 @@ use stackable_operator::{
         Resource, ResourceExt,
     },
     labels::{role_group_selector_labels, role_selector_labels, ObjectLabels},
-    product_config::{
-        types::PropertyNameKind, writer::to_java_properties_string, ProductConfigManager,
-    },
+    product_config::{types::PropertyNameKind, writer::to_java_properties_string},
     product_logging::{
         framework::{calculate_log_volume_size_limit, vector_container},
         spec::{
@@ -54,11 +53,6 @@ use stackable_operator::k8s_openapi::DeepMerge;
 use stackable_operator::logging::controller::ReconcilerError;
 use strum::{EnumDiscriminants, IntoStaticStr};
 
-pub struct Ctx {
-    pub client: stackable_operator::client::Client,
-    pub product_config: ProductConfigManager,
-}
-
 #[derive(Snafu, Debug, EnumDiscriminants)]
 #[strum_discriminants(derive(IntoStaticStr))]
 #[allow(clippy::enum_variant_names)]
diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs
index 4e653788..8342f464 100644
--- a/rust/operator-binary/src/main.rs
+++ b/rust/operator-binary/src/main.rs
@@ -13,6 +13,7 @@ use stackable_operator::k8s_openapi::api::core::v1::Pod;
 use stackable_operator::k8s_openapi::api::core::v1::{ConfigMap, Service};
 use stackable_operator::kube::runtime::{controller::Controller, watcher};
 use stackable_operator::logging::controller::report_controller_reconciled;
+use stackable_operator::product_config::ProductConfigManager;
 use stackable_operator::CustomResourceExt;
 use stackable_spark_k8s_crd::constants::{
     CONTROLLER_NAME, HISTORY_CONTROLLER_NAME, OPERATOR_NAME, POD_DRIVER_CONTROLLER_NAME,
@@ -35,6 +36,15 @@ struct Opts {
     cmd: Command,
 }
 
+const PRODUCT_CONFIG_PATHS: [&str; 2] = [
+    "deploy/config-spec/properties.yaml",
+    "/etc/stackable/spark-k8s-operator/config-spec/properties.yaml",
+];
+pub struct Ctx {
+    pub client: stackable_operator::client::Client,
+    pub product_config: ProductConfigManager,
+}
+
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
     let opts = Opts::parse();
@@ -62,14 +72,13 @@ async fn main() -> anyhow::Result<()> {
         built_info::RUSTC_VERSION,
     );
 
-    let product_config = product_config.load(&[
-        "deploy/config-spec/properties.yaml",
-        "/etc/stackable/spark-k8s-operator/config-spec/properties.yaml",
-    ])?;
-
     let client = stackable_operator::client::create_client(Some(OPERATOR_NAME.to_string())).await?;
 
+    let ctx = Ctx {
+        client: client.clone(),
+        product_config: product_config.load(&PRODUCT_CONFIG_PATHS)?,
+    };
     let app_controller = Controller::new(
         watch_namespace.get_api::<SparkApplication>(&client),
         watcher::Config::default(),
     )
@@ -82,9 +91,7 @@ async fn main() -> anyhow::Result<()> {
         .run(
             spark_k8s_controller::reconcile,
             spark_k8s_controller::error_policy,
-            Arc::new(spark_k8s_controller::Ctx {
-                client: client.clone(),
-            }),
+            Arc::new(ctx),
         )
         .map(|res| {
             report_controller_reconciled(
@@ -108,13 +115,16 @@ async fn main() -> anyhow::Result<()> {
         .run(
             pod_driver_controller::reconcile,
             pod_driver_controller::error_policy,
-            Arc::new(pod_driver_controller::Ctx {
-                client: client.clone(),
-            }),
+            Arc::new(client.clone()),
         )
         .map(|res| report_controller_reconciled(&client, &format!("{OPERATOR_NAME}.{POD_DRIVER_CONTROLLER_NAME}"), &res))
         .instrument(info_span!("pod_driver_controller"));
 
+    // Create a new object because Ctx cannot be cloned
+    let ctx = Ctx {
+        client: client.clone(),
+        product_config: product_config.load(&PRODUCT_CONFIG_PATHS)?,
+    };
     let history_controller = Controller::new(
         watch_namespace.get_api::<SparkHistoryServer>(&client),
         watcher::Config::default(),
@@ -139,10 +149,7 @@ async fn main() -> anyhow::Result<()> {
         .run(
             history_controller::reconcile,
             history_controller::error_policy,
-            Arc::new(history_controller::Ctx {
-                client: client.clone(),
-                product_config,
-            }),
+            Arc::new(ctx),
         )
         .map(|res| {
             report_controller_reconciled(
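A note on the duplicated `Ctx` construction above: the product config is parsed twice because `ProductConfigManager`, and therefore `Ctx`, is not `Clone`, while each controller consumes its own context. Since `Controller::run` only needs an `Arc`, sharing a single context would likely work as well; an untested sketch:

let ctx = Arc::new(Ctx {
    client: client.clone(),
    product_config: product_config.load(&PRODUCT_CONFIG_PATHS)?,
});
// Pass Arc::clone(&ctx) to both the application and the history controller.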
diff --git a/rust/operator-binary/src/pod_driver_controller.rs b/rust/operator-binary/src/pod_driver_controller.rs
index 9b420295..4712e817 100644
--- a/rust/operator-binary/src/pod_driver_controller.rs
+++ b/rust/operator-binary/src/pod_driver_controller.rs
@@ -1,4 +1,6 @@
-use stackable_operator::{k8s_openapi::api::core::v1::Pod, kube::runtime::controller::Action};
+use stackable_operator::{
+    client::Client, k8s_openapi::api::core::v1::Pod, kube::runtime::controller::Action,
+};
 use stackable_spark_k8s_crd::{
     constants::POD_DRIVER_CONTROLLER_NAME, SparkApplication, SparkApplicationStatus,
 };
@@ -11,10 +13,6 @@ use strum::{EnumDiscriminants, IntoStaticStr};
 
 const LABEL_NAME_INSTANCE: &str = "app.kubernetes.io/instance";
 
-pub struct Ctx {
-    pub client: stackable_operator::client::Client,
-}
-
 #[derive(Snafu, Debug, EnumDiscriminants)]
 #[strum_discriminants(derive(IntoStaticStr))]
 #[allow(clippy::enum_variant_names)]
@@ -47,7 +45,7 @@ impl ReconcilerError for Error {
     }
 }
 /// Updates the status of the SparkApplication that started the pod.
-pub async fn reconcile(pod: Arc<Pod>, ctx: Arc<Ctx>) -> Result<Action> {
+pub async fn reconcile(pod: Arc<Pod>, client: Arc<Client>) -> Result<Action> {
     tracing::info!("Starting reconcile driver pod");
 
     let pod_name = pod.metadata.name.as_ref().context(PodNameNotFoundSnafu)?;
@@ -65,8 +63,7 @@ pub async fn reconcile(pod: Arc<Pod>, ctx: Arc<Ctx>) -> Result<Action> {
         },
     )?;
 
-    let app = ctx
-        .client
+    let app = client
         .get::<SparkApplication>(
             app_name.as_ref(),
             pod.metadata
@@ -81,7 +78,7 @@ pub async fn reconcile(pod: Arc<Pod>, ctx: Arc<Ctx>) -> Result<Action> {
 
     tracing::info!("Update spark application [{app_name}] status to [{phase}]");
 
-    ctx.client
+    client
         .apply_patch_status(
             POD_DRIVER_CONTROLLER_NAME,
             &app,
@@ -97,6 +94,6 @@ pub async fn reconcile(pod: Arc<Pod>, ctx: Arc<Ctx>) -> Result<Action> {
     Ok(Action::await_change())
 }
 
-pub fn error_policy(_obj: Arc<Pod>, _error: &Error, _ctx: Arc<Ctx>) -> Action {
+pub fn error_policy(_obj: Arc<Pod>, _error: &Error, _ctx: Arc<Client>) -> Action {
     Action::requeue(Duration::from_secs(5))
 }
#[snafu(display("invalid submit config"))] + SubmitConfig { + source: stackable_spark_k8s_crd::Error, + }, } type Result = std::result::Result; @@ -126,16 +142,6 @@ impl ReconcilerError for Error { } } -pub struct PodTemplateConfig { - pub role: SparkApplicationRole, - pub resources: Resources, - pub logging: Logging, - pub volume_mounts: Vec, - pub affinity: StackableAffinity, - pub pod_overrides: PodTemplateSpec, - pub jvm_security: String, -} - pub async fn reconcile(spark_application: Arc, ctx: Arc) -> Result { tracing::info!("Starting reconcile"); @@ -181,6 +187,10 @@ pub async fn reconcile(spark_application: Arc, ctx: Arc) .spark_image .resolve(SPARK_IMAGE_BASE_NAME, crate::built_info::CARGO_PKG_VERSION); + let validated_product_config: ValidatedRoleConfigByPropertyKind = spark_application + .validated_role_config(&resolved_product_image, &ctx.product_config) + .context(InvalidProductConfigSnafu)?; + let (serviceaccount, rolebinding) = build_spark_role_serviceaccount(&spark_application)?; client .apply_patch(CONTROLLER_NAME, &serviceaccount, &serviceaccount) @@ -210,26 +220,17 @@ pub async fn reconcile(spark_application: Arc, ctx: Arc) let driver_config = spark_application .driver_config() .context(FailedToResolveConfigSnafu)?; - let driver_pod_template_config = PodTemplateConfig { - role: SparkApplicationRole::Driver, - resources: driver_config.resources.clone(), - logging: driver_config.logging.clone(), - volume_mounts: spark_application.driver_volume_mounts( - &driver_config, - &opt_s3conn, - &s3logdir, - ), - affinity: driver_config.affinity, - pod_overrides: driver_config.pod_overrides.clone(), - jvm_security: to_java_properties_string(driver_config.jvm_security.iter()).with_context( - |_| JvmSecurityPropertiesSnafu { - role: SparkApplicationRole::Driver, - }, - )?, - }; + + let driver_product_config: Option<&HashMap>> = + validated_product_config + .get(&SparkApplicationRole::Driver.to_string()) + .and_then(|r| r.get(&"default".to_string())); + let driver_pod_template_config_map = pod_template_config_map( &spark_application, - &driver_pod_template_config, + SparkApplicationRole::Driver, + &driver_config, + driver_product_config, &env_vars, &opt_s3conn, &s3logdir, @@ -248,26 +249,17 @@ pub async fn reconcile(spark_application: Arc, ctx: Arc) let executor_config = spark_application .executor_config() .context(FailedToResolveConfigSnafu)?; - let executor_pod_template_config = PodTemplateConfig { - role: SparkApplicationRole::Executor, - resources: executor_config.resources.clone(), - logging: executor_config.logging.clone(), - volume_mounts: spark_application.executor_volume_mounts( - &executor_config, - &opt_s3conn, - &s3logdir, - ), - affinity: executor_config.affinity, - pod_overrides: executor_config.pod_overrides.clone(), - jvm_security: to_java_properties_string(executor_config.jvm_security.iter()).with_context( - |_| JvmSecurityPropertiesSnafu { - role: SparkApplicationRole::Executor, - }, - )?, - }; + + let executor_product_config: Option<&HashMap>> = + validated_product_config + .get(&SparkApplicationRole::Executor.to_string()) + .and_then(|r| r.get(&"default".to_string())); + let executor_pod_template_config_map = pod_template_config_map( &spark_application, - &executor_pod_template_config, + SparkApplicationRole::Executor, + &executor_config, + executor_product_config, &env_vars, &opt_s3conn, &s3logdir, @@ -292,8 +284,21 @@ pub async fn reconcile(spark_application: Arc, ctx: Arc) ) .context(BuildCommandSnafu)?; - let submit_job_config_map = - 
@@ -292,8 +284,21 @@ pub async fn reconcile(spark_application: Arc<SparkApplication>, ctx: Arc<Ctx>)
     )
     .context(BuildCommandSnafu)?;
 
-    let submit_job_config_map =
-        submit_job_config_map(&spark_application, vector_aggregator_address.as_deref())?;
+    let submit_config = spark_application
+        .submit_config()
+        .context(SubmitConfigSnafu)?;
+
+    let submit_product_config: Option<&HashMap<PropertyNameKind, BTreeMap<String, String>>> =
+        validated_product_config
+            .get(&SparkApplicationRole::Submit.to_string())
+            .and_then(|r| r.get(&"default".to_string()));
+
+    let submit_job_config_map = submit_job_config_map(
+        &spark_application,
+        submit_product_config,
+        vector_aggregator_address.as_deref(),
+        &submit_config.logging,
+    )?;
     client
         .apply_patch(
             CONTROLLER_NAME,
@@ -311,6 +316,7 @@ pub async fn reconcile(spark_application: Arc<SparkApplication>, ctx: Arc<Ctx>)
         &job_commands,
         &opt_s3conn,
         &s3logdir,
+        &submit_config,
     )?;
     client
         .apply_patch(CONTROLLER_NAME, &job, &job)
@@ -440,7 +446,8 @@ fn init_containers(
 
 #[allow(clippy::too_many_arguments)]
 fn pod_template(
     spark_application: &SparkApplication,
-    config: &PodTemplateConfig,
+    role: SparkApplicationRole,
+    config: &RoleConfig,
     volumes: &[Volume],
     env: &[EnvVar],
     s3conn: &Option<S3ConnectionSpec>,
@@ -449,7 +456,8 @@ fn pod_template(
 ) -> Result<PodTemplateSpec> {
     let container_name = SparkContainer::Spark.to_string();
     let mut cb = ContainerBuilder::new(&container_name).context(IllegalContainerNameSnafu)?;
-    cb.add_volume_mounts(config.volume_mounts.clone())
+
+    cb.add_volume_mounts(config.volume_mounts(spark_application, s3conn, s3logdir))
         .add_env_vars(env.to_vec())
         .resources(config.resources.clone().into())
         .image_from_product_image(spark_image);
@@ -512,20 +520,25 @@ fn pod_template(
     }
 
     let mut pod_template = pb.build_template();
-    pod_template.merge_from(config.pod_overrides.clone());
+    if let Some(pod_overrides) = spark_application.pod_overrides(role) {
+        pod_template.merge_from(pod_overrides);
+    }
     Ok(pod_template)
 }
 
+#[allow(clippy::too_many_arguments)]
 fn pod_template_config_map(
     spark_application: &SparkApplication,
-    config: &PodTemplateConfig,
+    role: SparkApplicationRole,
+    config: &RoleConfig,
+    product_config: Option<&HashMap<PropertyNameKind, BTreeMap<String, String>>>,
     env: &[EnvVar],
     s3conn: &Option<S3ConnectionSpec>,
     s3logdir: &Option<S3LogDir>,
     vector_aggregator_address: Option<&str>,
     spark_image: &ResolvedProductImage,
 ) -> Result<ConfigMap> {
-    let cm_name = spark_application.pod_template_config_map_name(config.role.clone());
+    let cm_name = spark_application.pod_template_config_map_name(role.clone());
 
     let log_config_map = if let Some(ContainerLogConfig {
         choice:
@@ -548,6 +561,7 @@ fn pod_template_config_map(
 
     let template = pod_template(
         spark_application,
+        role.clone(),
         config,
         volumes.as_ref(),
         env,
@@ -589,20 +603,34 @@ fn pod_template_config_map(
     )
     .context(InvalidLoggingConfigSnafu { cm_name })?;
 
-    cm_builder.add_data(JVM_SECURITY_PROPERTIES_FILE, config.jvm_security.clone());
+    if let Some(product_config) = product_config {
+        let jvm_sec_props: BTreeMap<String, Option<String>> = product_config
+            .get(&PropertyNameKind::File(
+                JVM_SECURITY_PROPERTIES_FILE.to_string(),
+            ))
+            .cloned()
+            .unwrap_or_default()
+            .into_iter()
+            .map(|(k, v)| (k, Some(v)))
+            .collect();
+
+        cm_builder.add_data(
+            JVM_SECURITY_PROPERTIES_FILE,
+            to_java_properties_string(jvm_sec_props.iter())
+                .with_context(|_| JvmSecurityPropertiesSnafu { role })?,
+        );
+    }
 
     cm_builder.build().context(PodTemplateConfigMapSnafu)
 }
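For reference, given the defaults shipped in deploy/config-spec/properties.yaml (see the test_validated_config expectations earlier in this patch), the security.properties data written by the block above would contain key/value pairs along these lines:

networkaddress.cache.ttl=30
networkaddress.cache.negative.ttl=0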
 fn submit_job_config_map(
     spark_application: &SparkApplication,
+    product_config: Option<&HashMap<PropertyNameKind, BTreeMap<String, String>>>,
     vector_aggregator_address: Option<&str>,
+    logging: &Logging<SparkContainer>,
 ) -> Result<ConfigMap> {
     let cm_name = spark_application.submit_job_config_map_name();
 
-    let config = spark_application
-        .job_config()
-        .context(FailedToResolveConfigSnafu)?;
-
     let mut cm_builder = ConfigMapBuilder::new();
     cm_builder.metadata(
@@ -622,13 +650,34 @@ fn submit_job_config_map(
             role_group: String::new(),
         },
         vector_aggregator_address,
-        &config.logging,
-        SubmitJobContainer::SparkSubmit,
-        SubmitJobContainer::Vector,
+        logging,
+        SparkContainer::SparkSubmit,
+        SparkContainer::Vector,
         &mut cm_builder,
     )
     .context(InvalidLoggingConfigSnafu { cm_name })?;
 
+    if let Some(product_config) = product_config {
+        let jvm_sec_props: BTreeMap<String, Option<String>> = product_config
+            .get(&PropertyNameKind::File(
+                JVM_SECURITY_PROPERTIES_FILE.to_string(),
+            ))
+            .cloned()
+            .unwrap_or_default()
+            .into_iter()
+            .map(|(k, v)| (k, Some(v)))
+            .collect();
+
+        cm_builder.add_data(
+            JVM_SECURITY_PROPERTIES_FILE,
+            to_java_properties_string(jvm_sec_props.iter()).with_context(|_| {
+                JvmSecurityPropertiesSnafu {
+                    role: SparkApplicationRole::Submit,
+                }
+            })?,
+        );
+    }
+
     cm_builder.build().context(PodTemplateConfigMapSnafu)
 }
 
@@ -641,12 +690,10 @@ fn spark_job(
     job_commands: &[String],
     s3conn: &Option<S3ConnectionSpec>,
     s3logdir: &Option<S3LogDir>,
+    job_config: &SubmitConfig,
 ) -> Result<Job> {
-    let mut cb = ContainerBuilder::new(&SubmitJobContainer::SparkSubmit.to_string())
+    let mut cb = ContainerBuilder::new(&SparkContainer::SparkSubmit.to_string())
         .context(IllegalContainerNameSnafu)?;
-    let job_config = spark_application
-        .job_config()
-        .context(FailedToResolveConfigSnafu)?;
 
     let log_config_map = if let Some(ContainerLogConfig {
         choice:
@@ -656,7 +703,7 @@ fn spark_job(
     }) = job_config
         .logging
         .containers
-        .get(&SubmitJobContainer::SparkSubmit)
+        .get(&SparkContainer::SparkSubmit)
     {
         config_map.into()
     } else {
@@ -673,7 +720,7 @@ fn spark_job(
     cb.image_from_product_image(spark_image)
         .command(vec!["/bin/bash".to_string(), "-c".to_string()])
         .args(vec![args.join(" && ")])
-        .resources(job_config.resources.into())
+        .resources(job_config.resources.clone().into())
         .add_volume_mounts(spark_application.spark_job_volume_mounts(s3conn, s3logdir))
         .add_env_vars(env.to_vec())
         .add_env_var(
@@ -710,10 +757,7 @@ fn spark_job(
             spark_image,
             VOLUME_MOUNT_NAME_CONFIG,
             VOLUME_MOUNT_NAME_LOG,
-            job_config
-                .logging
-                .containers
-                .get(&SubmitJobContainer::Vector),
+            job_config.logging.containers.get(&SparkContainer::Vector),
             ResourceRequirementsBuilder::new()
                 .with_cpu_request("250m")
                 .with_cpu_limit("500m")
@@ -743,7 +787,11 @@ fn spark_job(
         }),
     };
 
-    pod.merge_from(job_config.pod_overrides);
+    if let Some(submit_pod_overrides) =
+        spark_application.pod_overrides(SparkApplicationRole::Submit)
+    {
+        pod.merge_from(submit_pod_overrides);
+    }
 
     let job = Job {
         metadata: ObjectMetaBuilder::new()
diff --git a/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2 b/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2
index a1f08592..353f1ca6 100644
--- a/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2
+++ b/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2
@@ -19,79 +19,82 @@ spec:
   mainClass: org.apache.spark.examples.SparkALS
   mainApplicationFile: local:///stackable/spark/examples/jars/spark-examples.jar
   job:
-    logging:
-      enableVectorAgent: true
-      containers:
-        spark-submit:
-          console:
-            level: INFO
-          file:
-            level: INFO
-          loggers:
-            ROOT:
-              level: INFO
-        vector:
-          console:
-            level: INFO
-          file:
-            level: INFO
-          loggers:
-            ROOT:
+    config:
+      logging:
+        enableVectorAgent: true
+        containers:
+          spark-submit:
+            console:
               level: INFO
+            file:
+              level: INFO
+            loggers:
+              ROOT:
+                level: INFO
+          vector:
+            console:
+              level: INFO
+            file:
+              level: INFO
+            loggers:
+              ROOT:
+                level: INFO
driver: - logging: - enableVectorAgent: true - containers: - spark: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - job: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: + config: + logging: + enableVectorAgent: true + containers: + spark: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + job: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO executor: - instances: 1 - logging: - enableVectorAgent: true - containers: - spark: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - job: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + spark: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + job: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: level: INFO + loggers: + ROOT: + level: INFO diff --git a/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2 b/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2 index 2015366d..ac323f21 100644 --- a/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2 +++ b/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2 @@ -53,24 +53,27 @@ spec: mainClass: org.apache.spark.examples.SparkALS mainApplicationFile: local:///stackable/spark/examples/jars/spark-examples.jar job: - logging: - enableVectorAgent: true - containers: - spark-submit: - custom: - configMap: spark-submit-log-config + config: + logging: + enableVectorAgent: true + containers: + spark-submit: + custom: + configMap: spark-submit-log-config driver: - logging: - enableVectorAgent: true - containers: - spark: - custom: - configMap: spark-log-config + config: + logging: + enableVectorAgent: true + containers: + spark: + custom: + configMap: spark-log-config executor: - instances: 1 - logging: - enableVectorAgent: true - containers: - spark: - custom: - configMap: spark-log-config + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + spark: + custom: + configMap: spark-log-config diff --git a/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2 b/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2 index 8110ab8b..f87b1160 100644 --- a/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2 +++ b/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2 @@ -20,79 +20,82 @@ spec: requirements: - numpy==1.24.2 job: - logging: - enableVectorAgent: true - containers: - spark-submit: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: + config: + logging: + enableVectorAgent: true + containers: + spark-submit: + console: level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO driver: - logging: - enableVectorAgent: true - containers: - spark: - console: - level: INFO - file: - level: INFO 
- loggers: - ROOT: - level: INFO - requirements: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: + config: + logging: + enableVectorAgent: true + containers: + spark: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + requirements: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO executor: - instances: 1 - logging: - enableVectorAgent: true - containers: - spark: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - requirements: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + spark: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + requirements: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: level: INFO + loggers: + ROOT: + level: INFO diff --git a/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2 b/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2 index b6ab6f75..c3646e0a 100644 --- a/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2 +++ b/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2 @@ -54,24 +54,27 @@ spec: requirements: - numpy==1.24.2 job: - logging: - enableVectorAgent: true - containers: - spark-submit: - custom: - configMap: pyspark-submit-log-config + config: + logging: + enableVectorAgent: true + containers: + spark-submit: + custom: + configMap: pyspark-submit-log-config driver: - logging: - enableVectorAgent: true - containers: - spark: - custom: - configMap: pyspark-log-config + config: + logging: + enableVectorAgent: true + containers: + spark: + custom: + configMap: pyspark-log-config executor: - instances: 1 - logging: - enableVectorAgent: true - containers: - spark: - custom: - configMap: pyspark-log-config + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + spark: + custom: + configMap: pyspark-log-config diff --git a/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2 index 6668dd1e..b606aa8a 100644 --- a/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2 @@ -48,7 +48,7 @@ spec: cpu: 1500m memory: 1024Mi executor: - instances: 1 + replicas: 1 podOverrides: spec: containers: diff --git a/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2 index 6676a058..8c1d9a34 100644 --- a/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2 @@ -31,12 +31,15 @@ spec: port: 9000 accessStyle: Path job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: 
{{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 3 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 3 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2 index a6dc7039..7cc98360 100644 --- a/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2 @@ -30,12 +30,15 @@ spec: port: 9000 accessStyle: Path job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 3 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 3 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2 index 1e7f81fd..551c9796 100644 --- a/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2 @@ -24,30 +24,33 @@ spec: spark.kubernetes.driver.pod.name: "resources-crd-driver" spark.kubernetes.executor.podNamePrefix: "resources-crd" job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - resources: - cpu: - min: 250m - max: 500m - memory: - limit: 512Mi + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + resources: + cpu: + min: 250m + max: 500m + memory: + limit: 512Mi driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - resources: - cpu: - min: 200m - max: 1200m - memory: - limit: 1024Mi + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + resources: + cpu: + min: 200m + max: 1200m + memory: + limit: 1024Mi executor: - instances: 1 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - resources: - cpu: - min: 250m - max: 1000m - memory: - limit: 1024Mi + replicas: 1 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + resources: + cpu: + min: 250m + max: 1000m + memory: + limit: 1024Mi diff --git a/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2 index a99f1537..dc48fe9e 100644 --- a/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2 @@ -35,11 +35,14 @@ spec: spark.executor.memoryOverheadFactor: "0.4" spark.executor.instances: "1" job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - logging: - enableVectorAgent: {{ 
lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2 index f8d1fe31..bd26ac35 100644 --- a/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2 @@ -27,12 +27,15 @@ spec: bucket: reference: spark-history-s3-bucket job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 1 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 1 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2 index 53220e9d..2bab38db 100644 --- a/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2 @@ -20,12 +20,15 @@ spec: mainClass: org.apache.spark.examples.SparkALS mainApplicationFile: "local:///stackable/spark/examples/jars/spark-examples.jar" job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 1 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 1 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2 index 8f0d046c..93a96d25 100644 --- a/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2 @@ -27,12 +27,15 @@ spec: bucket: reference: spark-history-s3-bucket job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 1 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 1 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2 index 3d01cb6a..286e1a56 100644 --- a/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2 @@ -27,12 +27,15 @@ spec: bucket: 
reference: spark-history-s3-bucket job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 1 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 1 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2 index 16cd28cd..99510674 100644 --- a/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2 @@ -36,18 +36,21 @@ spec: s3connection: reference: minio job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - volumeMounts: - - name: cm-job-arguments - mountPath: /arguments + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + volumeMounts: + - name: cm-job-arguments + mountPath: /arguments executor: - instances: 3 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - volumeMounts: - - name: cm-job-arguments - mountPath: /arguments + replicas: 3 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + volumeMounts: + - name: cm-job-arguments + mountPath: /arguments diff --git a/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 index db45ff67..4adda1e8 100644 --- a/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 @@ -27,12 +27,15 @@ spec: credentials: secretClass: spark-pi-private-s3-credentials-class job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 1 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 1 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} diff --git a/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2 index 69f19582..aa38ed4a 100644 --- a/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2 +++ b/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2 @@ -25,12 +25,15 @@ spec: port: 9000 accessStyle: Path job: - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} driver: - logging: - enableVectorAgent: {{ lookup('env', 
'VECTOR_AGGREGATOR') | length > 0 }} + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} executor: - instances: 1 - logging: - enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + replicas: 1 + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}