From a27bb51d063af81c99a0ac24d68d5cbe3e90d396 Mon Sep 17 00:00:00 2001 From: Ashleigh Brennan Date: Fri, 26 Jan 2024 10:08:39 -0600 Subject: [PATCH] OBSDOCS-760: Update structure of resource scheduling docs --- _topic_maps/_topic_map.yml | 11 +- _topic_maps/_topic_map_osd.yml | 11 +- _topic_maps/_topic_map_rosa.yml | 11 +- .../config/cluster-logging-moving-nodes.adoc | 11 -- .../config/cluster-logging-tolerations.adoc | 104 ---------------- logging/scheduling_resources/_attributes | 1 + logging/scheduling_resources/images | 1 + .../logging-node-selectors.adoc | 18 +++ .../logging-taints-tolerations.adoc | 25 ++++ logging/scheduling_resources/modules | 1 + logging/scheduling_resources/snippets | 1 + ...cluster-logging-collector-tolerations.adoc | 91 +++++++++----- ...ter-logging-elasticsearch-tolerations.adoc | 70 ----------- .../cluster-logging-kibana-tolerations.adoc | 48 ++++---- .../cluster-logging-logstore-tolerations.adoc | 116 ++++++++++++++++++ .../nodes-scheduler-node-selectors-about.adoc | 1 + ...es-scheduler-taints-tolerations-about.adoc | 20 ++- .../nodes-scheduler-node-selectors.adoc | 12 +- .../cluster-tasks.adoc | 2 +- snippets/about-node-selectors.adoc | 10 ++ 20 files changed, 302 insertions(+), 263 deletions(-) delete mode 100644 logging/config/cluster-logging-moving-nodes.adoc delete mode 100644 logging/config/cluster-logging-tolerations.adoc create mode 120000 logging/scheduling_resources/_attributes create mode 120000 logging/scheduling_resources/images create mode 100644 logging/scheduling_resources/logging-node-selectors.adoc create mode 100644 logging/scheduling_resources/logging-taints-tolerations.adoc create mode 120000 logging/scheduling_resources/modules create mode 120000 logging/scheduling_resources/snippets delete mode 100644 modules/cluster-logging-elasticsearch-tolerations.adoc create mode 100644 modules/cluster-logging-logstore-tolerations.adoc create mode 100644 snippets/about-node-selectors.adoc diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 8625dd32a90e..da2bd3621198 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2524,10 +2524,6 @@ Topics: Topics: - Name: Configuring CPU and memory limits for Logging components File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving logging subsystem resources with node selectors - File: cluster-logging-moving-nodes - Name: Configuring systemd-journald for Logging File: cluster-logging-systemd - Name: Log collection and forwarding @@ -2568,6 +2564,13 @@ Topics: Topics: - Name: Flow control mechanisms File: logging-flow-control-mechanisms +- Name: Scheduling resources + Dir: scheduling_resources + Topics: + - Name: Using node selectors to move logging resources + File: logging-node-selectors + - Name: Using tolerations to control logging pod placement + File: logging-taints-tolerations - Name: Uninstalling Logging File: cluster-logging-uninstall - Name: Exported fields diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml index 198d58e1db74..85b9d46f9cb8 100644 --- a/_topic_maps/_topic_map_osd.yml +++ b/_topic_maps/_topic_map_osd.yml @@ -944,10 +944,6 @@ Topics: Topics: - Name: Configuring CPU and memory limits for Logging components File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving logging subsystem resources with node selectors - File: cluster-logging-moving-nodes 
#- Name: Configuring systemd-journald and Fluentd # File: cluster-logging-systemd - Name: Log collection and forwarding @@ -988,6 +984,13 @@ Topics: Topics: - Name: Flow control mechanisms File: logging-flow-control-mechanisms +- Name: Scheduling resources + Dir: scheduling_resources + Topics: + - Name: Using node selectors to move logging resources + File: logging-node-selectors + - Name: Using tolerations to control logging pod placement + File: logging-taints-tolerations - Name: Uninstalling Logging File: cluster-logging-uninstall - Name: Exported fields diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index 6f96df98a28b..d447e72ac376 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -1117,10 +1117,6 @@ Topics: Topics: - Name: Configuring CPU and memory limits for Logging components File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving logging subsystem resources with node selectors - File: cluster-logging-moving-nodes #- Name: Configuring systemd-journald and Fluentd # File: cluster-logging-systemd - Name: Log collection and forwarding @@ -1161,6 +1157,13 @@ Topics: Topics: - Name: Flow control mechanisms File: logging-flow-control-mechanisms +- Name: Scheduling resources + Dir: scheduling_resources + Topics: + - Name: Using node selectors to move logging resources + File: logging-node-selectors + - Name: Using tolerations to control logging pod placement + File: logging-taints-tolerations - Name: Uninstalling Logging File: cluster-logging-uninstall - Name: Exported fields diff --git a/logging/config/cluster-logging-moving-nodes.adoc b/logging/config/cluster-logging-moving-nodes.adoc deleted file mode 100644 index fbe1bac307ad..000000000000 --- a/logging/config/cluster-logging-moving-nodes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-moving -[id="cluster-logging-moving-nodes"] -= Moving {logging} resources with node selectors -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can use node selectors to deploy the Elasticsearch and Kibana pods to different nodes. - -include::modules/infrastructure-moving-logging.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-tolerations.adoc b/logging/config/cluster-logging-tolerations.adoc deleted file mode 100644 index 7330f658840d..000000000000 --- a/logging/config/cluster-logging-tolerations.adoc +++ /dev/null @@ -1,104 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-tolerations -[id="cluster-logging-tolerations"] -= Using tolerations to control OpenShift Logging pod placement -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can use taints and tolerations to ensure that {logging} pods run -on specific nodes and that no other workload can run on those nodes. - -Taints and tolerations are simple `key:value` pair. A taint on a node -instructs the node to repel all pods that do not tolerate the taint. - -The `key` is any string, up to 253 characters and the `value` is any string up to 63 characters. -The string must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores. - -.Sample {logging} CR with tolerations -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... 
- -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - tolerations: <1> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: {} - redundancyPolicy: "ZeroRedundancy" - visualization: - type: "kibana" - kibana: - tolerations: <2> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 2Gi - requests: - cpu: 100m - memory: 1Gi - replicas: 1 - collection: - logs: - type: "fluentd" - fluentd: - tolerations: <3> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 2Gi - requests: - cpu: 100m - memory: 1Gi ----- - -<1> This toleration is added to the Elasticsearch pods. -<2> This toleration is added to the Kibana pod. -<3> This toleration is added to the logging collector pods. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-elasticsearch-tolerations.adoc[leveloffset=+1] - -include::modules/cluster-logging-kibana-tolerations.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-tolerations.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="cluster-logging-tolerations-addtl-resources"] -== Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/nodes/scheduling/nodes-scheduler-taints-tolerations.html#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. 
-endif::[] \ No newline at end of file diff --git a/logging/scheduling_resources/_attributes b/logging/scheduling_resources/_attributes new file mode 120000 index 000000000000..20cc1dcb77bf --- /dev/null +++ b/logging/scheduling_resources/_attributes @@ -0,0 +1 @@ +../../_attributes/ \ No newline at end of file diff --git a/logging/scheduling_resources/images b/logging/scheduling_resources/images new file mode 120000 index 000000000000..847b03ed0541 --- /dev/null +++ b/logging/scheduling_resources/images @@ -0,0 +1 @@ +../../images/ \ No newline at end of file diff --git a/logging/scheduling_resources/logging-node-selectors.adoc b/logging/scheduling_resources/logging-node-selectors.adoc new file mode 100644 index 000000000000..e39331d54223 --- /dev/null +++ b/logging/scheduling_resources/logging-node-selectors.adoc @@ -0,0 +1,18 @@ +:_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] +include::_attributes/attributes-openshift-dedicated.adoc[] +[id="logging-node-selectors"] += Using node selectors to move logging resources +:context: logging-node-selectors + +toc::[] + +include::snippets/about-node-selectors.adoc[] + +include::modules/nodes-scheduler-node-selectors-about.adoc[leveloffset=+1] +include::modules/infrastructure-moving-logging.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_logging-node-selection"] +== Additional resources +* xref:../../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Placing pods on specific nodes using node selectors] diff --git a/logging/scheduling_resources/logging-taints-tolerations.adoc b/logging/scheduling_resources/logging-taints-tolerations.adoc new file mode 100644 index 000000000000..221f7d950390 --- /dev/null +++ b/logging/scheduling_resources/logging-taints-tolerations.adoc @@ -0,0 +1,25 @@ +:_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] +include::_attributes/attributes-openshift-dedicated.adoc[] +[id="logging-taints-tolerations"] += Using taints and tolerations to control logging pod placement +:context: logging-taints-tolerations + +toc::[] + +Taints and tolerations allow nodes to control which pods should (or should not) be scheduled on them.
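For illustration only (this sketch is not part of any file in this patch), the following pairing shows the mechanism that the modules below rely on: a taint on a node and a matching toleration on a pod. The node name `example-node`, the pod name `example-pod`, and the `logging=reserved:NoExecute` taint are hypothetical values, not settings that the {logging} requires.

[source,yaml]
----
# Hypothetical node: the taint repels every pod that does not tolerate it.
apiVersion: v1
kind: Node
metadata:
  name: example-node
spec:
  taints:
  - key: logging
    value: reserved
    effect: NoExecute
---
# Hypothetical pod: the toleration matches the taint above, so the pod can be
# scheduled on, and remain on, the tainted node.
apiVersion: v1
kind: Pod
metadata:
  name: example-pod
spec:
  tolerations:
  - key: logging
    operator: Equal
    value: reserved
    effect: NoExecute
  containers:
  - name: example
    image: registry.access.redhat.com/ubi9/ubi-minimal
----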
+ +include::modules/nodes-scheduler-taints-tolerations-about.adoc[leveloffset=+1] +include::modules/cluster-logging-logstore-tolerations.adoc[leveloffset=+1] +include::modules/cluster-logging-kibana-tolerations.adoc[leveloffset=+1] +include::modules/cluster-logging-collector-tolerations.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_cluster-logging-tolerations"] +== Additional resources +ifdef::openshift-enterprise,openshift-origin[] +* xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] +endif::[] +ifdef::openshift-rosa,openshift-dedicated[] +* link:https://docs.openshift.com/container-platform/latest/nodes/scheduling/nodes-scheduler-taints-tolerations.html#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] +endif::[] diff --git a/logging/scheduling_resources/modules b/logging/scheduling_resources/modules new file mode 120000 index 000000000000..36719b9de743 --- /dev/null +++ b/logging/scheduling_resources/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/logging/scheduling_resources/snippets b/logging/scheduling_resources/snippets new file mode 120000 index 000000000000..5a3f5add140e --- /dev/null +++ b/logging/scheduling_resources/snippets @@ -0,0 +1 @@ +../../snippets/ \ No newline at end of file diff --git a/modules/cluster-logging-collector-tolerations.adoc b/modules/cluster-logging-collector-tolerations.adoc index bcbea11201f7..eb8a555f8e35 100644 --- a/modules/cluster-logging-collector-tolerations.adoc +++ b/modules/cluster-logging-collector-tolerations.adoc @@ -1,69 +1,98 @@ // Module included in the following assemblies: // -// * logging/cluster-logging-collector.adoc +// * logging/scheduling_resources/logging-taints-tolerations.adoc :_mod-docs-content-type: PROCEDURE [id="cluster-logging-collector-tolerations_{context}"] -= Using tolerations to control the log collector pod placement += Using tolerations to control log collector pod placement -You can ensure which nodes the logging collector pods run on and prevent -other workloads from using those nodes by using tolerations on the pods. - -You apply tolerations to logging collector pods through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. You can use taints and tolerations -to ensure the pod does not get evicted for things like memory and CPU issues. - -By default, the logging collector pods have the following toleration: +By default, log collector pods have the following `tolerations` configuration: [source,yaml] ---- -tolerations: -- key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoExecute" +apiVersion: v1 +kind: Pod +metadata: + name: collector-example + namespace: openshift-logging +spec: +# ... + collection: + type: vector + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/disk-pressure + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/memory-pressure + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/pid-pressure + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/unschedulable + operator: Exists +# ... ---- .Prerequisites -* {clo} and {es-op} must be installed. 
+* You have installed the {clo} and {oc-first}. .Procedure -. Use the following command to add a taint to a node where you want logging collector pods to schedule logging collector pods: +. Add a taint to a node where you want to schedule the logging collector pods by running the following command: + [source,terminal] ---- -$ oc adm taint nodes <node-name> <key>=<value>:<effect> +$ oc adm taint nodes <node_name> <key>=<value>:<effect> ---- + -For example: -+ +.Example command [source,terminal] ---- $ oc adm taint nodes node1 collector=node:NoExecute ---- + -This example places a taint on `node1` that has key `collector`, value `node`, and taint effect `NoExecute`. -You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and removes existing pods -that do not match. +This example places a taint on `node1` that has key `collector`, value `node`, and taint effect `NoExecute`. You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and removes existing pods that do not match. . Edit the `collection` stanza of the `ClusterLogging` custom resource (CR) to configure a toleration for the logging collector pods: + [source,yaml] ---- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogging +metadata: +# ... +spec: +# ... collection: - logs: - type: "fluentd" - fluentd: - tolerations: - - key: "collector" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> + type: vector + tolerations: + - key: collector <1> + operator: Exists <2> + effect: NoExecute <3> + tolerationSeconds: 6000 <4> + resources: + limits: + memory: 2Gi + requests: + cpu: 100m + memory: 1Gi +# ... ---- <1> Specify the key that you added to the node. <2> Specify the `Exists` operator to require the `key`/`value`/`effect` parameters to match. <3> Specify the `NoExecute` effect. <4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration would be able to schedule onto `node1`. +This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration can be scheduled onto `node1`. diff --git a/modules/cluster-logging-elasticsearch-tolerations.adoc b/modules/cluster-logging-elasticsearch-tolerations.adoc deleted file mode 100644 index 947f469095f0..000000000000 --- a/modules/cluster-logging-elasticsearch-tolerations.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-elasticsearch-tolerations_{context}"] -= Using tolerations to control the log store pod placement - -You can control which nodes the log store pods runs on and prevent -other workloads from using those nodes by using tolerations on the pods. - -You apply tolerations to the log store pods through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. A taint on a node is a `key:value pair` that -instructs the node to repel all pods that do not tolerate the taint. Using a specific `key:value` pair -that is not on other pods ensures only the log store pods can run on that node.
- -By default, the log store pods have the following toleration: - -[source,yaml] ----- -tolerations: -- effect: "NoExecute" - key: "node.kubernetes.io/disk-pressure" - operator: "Exists" ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Use the following command to add a taint to a node where you want to schedule the OpenShift Logging pods: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 elasticsearch=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `elasticsearch`, value `node`, and taint effect `NoExecute`. -Nodes with the `NoExecute` effect schedule only pods that match the taint and remove existing pods -that do not match. - -. Edit the `logstore` section of the `ClusterLogging` CR to configure a toleration for the Elasticsearch pods: -+ -[source,yaml] ----- - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 1 - tolerations: - - key: "elasticsearch" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> ----- -<1> Specify the key that you added to the node. -<2> Specify the `Exists` operator to require a taint with the key `elasticsearch` to be present on the Node. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration could be scheduled onto `node1`. diff --git a/modules/cluster-logging-kibana-tolerations.adoc b/modules/cluster-logging-kibana-tolerations.adoc index 1af459b3f60f..a3bdb7f0f9bd 100644 --- a/modules/cluster-logging-kibana-tolerations.adoc +++ b/modules/cluster-logging-kibana-tolerations.adoc @@ -1,60 +1,64 @@ // Module included in the following assemblies: // -// * logging/cluster-logging-visualizer.adoc +// * logging/scheduling_resources/logging-taints-tolerations.adoc :_mod-docs-content-type: PROCEDURE [id="cluster-logging-kibana-tolerations_{context}"] = Using tolerations to control the log visualizer pod placement -You can control the node where the log visualizer pod runs and prevent -other workloads from using those nodes by using tolerations on the pods. - -You apply tolerations to the log visualizer pod through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. A taint on a node is a `key:value pair` that -instructs the node to repel all pods that do not tolerate the taint. Using a specific `key:value` pair -that is not on other pods ensures only the Kibana pod can run on that node. +You can use a specific key/value pair that is not on other pods to ensure that only the Kibana pod can run on the specified node. .Prerequisites -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. +* You have installed the {clo}, the {es-op}, and the {oc-first}. .Procedure -. Use the following command to add a taint to a node where you want to schedule the log visualizer pod: +. 
Add a taint to a node where you want to schedule the log visualizer pod by running the following command: + [source,terminal] ---- -$ oc adm taint nodes <node-name> <key>=<value>:<effect> +$ oc adm taint nodes <node_name> <key>=<value>:<effect> ---- + -For example: -+ +.Example command [source,terminal] ---- $ oc adm taint nodes node1 kibana=node:NoExecute ---- + -This example places a taint on `node1` that has key `kibana`, value `node`, and taint effect `NoExecute`. -You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and remove existing pods -that do not match. +This example places a taint on `node1` that has key `kibana`, value `node`, and taint effect `NoExecute`. You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and removes existing pods that do not match. . Edit the `visualization` section of the `ClusterLogging` CR to configure a toleration for the Kibana pod: + [source,yaml] ---- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogging +metadata: +# ... +spec: +# ... visualization: - type: "kibana" + type: kibana kibana: tolerations: - - key: "kibana" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> + - key: kibana <1> + operator: Exists <2> + effect: NoExecute <3> tolerationSeconds: 6000 <4> + resources: + limits: + memory: 2Gi + requests: + cpu: 100m + memory: 1Gi + replicas: 1 +# ... ---- <1> Specify the key that you added to the node. -<2> Specify the `Exists` operator to require the `key`/`value`/`effect` parameters to match. +<2> Specify the `Exists` operator to require the `key`, `value`, and `effect` parameters to match. <3> Specify the `NoExecute` effect. <4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration would be able to schedule onto `node1`. diff --git a/modules/cluster-logging-logstore-tolerations.adoc b/modules/cluster-logging-logstore-tolerations.adoc new file mode 100644 index 000000000000..3c320c73fe22 --- /dev/null +++ b/modules/cluster-logging-logstore-tolerations.adoc @@ -0,0 +1,116 @@ +// Module included in the following assemblies: +// +// * logging/scheduling_resources/logging-taints-tolerations.adoc + +:_mod-docs-content-type: PROCEDURE +[id="cluster-logging-logstore-tolerations_{context}"] += Using tolerations to control log store pod placement + +By default, log store pods have the following toleration configurations: + +.Elasticsearch log store pods default tolerations +[source,yaml] +---- +apiVersion: v1 +kind: Pod +metadata: + name: elasticsearch-example + namespace: openshift-logging +spec: +# ... + tolerations: + - effect: NoSchedule + key: node.kubernetes.io/disk-pressure + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + - effect: NoSchedule + key: node.kubernetes.io/memory-pressure + operator: Exists +# ... +---- + +.LokiStack log store pods default tolerations +[source,yaml] +---- +apiVersion: v1 +kind: Pod +metadata: + name: lokistack-example + namespace: openshift-logging +spec: +# ...
+ tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + - effect: NoSchedule + key: node.kubernetes.io/memory-pressure + operator: Exists +# ... +---- + +You can configure a toleration for log store pods by adding a taint and then modifying the `tolerations` syntax in the `ClusterLogging` custom resource (CR). + +.Prerequisites + +* You have installed the {clo}. +* You have installed the {oc-first}. +* You have deployed an internal log store that is either Elasticsearch or LokiStack. + +.Procedure + +. Add a taint to a node where you want to schedule the {logging} pods by running the following command: ++ +[source,terminal] +---- +$ oc adm taint nodes <node_name> <key>=<value>:<effect> +---- ++ +.Example command +[source,terminal] +---- +$ oc adm taint nodes node1 lokistack=node:NoExecute +---- ++ +This example places a taint on `node1` that has key `lokistack`, value `node`, and taint effect `NoExecute`. Nodes with the `NoExecute` effect schedule only pods that match the taint and remove existing pods that do not match. + +. Edit the `logstore` section of the `ClusterLogging` CR to configure a toleration for the log store pods: ++ +.Example `ClusterLogging` CR +[source,yaml] +---- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogging +metadata: +# ... +spec: +# ... + logStore: + type: lokistack + elasticsearch: + nodeCount: 1 + tolerations: + - key: lokistack # <1> + operator: Exists # <2> + effect: NoExecute # <3> + tolerationSeconds: 6000 # <4> +# ... +---- +<1> Specify the key that you added to the node. +<2> Specify the `Exists` operator to require a taint with the key `lokistack` to be present on the node. +<3> Specify the `NoExecute` effect. +<4> Optional: Specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. + +This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration can be scheduled onto `node1`. diff --git a/modules/nodes-scheduler-node-selectors-about.adoc b/modules/nodes-scheduler-node-selectors-about.adoc index 21702c669be6..30ce7e2b8bae 100644 --- a/modules/nodes-scheduler-node-selectors-about.adoc +++ b/modules/nodes-scheduler-node-selectors-about.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-scheduler-node-selector.adoc +// * logging/scheduling_resources/logging-node-selectors.adoc :_mod-docs-content-type: CONCEPT [id="nodes-scheduler-node-selectors-about_{context}"] diff --git a/modules/nodes-scheduler-taints-tolerations-about.adoc b/modules/nodes-scheduler-taints-tolerations-about.adoc index d171287a1880..fc457a148376 100644 --- a/modules/nodes-scheduler-taints-tolerations-about.adoc +++ b/modules/nodes-scheduler-taints-tolerations-about.adoc @@ -2,7 +2,15 @@ // // * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc // * post_installation_configuration/node-tasks.adoc +// * logging/scheduling_resources/logging-taints-tolerations.adoc +ifeval::["{context}" == "nodes-scheduler-taints-tolerations"] +:nodes-scheduler-taints-tolerations: +endif::[] + +ifeval::["{context}" == "node-tasks"] +:node-tasks: +endif::[] :_mod-docs-content-type: CONCEPT [id="nodes-scheduler-taints-tolerations-about_{context}"] @@ -46,7 +54,6 @@ spec: #... ---- - Taints and tolerations consist of a key, value, and effect.
[id="taint-components-table_{context}"] @@ -141,6 +148,7 @@ The following taints are built into {product-title}: {product-title} does not set a default pid.available `evictionHard`. ==== +ifdef::nodes-scheduler-taints-tolerations,node-tasks[] [id="nodes-scheduler-taints-tolerations-about-seconds_{context}"] == Understanding how to use toleration seconds to delay pod evictions @@ -323,3 +331,13 @@ spec: - operator: "Exists" #... ---- + +endif::nodes-scheduler-taints-tolerations,node-tasks[] + +ifeval::["{context}" == "nodes-scheduler-taints-tolerations"] +:!nodes-scheduler-taints-tolerations: +endif::[] + +ifeval::["{context}" == "node-tasks"] +:!node-tasks: +endif::[] diff --git a/nodes/scheduling/nodes-scheduler-node-selectors.adoc b/nodes/scheduling/nodes-scheduler-node-selectors.adoc index 94979add978c..d1f1885942e3 100644 --- a/nodes/scheduling/nodes-scheduler-node-selectors.adoc +++ b/nodes/scheduling/nodes-scheduler-node-selectors.adoc @@ -6,17 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] - - -A _node selector_ specifies a map of key/value pairs that are defined using custom labels on nodes and selectors specified in pods. - -For the pod to be eligible to run on a node, the pod must have the same key/value node selector as the label on the node. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - +include::snippets/about-node-selectors.adoc[] include::modules/nodes-scheduler-node-selectors-about.adoc[leveloffset=+1] diff --git a/post_installation_configuration/cluster-tasks.adoc b/post_installation_configuration/cluster-tasks.adoc index 759f414f30e3..bc5ab134e430 100644 --- a/post_installation_configuration/cluster-tasks.adoc +++ b/post_installation_configuration/cluster-tasks.adoc @@ -557,7 +557,7 @@ include::modules/installation-extend-edge-nodes-aws-local-zones.adoc[leveloffset * xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc[Understanding taints and tolerations] -* xref:../logging/config/cluster-logging-tolerations.adoc[Using tolerations to control OpenShift Logging pod placement] +* xref:../logging/scheduling_resources/logging-taints-tolerations.adoc[Using taints and tolerations to control logging pod placement] [id="post-worker-latency-profiles"] == Improving cluster stability in high latency environments using worker latency profiles diff --git a/snippets/about-node-selectors.adoc b/snippets/about-node-selectors.adoc new file mode 100644 index 000000000000..61541fb54e85 --- /dev/null +++ b/snippets/about-node-selectors.adoc @@ -0,0 +1,10 @@ +// Snippets included in the following assemblies and modules: +// +// * nodes/scheduling/nodes-scheduler-node-selectors.adoc +// * logging/scheduling_resources/logging-node-selection.adoc + +:_mod-docs-content-type: SNIPPET + +A _node selector_ specifies a map of key/value pairs that are defined using custom labels on nodes and selectors specified in pods. + +For the pod to be eligible to run on a node, the pod must have the same key/value node selector as the label on the node.