From 8265c1c018c58a3543c2c21c6876e4416da59537 Mon Sep 17 00:00:00 2001 From: Ashleigh Brennan Date: Mon, 23 Oct 2023 13:37:16 -0500 Subject: [PATCH] OBSDOCS-594: Logging upgrading docs and mCLF note --- _topic_maps/_topic_map.yml | 4 +- _topic_maps/_topic_map_osd.yml | 8 +- _topic_maps/_topic_map_rosa.yml | 8 +- logging/cluster-logging-upgrading.adoc | 29 ++- .../log-forwarding.adoc | 15 +- ...uster-logging-updating-logging-to-5-0.adoc | 225 ------------------ ...ster-logging-upgrading-elasticsearch.adoc} | 108 +++------ modules/log-forwarding-implementations.adoc | 33 +++ modules/log-forwarding-modes.adoc | 33 --- modules/logging-create-clf.adoc | 14 +- .../logging-operator-upgrading-all-ns.adoc | 67 ++++++ modules/logging-upgrading-clo.adoc | 33 +++ modules/logging-upgrading-loki.adoc | 33 +++ 13 files changed, 247 insertions(+), 363 deletions(-) delete mode 100644 modules/cluster-logging-updating-logging-to-5-0.adoc rename modules/{cluster-logging-updating-logging-to-current.adoc => cluster-logging-upgrading-elasticsearch.adoc} (68%) create mode 100644 modules/log-forwarding-implementations.adoc delete mode 100644 modules/log-forwarding-modes.adoc create mode 100644 modules/logging-operator-upgrading-all-ns.adoc create mode 100644 modules/logging-upgrading-clo.adoc create mode 100644 modules/logging-upgrading-loki.adoc diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index f62b75127913..cac261eb626f 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2535,6 +2535,8 @@ Topics: File: cluster-logging - Name: Installing Logging File: cluster-logging-deploying +- Name: Updating Logging + File: cluster-logging-upgrading Distros: openshift-enterprise,openshift-origin - Name: Configuring your Logging deployment Dir: config @@ -2576,8 +2578,6 @@ Topics: File: cluster-logging-collector - Name: Collecting and storing Kubernetes events File: cluster-logging-eventrouter -- Name: Updating Logging - File: cluster-logging-upgrading - 
Name: Viewing cluster dashboards File: cluster-logging-dashboards - Name: Logging alerts diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml index c6d5fba3b243..548c16687211 100644 --- a/_topic_maps/_topic_map_osd.yml +++ b/_topic_maps/_topic_map_osd.yml @@ -143,7 +143,7 @@ Topics: # cannot list resource "secrets" in API group "" in the namespace "openshift-config" # - Name: Importing simple content access entitlements with Insights Operator # File: insights-operator-simple-access -# must-gather not supported for customers, per Dustin Row, cannot create resource "namespaces" +# must-gather not supported for customers, per Dustin Row, cannot create resource "namespaces" # - Name: Gathering data about your cluster # File: gathering-cluster-data # Distros: openshift-dedicated @@ -165,7 +165,7 @@ Topics: # - Name: Troubleshooting operating system issues # File: troubleshooting-operating-system-issues # Distros: openshift-dedicated -# cannot patch resource "nodes", "nodes/proxy", "namespaces" +# cannot patch resource "nodes", "nodes/proxy", "namespaces" # - Name: Troubleshooting network issues # File: troubleshooting-network-issues # Distros: openshift-dedicated @@ -858,6 +858,8 @@ Topics: File: cluster-logging - Name: Installing Logging File: cluster-logging-deploying +- Name: Updating Logging + File: cluster-logging-upgrading - Name: Accessing the service logs File: sd-accessing-the-service-logs - Name: Configuring your Logging deployment @@ -899,8 +901,6 @@ Topics: File: cluster-logging-collector - Name: Collecting and storing Kubernetes events File: cluster-logging-eventrouter -- Name: Updating Logging - File: cluster-logging-upgrading - Name: Viewing cluster dashboards File: cluster-logging-dashboards - Name: Logging alerts diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index 5fe8df8fe83c..18b417a083bb 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -242,7 +242,7 @@ 
Topics: # cannot list resource "secrets" in API group "" in the namespace "openshift-config" # - Name: Importing simple content access entitlements with Insights Operator # File: insights-operator-simple-access -# must-gather not supported for customers, per Dustin Row, cannot create resource "namespaces" +# must-gather not supported for customers, per Dustin Row, cannot create resource "namespaces" # - Name: Gathering data about your cluster # File: gathering-cluster-data # Distros: openshift-rosa @@ -267,7 +267,7 @@ Topics: # - Name: Troubleshooting operating system issues # File: troubleshooting-operating-system-issues # Distros: openshift-rosa -# cannot patch resource "nodes", "nodes/proxy", "namespaces" +# cannot patch resource "nodes", "nodes/proxy", "namespaces" # - Name: Troubleshooting network issues # File: troubleshooting-network-issues # Distros: openshift-rosa @@ -1029,6 +1029,8 @@ Topics: File: cluster-logging - Name: Installing Logging File: cluster-logging-deploying +- Name: Updating Logging + File: cluster-logging-upgrading - Name: Accessing the service logs File: sd-accessing-the-service-logs - Name: Viewing cluster logs in the AWS Console @@ -1071,8 +1073,6 @@ Topics: File: cluster-logging-collector - Name: Collecting and storing Kubernetes events File: cluster-logging-eventrouter -- Name: Updating Logging - File: cluster-logging-upgrading - Name: Viewing cluster dashboards File: cluster-logging-dashboards - Name: Logging alerts diff --git a/logging/cluster-logging-upgrading.adoc b/logging/cluster-logging-upgrading.adoc index c311e7e4b9da..45390e102221 100644 --- a/logging/cluster-logging-upgrading.adoc +++ b/logging/cluster-logging-upgrading.adoc @@ -1,20 +1,31 @@ :_mod-docs-content-type: ASSEMBLY :context: cluster-logging-upgrading [id="cluster-logging-upgrading"] -= Updating OpenShift Logging += Updating Logging include::_attributes/common-attributes.adoc[] toc::[] -[id="cluster-logging-supported-versions"] -== Supported Versions -For version 
compatibility and support information, see link:https://access.redhat.com/support/policy/updates/openshift#logging[Red Hat OpenShift Container Platform Life Cycle Policy] +There are two types of {logging} updates: minor release updates (5.y.z) and major release updates (5.y). -To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7 or 4.8. Then, you update the following operators: +[id="cluster-logging-upgrading-minor"] +== Minor release updates -* From Elasticsearch Operator 4.x to OpenShift Elasticsearch Operator 5.x -* From Cluster Logging Operator 4.x to Red Hat OpenShift Logging Operator 5.x +If you installed the {logging} Operators using the *Automatic* update approval option, your Operators receive minor version updates automatically. You do not need to complete any manual update steps. -To upgrade from a previous version of OpenShift Logging to the current version, you update OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator to their current versions. +If you installed the {logging} Operators using the *Manual* update approval option, you must manually approve minor version updates. For more information, see xref:../operators/admin/olm-upgrading-operators.adoc#olm-approving-pending-upgrade_olm-upgrading-operators[Manually approving a pending Operator update]. -include::modules/cluster-logging-updating-logging-to-current.adoc[leveloffset=+1] +[id="cluster-logging-upgrading-major"] +== Major release updates + +For major version updates you must complete some manual steps. + +For major release version compatibility and support information, see link:https://access.redhat.com/support/policy/updates/openshift_operators#platform-agnostic[OpenShift Operator Life Cycles]. 
+ +include::modules/logging-operator-upgrading-all-ns.adoc[leveloffset=+1] + +include::modules/logging-upgrading-clo.adoc[leveloffset=+1] + +include::modules/logging-upgrading-loki.adoc[leveloffset=+1] + +include::modules/cluster-logging-upgrading-elasticsearch.adoc[leveloffset=+1] diff --git a/logging/log_collection_forwarding/log-forwarding.adoc b/logging/log_collection_forwarding/log-forwarding.adoc index f01f4244c700..6dce4690de98 100644 --- a/logging/log_collection_forwarding/log-forwarding.adoc +++ b/logging/log_collection_forwarding/log-forwarding.adoc @@ -26,13 +26,17 @@ Administrators can create `ClusterLogForwarder` resources that specify which log Administrators can also authorize RBAC permissions that define which service accounts and users can access and forward which types of logs. -//// -include::modules/log-forwarding-modes.adoc[leveloffset=+1] +include::modules/log-forwarding-implementations.adoc[leveloffset=+1] -[id="log-forwarding-enabling-multi-clf-mode"] -== Enabling multi log forwarder mode for a cluster +[id="log-forwarding-enabling-multi-clf-feature"] +== Enabling the multi log forwarder feature for a cluster -To use multi log forwarder mode, you must create a service account and cluster role bindings for that service account. You can then reference the service account in the `ClusterLogForwarder` resource to control access permissions. +To use the multi log forwarder feature, you must create a service account and cluster role bindings for that service account. You can then reference the service account in the `ClusterLogForwarder` resource to control access permissions. + +[IMPORTANT] +==== +In order to support multi log forwarding in additional namespaces other than the `openshift-logging` namespace, you must xref:../../logging/cluster-logging-upgrading.adoc#logging-operator-upgrading-all-ns_cluster-logging-upgrading[update the Cluster Logging Operator to watch all namespaces]. 
This functionality is supported by default in new Cluster Logging Operator version 5.8 installations. +==== include::modules/log-collection-rbac-permissions.adoc[leveloffset=+2] @@ -45,7 +49,6 @@ endif::[] * link:https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization Kubernetes documentation] include::modules/logging-create-clf.adoc[leveloffset=+1] -//// [id="log-forwarding-audit-logs"] == Sending audit logs to the internal log store diff --git a/modules/cluster-logging-updating-logging-to-5-0.adoc b/modules/cluster-logging-updating-logging-to-5-0.adoc deleted file mode 100644 index e292db5c4b6c..000000000000 --- a/modules/cluster-logging-updating-logging-to-5-0.adoc +++ /dev/null @@ -1,225 +0,0 @@ -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-updating-logging-to-5-0_{context}"] -= Updating from cluster logging in {product-title} 4.6 or earlier to OpenShift Logging 5.x - -{product-title} 4.7 made the following name changes: - -* The _cluster logging_ feature became the _Red Hat OpenShift Logging_ 5.x product. -* The _Cluster Logging_ Operator became the _Red Hat OpenShift Logging_ Operator. -* The _Elasticsearch_ Operator became _OpenShift Elasticsearch_ Operator. - -To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7, 4.8, or 4.9. Then, you update the following operators: - -* From Elasticsearch Operator 4.x to OpenShift Elasticsearch Operator 5.x -* From Cluster Logging Operator 4.x to Red Hat OpenShift Logging Operator 5.x - -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== - -If you update the operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. 
To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. - -.Prerequisites - -* The {product-title} version is 4.7 or later. - -* The OpenShift Logging status is healthy: -** All pods are `ready`. -** The Elasticsearch cluster is healthy. - -* Your Elasticsearch and Kibana data is backed up. - -.Procedure - -. Update the OpenShift Elasticsearch Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the OpenShift Elasticsearch Operator version is 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Update the Cluster Logging Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-logging` project. - -.. Click the *Cluster Logging Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the Red Hat OpenShift Logging Operator version is 5.0.x or 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Check the logging components: - -.. 
Ensure that all Elasticsearch pods are in the *Ready* status: -+ -[source,terminal] ----- -$ oc get pod -n openshift-logging --selector component=elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m -elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m -elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ----- -+ -.. Ensure that the Elasticsearch cluster is healthy: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ----- -+ -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "green", -} ----- - -.. Ensure that the Elasticsearch cron jobs are created: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 56s -elasticsearch-im-audit */15 * * * * False 0 56s -elasticsearch-im-infra */15 * * * * False 0 56s ----- - -.. Verify that the log store is updated to 5.0 or 5.x and the indices are `green`: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- indices ----- -+ -Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. 
-+ -.Sample output with indices in a green status -[%collapsible] -==== -[source,terminal] ----- -Tue Jun 30 14:30:54 UTC 2020 -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open infra-000008 bnBvUFEXTWi92z3zWAzieQ 3 1 222195 0 289 144 -green open infra-000004 rtDSzoqsSl6saisSK7Au1Q 3 1 226717 0 297 148 -green open infra-000012 RSf_kUwDSR2xEuKRZMPqZQ 3 1 227623 0 295 147 -green open .kibana_7 1SJdCqlZTPWlIAaOUd78yg 1 1 4 0 0 0 -green open infra-000010 iXwL3bnqTuGEABbUDa6OVw 3 1 248368 0 317 158 -green open infra-000009 YN9EsULWSNaxWeeNvOs0RA 3 1 258799 0 337 168 -green open infra-000014 YP0U6R7FQ_GVQVQZ6Yh9Ig 3 1 223788 0 292 146 -green open infra-000015 JRBbAbEmSMqK5X40df9HbQ 3 1 224371 0 291 145 -green open .orphaned.2020.06.30 n_xQC2dWQzConkvQqei3YA 3 1 9 0 0 0 -green open infra-000007 llkkAVSzSOmosWTSAJM_hg 3 1 228584 0 296 148 -green open infra-000005 d9BoGQdiQASsS3BBFm2iRA 3 1 227987 0 297 148 -green open infra-000003 1-goREK1QUKlQPAIVkWVaQ 3 1 226719 0 295 147 -green open .security zeT65uOuRTKZMjg_bbUc1g 1 1 5 0 0 0 -green open .kibana-377444158_kubeadmin wvMhDwJkR-mRZQO84K0gUQ 3 1 1 0 0 0 -green open infra-000006 5H-KBSXGQKiO7hdapDE23g 3 1 226676 0 295 147 -green open infra-000001 eH53BQ-bSxSWR5xYZB6lVg 3 1 341800 0 443 220 -green open .kibana-6 RVp7TemSSemGJcsSUmuf3A 1 1 4 0 0 0 -green open infra-000011 J7XWBauWSTe0jnzX02fU6A 3 1 226100 0 293 146 -green open app-000001 axSAFfONQDmKwatkjPXdtw 3 1 103186 0 126 57 -green open infra-000016 m9c1iRLtStWSF1GopaRyCg 3 1 13685 0 19 9 -green open infra-000002 Hz6WvINtTvKcQzw-ewmbYg 3 1 228994 0 296 148 -green open infra-000013 KR9mMFUpQl-jraYtanyIGw 3 1 228166 0 298 148 -green open audit-000001 eERqLdLmQOiQDFES1LBATQ 3 1 0 0 0 0 ----- -==== - -.. 
Verify that the log collector is updated to 5.0 or 5.x: -+ -[source,terminal] ----- -$ oc get ds fluentd -o json | grep fluentd-init ----- -+ -Verify that the output includes a `fluentd-init` container: -+ -[source,terminal] ----- -"containerName": "fluentd-init" ----- - -.. Verify that the log visualizer is updated to 5.0 or 5.x using the Kibana CRD: -+ -[source,terminal] ----- -$ oc get kibana kibana -o json ----- -+ -Verify that the output includes a Kibana pod with the `ready` status: -+ -.Sample output with a ready Kibana pod -[%collapsible] -==== -[source,json] ----- -[ -{ -"clusterCondition": { -"kibana-5fdd766ffd-nb2jj": [ -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -}, -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -} -] -}, -"deployment": "kibana", -"pods": { -"failed": [], -"notReady": [] -"ready": [] -}, -"replicaSets": [ -"kibana-5fdd766ffd" -], -"replicas": 1 -} -] ----- -==== diff --git a/modules/cluster-logging-updating-logging-to-current.adoc b/modules/cluster-logging-upgrading-elasticsearch.adoc similarity index 68% rename from modules/cluster-logging-updating-logging-to-current.adoc rename to modules/cluster-logging-upgrading-elasticsearch.adoc index 23faff8ec2b5..0035add278f4 100644 --- a/modules/cluster-logging-updating-logging-to-current.adoc +++ b/modules/cluster-logging-upgrading-elasticsearch.adoc @@ -1,81 +1,55 @@ // Module file include in the following assemblies: -//cluster-logging-upgrading.adoc -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-updating-logging-to_current_{context}"] -= Updating Logging to the current version - -To update Logging to the current version, you change the subscriptions for the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. 
+// logging/cluster-logging-upgrading.adoc -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== +:_mod-docs-content-type: PROCEDURE +[id="cluster-logging-upgrading-elasticsearch_{context}"] += Updating the OpenShift Elasticsearch Operator +To update the OpenShift Elasticsearch Operator to the current version, you must modify the subscription. -If you update the Operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. +include::snippets/logging-elastic-dep-snip.adoc[] .Prerequisites -* The {product-title} version is 4.7 or later. +* If you are using Elasticsearch as the default log store, and Kibana as the UI, update the OpenShift Elasticsearch Operator before you update the Cluster Logging Operator. ++ +[IMPORTANT] +==== +If you update the Operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To fix this issue, delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. +==== * The Logging status is healthy: -+ -** All pods are `ready`. +** All pods have a `ready` status. ** The Elasticsearch cluster is healthy. -* Your link:https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html[Elasticsearch and Kibana data is backed up.] +* Your link:https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html[Elasticsearch and Kibana data is backed up]. +* You have administrator permissions. +* You have installed the {oc-first} for the verification steps. .Procedure -. 
Update the OpenShift Elasticsearch Operator: - ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Operators* -> *Installed Operators*. endif::[] ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *Installed Operators*. +. In the {hybrid-console}, click *Operators* -> *Installed Operators*. endif::[] -.. Select the `openshift-Operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -.. Verify that the OpenShift Elasticsearch Operator version is 5.x.x. -+ -.. Wait for the *Status* field to report *Succeeded*. - -. Update the Red Hat OpenShift Logging Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *Installed Operators*. -endif::[] +. Select the *openshift-operators-redhat* project. -.. Select the `openshift-logging` project. +. Click *OpenShift Elasticsearch Operator*. -.. Click the *Red Hat OpenShift Logging Operator*. +. Click *Subscription* -> *Channel*. -.. Click *Subscription* -> *Channel*. +. In the *Change Subscription Update Channel* window, select *stable-5.y* and click *Save*. Note the `elasticsearch-operator.v5.y.z` version. -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. +. Wait for a few seconds, then click *Operators* -> *Installed Operators*. Verify that the OpenShift Elasticsearch Operator version matches the latest `elasticsearch-operator.v5.y.z` version. -.. 
Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -.. Verify that the Red Hat OpenShift Logging Operator version is 5.y.z -+ -.. Wait for the *Status* field to report *Succeeded*. +. On the *Operators* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. -. Check the logging components: +.Verification -.. Ensure that all Elasticsearch pods are in the *Ready* status: +. Verify that all Elasticsearch pods have a *Ready* status by entering the following command and observing the output: + [source,terminal] ---- @@ -90,14 +64,15 @@ elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ---- -+ -.. Ensure that the Elasticsearch cluster is healthy: + +. Verify that the Elasticsearch cluster status is `green` by entering the following command and observing the output: + [source,terminal] ---- $ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ---- + +.Example output [source,json] ---- { @@ -106,7 +81,7 @@ $ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b } ---- -.. Ensure that the Elasticsearch cron jobs are created: +. Verify that the Elasticsearch cron jobs are created by entering the following commands and observing the output: + [source,terminal] ---- @@ -118,6 +93,7 @@ $ oc project openshift-logging $ oc get cronjob ---- + +.Example output [source,terminal] ---- NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE @@ -126,14 +102,14 @@ elasticsearch-im-audit */15 * * * * False 0 56s elasticsearch-im-infra */15 * * * * False 0 56s ---- -.. Verify that the log store is updated to 5.x and the indices are `green`: +. 
Verify that the log store is updated to the correct version and the indices are `green` by entering the following command and observing the output: + [source,terminal] ---- $ oc exec -c elasticsearch -- indices ---- + -.. Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. +Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices: + .Sample output with indices in a green status [%collapsible] @@ -168,28 +144,14 @@ green open audit-000001 ---- ==== -.. Verify that the log collector is updated: -+ -[source,terminal] ----- -$ oc get ds collector -o json | grep collector ----- -+ -.. Verify that the output includes a `collectort` container: -+ -[source,terminal] ----- -"containerName": "collector" ----- - -.. Verify that the log visualizer is updated to 5.x using the Kibana CRD: +. Verify that the log visualizer is updated to the correct version by entering the following command and observing the output: + [source,terminal] ---- $ oc get kibana kibana -o json ---- + -.. Verify that the output includes a Kibana pod with the `ready` status: +Verify that the output includes a Kibana pod with the `ready` status: + .Sample output with a ready Kibana pod [%collapsible] diff --git a/modules/log-forwarding-implementations.adoc b/modules/log-forwarding-implementations.adoc new file mode 100644 index 000000000000..0b9f2f131266 --- /dev/null +++ b/modules/log-forwarding-implementations.adoc @@ -0,0 +1,33 @@ +// Module included in the following assemblies: +// +// * logging/log_collection_forwarding/log-forwarding.adoc + +:_content-type: CONCEPT +[id="log-forwarding-implementations_{context}"] += Log forwarding implementations + +There are two log forwarding implementations available: the legacy implementation, and the multi log forwarder feature. + +[IMPORTANT] +==== +Only the Vector collector is supported for use with the multi log forwarder feature. 
The Fluentd collector can only be used with legacy implementations. +==== + +[id="log-forwarding-implementations-legacy_{context}"] +== Legacy implementation + +In legacy implementations, you can only use one log forwarder in your cluster. The `ClusterLogForwarder` resource in this mode must be named `instance`, and must be created in the `openshift-logging` namespace. The `ClusterLogForwarder` resource also requires a corresponding `ClusterLogging` resource named `instance` in the `openshift-logging` namespace. + +[id="log-forwarding-implementations-multi-clf_{context}"] +== Multi log forwarder feature + +The multi log forwarder feature is available in logging 5.8 and later, and provides the following functionality: + +* Administrators can control which users are allowed to define log collection and which logs they are allowed to collect. +* Users who have the required permissions are able to specify additional log collection configurations. +* Administrators who are migrating from the deprecated Fluentd collector to the Vector collector can deploy a new log forwarder separately from their existing deployment. The existing and new log forwarders can operate simultaneously while workloads are being migrated. + +In multi log forwarder implementations, you are not required to create a corresponding `ClusterLogging` resource for your `ClusterLogForwarder` resource. You can create multiple `ClusterLogForwarder` resources using any name, in any namespace, with the following exceptions: + +* You cannot create a `ClusterLogForwarder` resource named `instance` in the `openshift-logging` namespace, because this is reserved for a log forwarder that supports the legacy workflow using the Fluentd collector. +* You cannot create a `ClusterLogForwarder` resource named `collector` in the `openshift-logging` namespace, because this is reserved for the collector. 
diff --git a/modules/log-forwarding-modes.adoc b/modules/log-forwarding-modes.adoc deleted file mode 100644 index 5a09a8a9c62d..000000000000 --- a/modules/log-forwarding-modes.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: CONCEPT -[id="log-forwarding-modes_{context}"] -= Log forwarding modes - -There are two log forwarding modes available: legacy mode, and multi log forwarder mode. - -[IMPORTANT] -==== -Only the Vector collector is supported for use with multi log forwarder mode. The Fluentd collector can only be used with legacy mode. -==== - -[id="log-forwarding-modes-legacy_{context}"] -== Legacy mode - -In legacy mode, you can only use one log forwarder in your cluster. The `ClusterLogForwarder` resource in this mode must be named `instance`, and must be created in the `openshift-logging` namespace. The `ClusterLogForwarder` resource also requires a corresponding `ClusterLogging` resource named `instance` in the `openshift-logging` namespace. - -[id="log-forwarding-modes-multi-clf_{context}"] -== Multi log forwarder mode - -Multi log forwarder mode is available in logging 5.8 and later, and provides the following functionality: - -* Administrators can control which users are allowed to define log collection and which logs they are allowed to collect. -* Users who have the required permissions are able to specify additional log collection configurations. -* Administrators who are migrating from the deprecated Fluentd collector to the Vector collector can deploy a new log forwarder separately from their existing deployment. The existing and new log forwarders can operate simultaneously while workloads are being migrated. - -In multi log forwarder mode, you are not required to create a corresponding `ClusterLogging` resource for your `ClusterLogForwarder` resource. 
You can create multiple `ClusterLogForwarder` resources using any name, in any namespace, with the following exceptions: - -* You cannot create a `ClusterLogForwarder` resource named `instance` in the `openshift-logging` namespace, because this is reserved for a log forwarder that supports the legacy workflow using the Fluentd collector. -* You cannot create a `ClusterLogForwarder` resource named `collector` in the `openshift-logging` namespace, because this is reserved for the collector. diff --git a/modules/logging-create-clf.adoc b/modules/logging-create-clf.adoc index 6302d6e17e3e..f19912f1e177 100644 --- a/modules/logging-create-clf.adoc +++ b/modules/logging-create-clf.adoc @@ -6,10 +6,10 @@ [id="logging-create-clf_{context}"] = Creating a log forwarder -To create a log forwarder, you must create a `ClusterLogForwarder` CR that specifies the log input types that the service account can collect. You can also specify which outputs the logs can be forwarded to. If you are using multi log forwarder mode, you must also reference the service account in the `ClusterLogForwarder` CR. +To create a log forwarder, you must create a `ClusterLogForwarder` CR that specifies the log input types that the service account can collect. You can also specify which outputs the logs can be forwarded to. If you are using the multi log forwarder feature, you must also reference the service account in the `ClusterLogForwarder` CR. -If you are using multi log forwarder mode on your cluster, you can create `ClusterLogForwarder` custom resources (CRs) in any namespace, using any name. -If you are using legacy mode, the `ClusterLogForwarder` CR must be named `instance`, and must be created in the `openshift-logging` namespace. +If you are using the multi log forwarder feature on your cluster, you can create `ClusterLogForwarder` custom resources (CRs) in any namespace, using any name. 
+If you are using a legacy implementation, the `ClusterLogForwarder` CR must be named `instance`, and must be created in the `openshift-logging` namespace. [IMPORTANT] ==== @@ -37,15 +37,15 @@ spec: url: <7> # ... ---- -<1> In legacy mode, the CR name must be `instance`. In multi log forwarder mode, you can use any name. -<2> In legacy mode, the CR namespace must be `openshift-logging`. In multi log forwarder mode, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder mode. +<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. +<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. +<3> The name of your service account. The service account is only required in multi log forwarder implementations. <4> The log types that are collected. The value for this field can be `audit` for audit logs, `application` for application logs, `infrastructure` for infrastructure logs, or a named input that has been defined for your application. <5> The type of output that you want to forward logs to. The value of this field can be `default`, `loki`, `kafka`, `elasticsearch`, `fluentdForward`, `syslog`, or `cloudwatch`. + [NOTE] ==== -The `default` output type is not supported in mutli log forwarder mode. +The `default` output type is not supported in multi log forwarder implementations. ==== <6> A name for the output that you want to forward logs to. <7> The URL of the output that you want to forward logs to. 
diff --git a/modules/logging-operator-upgrading-all-ns.adoc b/modules/logging-operator-upgrading-all-ns.adoc new file mode 100644 index 000000000000..c9fc8c11bbf1 --- /dev/null +++ b/modules/logging-operator-upgrading-all-ns.adoc @@ -0,0 +1,67 @@ +// Module included in the following assemblies: +// +// * logging/cluster-logging-upgrading.adoc + +:_mod-docs-content-type: PROCEDURE +[id="logging-operator-upgrading-all-ns_{context}"] += Upgrading the Cluster Logging Operator to watch all namespaces + +In logging 5.7 and older versions, the Cluster Logging Operator only watches the `openshift-logging` namespace. +If you want the Cluster Logging Operator to watch all namespaces on your cluster, you must redeploy the Operator. You can complete the following procedure to redeploy the Operator without deleting your logging components. + +.Prerequisites + +* You have installed the {oc-first}. +* You have administrator permissions. + +.Procedure + +. Delete the subscription by running the following command: ++ +[source,terminal] +---- +$ oc -n openshift-logging delete subscription <subscription> +---- + +. Delete the Operator group by running the following command: ++ +[source,terminal] +---- +$ oc -n openshift-logging delete operatorgroup <operator_group_name> +---- + +. Delete the cluster service version (CSV) by running the following command: ++ +[source,terminal] +---- +$ oc delete clusterserviceversion cluster-logging.<version> +---- + +. Redeploy the Cluster Logging Operator by following the "Installing Logging" documentation. + +.Verification + +* Check that the `targetNamespaces` field in the `OperatorGroup` resource is not present or is set to an empty string.
++ +To do this, run the following command and inspect the output: ++ +[source,terminal] +---- +$ oc get operatorgroup -o yaml +---- ++ +.Example output +[source,yaml] +---- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: openshift-logging-f52cn + namespace: openshift-logging +spec: + upgradeStrategy: Default +status: + namespaces: + - "" +# ... +---- diff --git a/modules/logging-upgrading-clo.adoc b/modules/logging-upgrading-clo.adoc new file mode 100644 index 000000000000..b5508bac6b22 --- /dev/null +++ b/modules/logging-upgrading-clo.adoc @@ -0,0 +1,33 @@ +// Module included in the following assemblies: +// +// * logging/cluster-logging-upgrading.adoc + +:_mod-docs-content-type: PROCEDURE +[id="logging-upgrading-clo_{context}"] += Updating the Cluster Logging Operator + +To update the Cluster Logging Operator to a new major release version, you must modify the update channel for the Operator subscription. + +.Prerequisites + +* You have installed the Red Hat OpenShift Logging Operator. +* You have administrator permissions. +* You have access to the {product-title} web console and are viewing the *Administrator* perspective. + +.Procedure + +. Navigate to *Operators* -> *Installed Operators*. + +. Select the *openshift-logging* project. + +. Click the *Red Hat OpenShift Logging* Operator. + +. Click *Subscription*. In the *Subscription details* section, click the *Update channel* link. This link text might be *stable* or *stable-5.y*, depending on your current update channel. + +. In the *Change Subscription Update Channel* window, select the latest major version update channel, *stable-5.y*, and click *Save*. Note the `cluster-logging.v5.y.z` version. + +.Verification + +. Wait for a few seconds, then click *Operators* -> *Installed Operators*. Verify that the Red Hat OpenShift Logging Operator version matches the latest `cluster-logging.v5.y.z` version. + +. 
On the *Operators* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. diff --git a/modules/logging-upgrading-loki.adoc b/modules/logging-upgrading-loki.adoc new file mode 100644 index 000000000000..b70fdda97bca --- /dev/null +++ b/modules/logging-upgrading-loki.adoc @@ -0,0 +1,33 @@ +// Module included in the following assemblies: +// +// * logging/cluster-logging-upgrading.adoc + +:_mod-docs-content-type: PROCEDURE +[id="logging-upgrading-loki_{context}"] += Updating the Loki Operator + +To update the Loki Operator to a new major release version, you must modify the update channel for the Operator subscription. + +.Prerequisites + +* You have installed the Loki Operator. +* You have administrator permissions. +* You have access to the {product-title} web console and are viewing the *Administrator* perspective. + +.Procedure + +. Navigate to *Operators* -> *Installed Operators*. + +. Select the *openshift-operators-redhat* project. + +. Click the *Loki Operator*. + +. Click *Subscription*. In the *Subscription details* section, click the *Update channel* link. This link text might be *stable* or *stable-5.y*, depending on your current update channel. + +. In the *Change Subscription Update Channel* window, select the latest major version update channel, *stable-5.y*, and click *Save*. Note the `loki-operator.v5.y.z` version. + +.Verification + +. Wait for a few seconds, then click *Operators* -> *Installed Operators*. Verify that the Loki Operator version matches the latest `loki-operator.v5.y.z` version. + +. On the *Operators* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*.