From 79af05fe99d1ca6d9aec491cdb1e02ba8ad7773c Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Fri, 22 Apr 2022 11:50:53 -0400 Subject: [PATCH 1/2] Revert "Merge pull request #41099 from ktothill/BZ-1941940-main" This reverts commit 7ea981e0f6a3adb8134dd17558595a8685354076, reversing changes made to 1037e57f40c863bba4850da59e470129ba564a1d. --- modules/ipi-install-creating-an-rhcos-images-cache.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ipi-install-creating-an-rhcos-images-cache.adoc b/modules/ipi-install-creating-an-rhcos-images-cache.adoc index 95b9972affd6..ce4c59f0c338 100644 --- a/modules/ipi-install-creating-an-rhcos-images-cache.adoc +++ b/modules/ipi-install-creating-an-rhcos-images-cache.adoc @@ -99,7 +99,7 @@ $ ls -Z /home/kni/rhcos_image_cache $ podman run -d --name rhcos_image_cache \ <1> -v /home/kni/rhcos_image_cache:/var/www/html \ -p 8080:8080/tcp \ -quay.io/centos7/httpd-24-centos7:latest +registry.centos.org/centos/httpd-24-centos7:latest ---- ifndef::upstream[] + From 13bd778b93426bf81610f0ea412d169666ffc99f Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Fri, 22 Apr 2022 11:51:56 -0400 Subject: [PATCH 2/2] Revert "RHDEVDOCS-3797 - Logging Name Change w/ Peer Review Edits v3" This reverts commit db856a1ec973dd509360a2d7606299b63c653b22. 
--- logging/cluster-logging-deploying.adoc | 15 +- logging/cluster-logging-eventrouter.adoc | 4 +- logging/cluster-logging-exported-fields.adoc | 4 +- logging/cluster-logging-external.adoc | 4 +- logging/cluster-logging-release-notes.adoc | 165 +++++++++++++++++- logging/cluster-logging-uninstall.adoc | 2 +- logging/cluster-logging-upgrading.adoc | 12 +- logging/cluster-logging-visualizer.adoc | 6 +- logging/cluster-logging.adoc | 8 +- logging/config/cluster-logging-collector.adoc | 2 +- .../cluster-logging-configuring-cr.adoc | 7 +- .../config/cluster-logging-configuring.adoc | 13 +- logging/config/cluster-logging-log-store.adoc | 2 +- .../cluster-logging-maintenance-support.adoc | 3 + logging/config/cluster-logging-memory.adoc | 5 +- .../config/cluster-logging-moving-nodes.adoc | 2 +- ...luster-logging-storage-considerations.adoc | 8 +- .../config/cluster-logging-tolerations.adoc | 4 +- .../config/cluster-logging-visualizer.adoc | 3 +- logging/dedicated-cluster-logging.adoc | 8 +- .../cluster-logging-alerts.adoc | 2 +- .../cluster-logging-cluster-status.adoc | 4 +- .../cluster-logging-must-gather.adoc | 4 +- logging/viewing-resource-logs.adoc | 5 +- modules/cluster-logging-about-collector.adoc | 2 +- modules/cluster-logging-about-components.adoc | 9 +- modules/cluster-logging-about-crd.adoc | 4 +- modules/cluster-logging-about-logstore.adoc | 7 +- modules/cluster-logging-about.adoc | 17 +- modules/cluster-logging-clo-status-comp.adoc | 12 +- modules/cluster-logging-clo-status.adoc | 2 +- ...ster-logging-collector-alerts-viewing.adoc | 2 +- ...ging-collector-log-forward-cloudwatch.adoc | 2 +- ...-logging-collector-log-forward-syslog.adoc | 1 - ...ogging-collector-log-forwarding-about.adoc | 4 +- ...luster-logging-collector-pod-location.adoc | 2 +- ...cluster-logging-collector-tolerations.adoc | 2 +- modules/cluster-logging-collector-tuning.adoc | 2 +- ...uster-logging-configuring-image-about.adoc | 5 +- modules/cluster-logging-cpu-memory.adoc | 2 +- 
modules/cluster-logging-deploy-cli.adoc | 6 +- modules/cluster-logging-deploy-console.adoc | 6 +- modules/cluster-logging-deploy-label.adoc | 2 +- .../cluster-logging-deploy-multitenant.adoc | 2 +- ...logging-deploy-storage-considerations.adoc | 2 +- modules/cluster-logging-deploying-about.adoc | 13 +- .../cluster-logging-elasticsearch-audit.adoc | 2 +- ...luster-logging-elasticsearch-exposing.adoc | 9 +- modules/cluster-logging-elasticsearch-ha.adoc | 2 +- ...lasticsearch-persistent-storage-empty.adoc | 8 +- ...uster-logging-elasticsearch-retention.adoc | 4 +- ...cluster-logging-elasticsearch-storage.adoc | 4 +- ...ter-logging-elasticsearch-tolerations.adoc | 2 +- .../cluster-logging-eventrouter-about.adoc | 4 +- .../cluster-logging-eventrouter-deploy.adoc | 2 +- modules/cluster-logging-forwarding-about.adoc | 3 +- .../cluster-logging-kibana-tolerations.adoc | 2 +- ...ster-logging-log-store-status-viewing.adoc | 2 +- modules/cluster-logging-logstore-limits.adoc | 2 +- ...ter-logging-maintenance-support-about.adoc | 4 +- ...luster-logging-manual-rollout-rolling.adoc | 2 +- .../cluster-logging-must-gather-about.adoc | 2 +- ...luster-logging-must-gather-collecting.adoc | 6 +- .../cluster-logging-release-notes-5.2.0.adoc | 109 ------------ .../cluster-logging-release-notes-5.2.z.adoc | 3 - .../cluster-logging-release-notes-5.3.0.adoc | 54 ------ .../cluster-logging-release-notes-5.3.z.adoc | 2 - .../cluster-logging-release-notes-5.4.0.adoc | 34 ---- .../cluster-logging-supported-versions.adoc | 10 ++ modules/cluster-logging-uninstall.adoc | 6 +- .../cluster-logging-visualizer-kibana.adoc | 2 +- modules/infrastructure-moving-logging.adoc | 4 +- 72 files changed, 344 insertions(+), 338 deletions(-) delete mode 100644 modules/cluster-logging-release-notes-5.2.0.adoc delete mode 100644 modules/cluster-logging-release-notes-5.3.0.adoc delete mode 100644 modules/cluster-logging-release-notes-5.4.0.adoc create mode 100644 modules/cluster-logging-supported-versions.adoc diff 
--git a/logging/cluster-logging-deploying.adoc b/logging/cluster-logging-deploying.adoc index 66d5b19f22cc..f39a16f26261 100644 --- a/logging/cluster-logging-deploying.adoc +++ b/logging/cluster-logging-deploying.adoc @@ -1,17 +1,20 @@ :_content-type: ASSEMBLY :context: cluster-logging-deploying [id="cluster-logging-deploying"] -= Installing the {logging-title} += Installing OpenShift Logging include::_attributes/common-attributes.adoc[] toc::[] -You can install the {logging-title} by deploying the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. The OpenShift Elasticsearch Operator creates and manages the Elasticsearch cluster used by OpenShift Logging. The {logging} Operator creates and manages the components of the logging stack. +You can install OpenShift Logging by deploying +the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. The OpenShift Elasticsearch Operator +creates and manages the Elasticsearch cluster used by OpenShift Logging. +The Red Hat OpenShift Logging Operator creates and manages the components of the logging stack. -The process for deploying the {logging} to {product-title} involves: +The process for deploying OpenShift Logging to {product-title} involves: -* Reviewing the xref:../logging/config/cluster-logging-storage-considerations#cluster-logging-storage[{logging-uc} storage considerations]. +* Reviewing the xref:../logging/config/cluster-logging-storage-considerations#cluster-logging-storage[OpenShift Logging storage considerations]. * Installing the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator using the {product-title} xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-console_cluster-logging-deploying[web console] or xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-cli_cluster-logging-deploying[CLI]. 
@@ -31,7 +34,7 @@ include::modules/cluster-logging-deploy-console.adoc[leveloffset=+1] If you plan to use Kibana, you must xref:#cluster-logging-visualizer-indices_cluster-logging-deploying[manually create your Kibana index patterns and visualizations] to explore and visualize data in Kibana. -If your cluster network provider enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the {logging} Operators]. +If your cluster network provider enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the OpenShift Logging operators]. include::modules/cluster-logging-deploy-cli.adoc[leveloffset=+1] @@ -40,7 +43,7 @@ include::modules/cluster-logging-deploy-cli.adoc[leveloffset=+1] If you plan to use Kibana, you must xref:#cluster-logging-visualizer-indices_cluster-logging-deploying[manually create your Kibana index patterns and visualizations] to explore and visualize data in Kibana. -If your cluster network provider enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the {logging} Operators]. +If your cluster network provider enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the OpenShift Logging operators]. 
include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+2] diff --git a/logging/cluster-logging-eventrouter.adoc b/logging/cluster-logging-eventrouter.adoc index c0397eee4000..b05516a3c85d 100644 --- a/logging/cluster-logging-eventrouter.adoc +++ b/logging/cluster-logging-eventrouter.adoc @@ -6,9 +6,9 @@ include::_attributes/common-attributes.adoc[] toc::[] -The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by the {logging}. You must manually deploy the Event Router. +The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by OpenShift Logging. You must manually deploy the Event Router. -The Event Router collects events from all projects and writes them to `STDOUT`. The collector then forwards those events to the store defined in the `ClusterLogForwarder` custom resource (CR). +The Event Router collects events from all projects and writes them to `STDOUT`. Fluentd collects those events and forwards them into the {product-title} Elasticsearch instance. Elasticsearch indexes the events to the `infra` index. [IMPORTANT] ==== diff --git a/logging/cluster-logging-exported-fields.adoc b/logging/cluster-logging-exported-fields.adoc index 9d7cf2487ca2..e8b4922bfe7c 100644 --- a/logging/cluster-logging-exported-fields.adoc +++ b/logging/cluster-logging-exported-fields.adoc @@ -6,14 +6,12 @@ include::_attributes/common-attributes.adoc[] toc::[] -The following fields can be present in log records exported by the {logging}. Although log records are typically formatted as JSON objects, the same data model can be applied to other encodings. +The following fields can be present in log records exported by OpenShift Logging. Although log records are typically formatted as JSON objects, the same data model can be applied to other encodings. To search these fields from Elasticsearch and Kibana, use the full dotted field name when searching. 
For example, with an Elasticsearch */_search URL*, to look for a Kubernetes pod name, use `/_search/q=kubernetes.pod_name:name-of-my-pod`. // The logging system can parse JSON-formatted log entries to external systems. These log entries are formatted as a fluentd message with extra fields such as `kubernetes`. The fields exported by the logging system and available for searching from Elasticsearch and Kibana are documented at the end of this document. include::modules/cluster-logging-exported-fields-top-level-fields.adoc[leveloffset=0] - include::modules/cluster-logging-exported-fields-kubernetes.adoc[leveloffset=0] - // add modules/cluster-logging-exported-fields-openshift when available diff --git a/logging/cluster-logging-external.adoc b/logging/cluster-logging-external.adoc index 5b3c46ed0e6b..1cdbda55f2fa 100644 --- a/logging/cluster-logging-external.adoc +++ b/logging/cluster-logging-external.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -By default, the {logging} sends container and infrastructure logs to the default internal Elasticsearch log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder. +By default, OpenShift Logging sends container and infrastructure logs to the default internal Elasticsearch log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder. To send logs to other log aggregators, you use the {product-title} Cluster Log Forwarder. This API enables you to send container, infrastructure, and audit logs to specific endpoints within or outside your cluster. 
In addition, you can send different types of logs to various systems so that various individuals can access each type. You can also enable Transport Layer Security (TLS) support to send logs securely, as required by your organization. @@ -15,7 +15,7 @@ To send logs to other log aggregators, you use the {product-title} Cluster Log F To send audit logs to the default internal Elasticsearch log store, use the Cluster Log Forwarder as described in xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-audit_cluster-logging-store[Forward audit logs to the log store]. ==== -When you forward logs externally, the {logging} creates or modifies a Fluentd config map to send logs using your desired protocols. You are responsible for configuring the protocol on the external log aggregator. +When you forward logs externally, the Red Hat OpenShift Logging Operator creates or modifies a Fluentd config map to send logs using your desired protocols. You are responsible for configuring the protocol on the external log aggregator. 
[IMPORTANT] ==== diff --git a/logging/cluster-logging-release-notes.adoc b/logging/cluster-logging-release-notes.adoc index 6e491d8ed4a4..6116a83ff251 100644 --- a/logging/cluster-logging-release-notes.adoc +++ b/logging/cluster-logging-release-notes.adoc @@ -1,9 +1,172 @@ :_content-type: ASSEMBLY [id="cluster-logging-release-notes"] -= Release notes for the {logging-title} += Release notes for Red Hat OpenShift Logging 5.3 include::_attributes/common-attributes.adoc[] :context: cluster-logging-release-notes-v5x toc::[] include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] + +[id="cluster-logging-supported-versions"] +== Supported Versions +include::modules/cluster-logging-supported-versions.adoc[leveloffset=+1] + +// Release Notes by version +[id="cluster-logging-release-notes-5-3-0"] +== OpenShift Logging 5.3.0 +The following advisories are available for OpenShift Logging 5.3.x: + +[id="openshift-logging-5-3-0-new-features-and-enhancements"] +=== New features and enhancements +* With this update, authorization requirements for Log Forwarding have been relaxed. Outputs may now be configured with SASL, username/password, or TLS. + +[id="openshift-logging-5-3-0-bug-fixes"] +=== Bug fixes +* Before this update, application logs were not correctly configured to forward to the proper Cloudwatch stream with multi-line error detection enabled. (link:https://issues.redhat.com/browse/LOG-1939[LOG-1939]) + +* Before this update, a name change of the deployed collector in the 5.3 release caused the alert 'fluentnodedown' to generate. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918]) + +* Before this update, a regression introduced in a prior release configuration caused the collector to flush its buffered messages before shutdown, creating a delay the termination and restart of collector Pods. With this update, fluentd no longer flushes buffers at shutdown, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-1735[LOG-1735]) + +* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. With this update, a log entry's "level" value is set based on: a parsed JSON message that has a "level" field or by applying a regex against the message field to extract a match. (link:https://issues.redhat.com/browse/LOG-1199[LOG-1199]) + +[id="openshift-logging-5-3-0-known-issues"] +=== Known issues +* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to an external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. (link:https://issues.redhat.com/browse/LOG-1652[LOG-1652]) ++ +As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering: ++ +[source,terminal] +---- +$ oc delete pod -l component=fluentd +---- + +[id="openshift-logging-5-3-0-deprecated-removed-features"] +== Deprecated and removed features +Some features available in previous releases have been deprecated or removed. + +Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. + +[id="openshift-logging-5-3-0-legacy-forwarding"] +=== Forwarding logs using the legacy Fluentd and legacy syslog methods have been removed + +In OpenShift Logging 5.3, the legacy methods of forwarding logs to Syslog and Fluentd are removed. Bug fixes and support are provided through the end of the OpenShift Logging 5.2 life cycle. After which, no new feature enhancements are made. 
+ +Instead, use the following non-legacy methods: + +* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] +* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] + +[id="openshift-logging-5-3-0-legacy-forwarding-config"] +=== Configuration mechanisms for legacy forwarding methods have been removed + +In OpenShift Logging 5.3, the legacy configuration mechanism for log forwarding is removed: You cannot forward logs using the legacy Fluentd method and legacy Syslog method. Use the standard log forwarding methods instead. + +[id="cluster-logging-release-notes-5-2-0"] +== OpenShift Logging 5.2.0 + +The following advisories are available for OpenShift Logging 5.2.x: + +* link:https://access.redhat.com/errata/RHBA-2021:3550[RHBA-2021:3550 OpenShift Logging Bug Fix Release 5.2.1] +* link:https://access.redhat.com/errata/RHBA-2021:3393[RHBA-2021:3393 OpenShift Logging Bug Fix Release 5.2.0] + +[id="openshift-logging-5-2-0-new-features-and-enhancements"] +=== New features and enhancements + +* With this update, you can forward log data to Amazon CloudWatch, which provides application and infrastructure monitoring. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch]. (link:https://issues.redhat.com/browse/LOG-1173[LOG-1173]) + +* With this update, you can forward log data to Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-loki_cluster-logging-external[Forwarding logs to Loki]. 
(link:https://issues.redhat.com/browse/LOG-684[LOG-684]) + +* With this update, if you use the Fluentd forward protocol to forward log data over a TLS-encrypted connection, now you can use a password-encrypted private key file and specify the passphrase in the Cluster Log Forwarder configuration. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol]. (link:https://issues.redhat.com/browse/LOG-1525[LOG-1525]) + +* This enhancement enables you to use a username and password to authenticate a log forwarding connection to an external Elasticsearch instance. For example, if you cannot use mutual TLS (mTLS) because a third-party operates the Elasticsearch instance, you can use HTTP or HTTPS and set a secret that contains the username and password. For more information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-es_cluster-logging-external[Forwarding logs to an external Elasticsearch instance]. (link:https://issues.redhat.com/browse/LOG-1022[LOG-1022]) + +* With this update, you can collect OVN network policy audit logs for forwarding to a logging server. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collecting-ovn-audit-logs_cluster-logging-external[Collecting OVN network policy audit logs]. (link:https://issues.redhat.com/browse/LOG-1526[LOG-1526]) + +* By default, the data model introduced in {product-title} 4.5 gave logs from different namespaces a single index in common. This change made it harder to see which namespaces produced the most logs. ++ +The current release adds namespace metrics to the *Logging* dashboard in the {product-title} console. With these metrics, you can see which namespaces produce logs and how many logs each namespace produces for a given timestamp. 
++ +To see these metrics, open the *Administrator* perspective in the {product-title} web console, and navigate to *Observe* -> *Dashboards* -> *Logging/Elasticsearch*. (link:https://issues.redhat.com/browse/LOG-1680[LOG-1680]) + +* The current release, OpenShift Logging 5.2, enables two new metrics: For a given timestamp or duration, you can see the total logs produced or logged by individual containers, and the total logs collected by the collector. These metrics are labeled by namespace, pod, and container name so that you can see how many logs each namespace and pod collects and produces. (link:https://issues.redhat.com/browse/LOG-1213[LOG-1213]) + +[id="openshift-logging-5-2-0-bug-fixes"] +=== Bug fixes + +* Before this update, when the OpenShift Elasticsearch Operator created index management cronjobs, it added the `POLICY_MAPPING` environment variable twice, which caused the apiserver to report the duplication. This update fixes the issue so that the `POLICY_MAPPING` environment variable is set only once per cronjob, and there is no duplication for the apiserver to report. (link:https://issues.redhat.com/browse/LOG-1130[LOG-1130]) + +* Before this update, suspending an Elasticsearch cluster to zero nodes did not suspend the index-management cronjobs, which put these cronjobs into maximum backoff. Then, after unsuspending the Elasticsearch cluster, these cronjobs stayed halted due to maximum backoff reached. This update resolves the issue by suspending the cronjobs and the cluster. (link:https://issues.redhat.com/browse/LOG-1268[LOG-1268]) + +* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers was missing the "chart namespace" label and provided the incorrect metric name, `fluentd_input_status_total_bytes_logged`. With this update, the chart shows the namespace label and the correct metric name, `log_logged_bytes_total`. 
(link:https://issues.redhat.com/browse/LOG-1271[LOG-1271]) + +* Before this update, if an index management cronjob terminated with an error, it did not report the error exit code: instead, its job status was "complete." This update resolves the issue by reporting the error exit codes of index management cronjobs that terminate with errors. (link:https://issues.redhat.com/browse/LOG-1273[LOG-1273]) + +* The `priorityclasses.v1beta1.scheduling.k8s.io` was removed in 1.22 and replaced by `priorityclasses.v1.scheduling.k8s.io` (`v1beta1` was replaced by `v1`). Before this update, `APIRemovedInNextReleaseInUse` alerts were generated for `priorityclasses` because `v1beta1` was still present . This update resolves the issue by replacing `v1beta1` with `v1`. The alert is no longer generated. (link:https://issues.redhat.com/browse/LOG-1385[LOG-1385]) + +* Previously, the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator did not have the annotation that was required for them to appear in the {product-title} web console list of operators that can run in a disconnected environment. This update adds the `operators.openshift.io/infrastructure-features: '["Disconnected"]'` annotation to these two operators so that they appear in the list of operators that run in disconnected environments. (link:https://issues.redhat.com/browse/LOG-1420[LOG-1420]) + +* Before this update, Red Hat OpenShift Logging Operator pods were scheduled on CPU cores that were reserved for customer workloads on performance-optimized single-node clusters. With this update, cluster logging operator pods are scheduled on the correct CPU cores. (link:https://issues.redhat.com/browse/LOG-1440[LOG-1440]) + +* Before this update, some log entries had unrecognized UTF-8 bytes, which caused Elasticsearch to reject the messages and block the entire buffered payload. With this update, rejected payloads drop the invalid log entries and resubmit the remaining entries to resolve the issue. 
(link:https://issues.redhat.com/browse/LOG-1499[LOG-1499]) + +* Before this update, the `kibana-proxy` pod sometimes entered the `CrashLoopBackoff` state and logged the following message `Invalid configuration: cookie_secret must be 16, 24, or 32 bytes to create an AES cipher when pass_access_token == true or cookie_refresh != 0, but is 29 bytes.` The exact actual number of bytes could vary. With this update, the generation of the Kibana session secret has been corrected, and the kibana-proxy pod no longer enters a `CrashLoopBackoff` state due to this error. (link:https://issues.redhat.com/browse/LOG-1446[LOG-1446]) + +* Before this update, the AWS CloudWatch Fluentd plug-in logged its AWS API calls to the Fluentd log at all log levels, consuming additional {product-title} node resources. With this update, the AWS CloudWatch Fluentd plug-in logs AWS API calls only at the "debug" and "trace" log levels. This way, at the default "warn" log level, Fluentd does not consume extra node resources. (link:https://issues.redhat.com/browse/LOG-1071[LOG-1071]) + +* Before this update, the Elasticsearch OpenDistro security plug-in caused user index migrations to fail. This update resolves the issue by providing a newer version of the plug-in. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1276[LOG-1276]) + +* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers lacked data points. This update resolves the issue, and the dashboard displays all data points. (link:https://issues.redhat.com/browse/LOG-1353[LOG-1353]) + +* Before this update, if you were tuning the performance of the Fluentd log forwarder by adjusting the `chunkLimitSize` and `totalLimitSize` values, the `Setting queued_chunks_limit_size for each buffer to` message reported values that were too low. The current update fixes this issue so that this message reports the correct values. 
(link:https://issues.redhat.com/browse/LOG-1411[LOG-1411]) + +* Before this update, the Kibana OpenDistro security plug-in caused user index migrations to fail. This update resolves the issue by providing a newer version of the plug-in. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1558[LOG-1558]) + +* Before this update, using a namespace input filter prevented logs in that namespace from appearing in other inputs. With this update, logs are sent to all inputs that can accept them. (link:https://issues.redhat.com/browse/LOG-1570[LOG-1570]) + +* Before this update, a missing license file for the `viaq/logerr` dependency caused license scanners to abort without success. With this update, the `viaq/logerr` dependency is licensed under Apache 2.0 and the license scanners run successfully. (link:https://issues.redhat.com/browse/LOG-1590[LOG-1590]) + +* Before this update, an incorrect brew tag for `curator5` within the `elasticsearch-operator-bundle` build pipeline caused the pull of an image pinned to a dummy SHA1. With this update, the build pipeline uses the `logging-curator5-rhel8` reference for `curator5`, enabling index management cronjobs to pull the correct image from `registry.redhat.io`. (link:https://issues.redhat.com/browse/LOG-1624[LOG-1624]) + +* Before this update, an issue with the `ServiceAccount` permissions caused errors such as `no permissions for [indices:admin/aliases/get]`. With this update, a permission fix resolves the issue. (link:https://issues.redhat.com/browse/LOG-1657[LOG-1657]) + +* Before this update, the Custom Resource Definition (CRD) for the Red Hat OpenShift Logging Operator was missing the Loki output type, which caused the admission controller to reject the `ClusterLogForwarder` custom resource object. With this update, the CRD includes Loki as an output type so that administrators can configure `ClusterLogForwarder` to send logs to a Loki server. 
(link:https://issues.redhat.com/browse/LOG-1683[LOG-1683]) + +* Before this update, OpenShift Elasticsearch Operator reconciliation of the `ServiceAccounts` overwrote third-party-owned fields that contained secrets. This issue caused memory and CPU spikes due to frequent recreation of secrets. This update resolves the issue. Now, the OpenShift Elasticsearch Operator does not overwrite third-party-owned fields. (link:https://issues.redhat.com/browse/LOG-1714[LOG-1714]) + +* Before this update, in the `ClusterLogging` custom resource (CR) definition, if you specified a `flush_interval` value but did not set `flush_mode` to `interval`, the Red Hat OpenShift Logging Operator generated a Fluentd configuration. However, the Fluentd collector generated an error at runtime. With this update, the Red Hat OpenShift Logging Operator validates the `ClusterLogging` CR definition and only generates the Fluentd configuration if both fields are specified. (link:https://issues.redhat.com/browse/LOG-1723[LOG-1723]) + +[id="openshift-logging-5-2-0-known-issues"] +=== Known issues + +* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to an external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. (link:https://issues.redhat.com/browse/LOG-1652[LOG-1652]) ++ +As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering: ++ +[source,terminal] +---- +$ oc delete pod -l component=collector +---- + +[id="openshift-logging-5-2-0-deprecated-removed-features"] +== Deprecated and removed features + +Some features available in previous releases have been deprecated or removed. 
+ +Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. + +[id="openshift-logging-5-2-0-legacy-forwarding"] +=== Forwarding logs using the legacy Fluentd and legacy syslog methods have been deprecated + +From {product-title} 4.6 to the present, forwarding logs by using the following legacy methods have been deprecated and will be removed in a future release: + +* Forwarding logs using the legacy Fluentd method +* Forwarding logs using the legacy syslog method + +Instead, use the following non-legacy methods: + +* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] +* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] + +include::modules/cluster-logging-release-notes-5.1.0.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-uninstall.adoc b/logging/cluster-logging-uninstall.adoc index 1b1aeaded2c3..69b2e742504a 100644 --- a/logging/cluster-logging-uninstall.adoc +++ b/logging/cluster-logging-uninstall.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can remove the {logging} from your {product-title} cluster. +You can remove OpenShift Logging from your {product-title} cluster. // The following include statements pull in the module files that comprise // the assembly. 
Include any combination of concept, procedure, or reference diff --git a/logging/cluster-logging-upgrading.adoc b/logging/cluster-logging-upgrading.adoc index d571c4f3cf24..3b068369b8dc 100644 --- a/logging/cluster-logging-upgrading.adoc +++ b/logging/cluster-logging-upgrading.adoc @@ -6,9 +6,14 @@ include::_attributes/common-attributes.adoc[] toc::[] -[id="cluster-logging-supported-versions"] -== Supported Versions -For version compatibility and support information, see link:https://access.redhat.com/support/policy/updates/openshift#logging[Red Hat OpenShift Container Platform Life Cycle Policy] +.{product-title} version support for Red Hat OpenShift Logging (RHOL) +[options="header"] +|==== +| |4.7 |4.8 |4.9 +|RHOL 5.0|X |X | +|RHOL 5.1|X |X | +|RHOL 5.2|X |X |X +|==== To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7 or 4.8. Then, you update the following operators: @@ -18,5 +23,4 @@ To upgrade from cluster logging in {product-title} version 4.6 and earlier to Op To upgrade from a previous version of OpenShift Logging to the current version, you update OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator to their current versions. include::modules/cluster-logging-updating-logging-to-5-0.adoc[leveloffset=+1] - include::modules/cluster-logging-updating-logging-to-5-1.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-visualizer.adoc b/logging/cluster-logging-visualizer.adoc index 6ded5d71ff00..2f643db68f6b 100644 --- a/logging/cluster-logging-visualizer.adoc +++ b/logging/cluster-logging-visualizer.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -The {logging} includes a web console for visualizing collected log data. Currently, {product-title} deploys the Kibana console for visualization. +OpenShift Logging includes a web console for visualizing collected log data. 
Currently, {product-title} deploys the Kibana console for visualization. Using the log visualizer, you can do the following with your data: @@ -15,7 +15,7 @@ Using the log visualizer, you can do the following with your data: * create and view custom dashboards using the *Dashboard* tab. Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information, -on using the interface, see the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Kibana documentation]. +on using the interface, see the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Kibana documentation]. [NOTE] ==== @@ -29,3 +29,5 @@ The audit logs are not stored in the internal {product-title} Elasticsearch inst include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+1] include::modules/cluster-logging-visualizer-kibana.adoc[leveloffset=+1] + + diff --git a/logging/cluster-logging.adoc b/logging/cluster-logging.adoc index 9356f80167cf..216624089b34 100644 --- a/logging/cluster-logging.adoc +++ b/logging/cluster-logging.adoc @@ -1,7 +1,7 @@ :_content-type: ASSEMBLY :context: cluster-logging [id="cluster-logging"] -= Understanding the {logging-title} += Understanding Red Hat OpenShift Logging include::_attributes/common-attributes.adoc[] toc::[] @@ -9,11 +9,11 @@ toc::[] ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As a cluster administrator, you can deploy the {logging} to +As a cluster administrator, you can deploy OpenShift Logging to aggregate all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs. -The {logging} aggregates these logs from throughout your cluster and stores them in a default log store. You can xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer[use the Kibana web console to visualize log data]. 
+OpenShift Logging aggregates these logs from throughout your cluster and stores them in a default log store. You can xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer[use the Kibana web console to visualize log data]. -The {logging} aggregates the following types of logs: +OpenShift Logging aggregates the following types of logs: * `application` - Container logs generated by user applications running in the cluster, except infrastructure container applications. * `infrastructure` - Logs generated by infrastructure components running in the cluster and {product-title} nodes, such as journal logs. Infrastructure components are pods that run in the `openshift*`, `kube*`, or `default` projects. diff --git a/logging/config/cluster-logging-collector.adoc b/logging/config/cluster-logging-collector.adoc index 237b8f38351b..53f661f583a2 100644 --- a/logging/config/cluster-logging-collector.adoc +++ b/logging/config/cluster-logging-collector.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. +{product-title} uses Fluentd to collect operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. You can configure the CPU and memory limits for the log collector and xref:../../logging/config/cluster-logging-moving-nodes.adoc#cluster-logging-moving[move the log collector pods to specific nodes]. All supported modifications to the log collector can be performed though the `spec.collection.log.fluentd` stanza in the `ClusterLogging` custom resource (CR). 
diff --git a/logging/config/cluster-logging-configuring-cr.adoc b/logging/config/cluster-logging-configuring-cr.adoc index 4cb8c3f1d142..dd2855623d46 100644 --- a/logging/config/cluster-logging-configuring-cr.adoc +++ b/logging/config/cluster-logging-configuring-cr.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -To configure {logging-title} you customize the `ClusterLogging` custom resource (CR). +To configure OpenShift Logging, you customize the `ClusterLogging` custom resource (CR). // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference @@ -14,3 +14,8 @@ To configure {logging-title} you customize the `ClusterLogging` custom resource // assemblies. include::modules/cluster-logging-about-crd.adoc[leveloffset=+1] + + + + + diff --git a/logging/config/cluster-logging-configuring.adoc b/logging/config/cluster-logging-configuring.adoc index 251f7e03d361..fe4f1ed15aa7 100644 --- a/logging/config/cluster-logging-configuring.adoc +++ b/logging/config/cluster-logging-configuring.adoc @@ -6,13 +6,14 @@ include::_attributes/common-attributes.adoc[] toc::[] -{logging-title-uc} is configurable using a `ClusterLogging` custom resource (CR) deployed +OpenShift Logging is configurable using a `ClusterLogging` custom resource (CR) deployed in the `openshift-logging` project. -The {logging} operator watches for changes to `ClusterLogging` CR, +The Red Hat OpenShift Logging Operator watches for changes to `ClusterLogging` CR, creates any missing logging components, and adjusts the logging environment accordingly. -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource definition (CRD), which defines a complete {logging} environment and includes all the components of the logging stack to collect, store and visualize logs. 
+The `ClusterLogging` CR is based on the `ClusterLogging` custom resource definition (CRD), which defines a complete OpenShift Logging environment +and includes all the components of the logging stack to collect, store and visualize logs. .Sample `ClusterLogging` custom resource (CR) [source,yaml] @@ -52,9 +53,9 @@ spec: resources: null type: kibana ---- -You can configure the following for the {logging}: +You can configure the following for OpenShift Logging: -* You can overwrite the image for each {logging} component by modifying the appropriate +* You can overwrite the image for each OpenShift Logging component by modifying the appropriate environment variable in the `cluster-logging-operator` Deployment. * You can specify specific nodes for the logging components using node selectors. @@ -77,5 +78,5 @@ The Rsyslog log collector is currently a Technology Preview feature. [IMPORTANT] ==== -The logging routes are managed by the {logging-title} Operator and cannot be modified by the user. +The logging routes are managed by the Red Hat OpenShift Logging Operator and cannot be modified by the user. ==== diff --git a/logging/config/cluster-logging-log-store.adoc b/logging/config/cluster-logging-log-store.adoc index a0ef02ff54d2..f9cef0ea667c 100644 --- a/logging/config/cluster-logging-log-store.adoc +++ b/logging/config/cluster-logging-log-store.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -{logging-title-uc} uses Elasticsearch 6 (ES) to store and organize the log data. +{product-title} uses Elasticsearch 6 (ES) to store and organize the log data. 
You can make modifications to your log store, including: diff --git a/logging/config/cluster-logging-maintenance-support.adoc b/logging/config/cluster-logging-maintenance-support.adoc index fba711d9d4be..ceda0ec50b4e 100644 --- a/logging/config/cluster-logging-maintenance-support.adoc +++ b/logging/config/cluster-logging-maintenance-support.adoc @@ -11,3 +11,6 @@ include::modules/cluster-logging-maintenance-support-about.adoc[leveloffset=+1] include::modules/cluster-logging-maintenance-support-list.adoc[leveloffset=+1] include::modules/unmanaged-operators.adoc[leveloffset=+1] + + + diff --git a/logging/config/cluster-logging-memory.adoc b/logging/config/cluster-logging-memory.adoc index 154fd68985c4..24fac411749b 100644 --- a/logging/config/cluster-logging-memory.adoc +++ b/logging/config/cluster-logging-memory.adoc @@ -1,13 +1,13 @@ :_content-type: ASSEMBLY :context: cluster-logging-memory [id="cluster-logging-memory"] -= Configuring CPU and memory limits for {logging} components += Configuring CPU and memory limits for OpenShift Logging components include::_attributes/common-attributes.adoc[] toc::[] -You can configure both the CPU and memory limits for each of the {logging} components as needed. +You can configure both the CPU and memory limits for each of the OpenShift Logging components as needed. 
// The following include statements pull in the module files that comprise @@ -17,3 +17,4 @@ You can configure both the CPU and memory limits for each of the {logging} compo include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+1] + diff --git a/logging/config/cluster-logging-moving-nodes.adoc b/logging/config/cluster-logging-moving-nodes.adoc index 89ca4a5032be..e6493fdc3fc0 100644 --- a/logging/config/cluster-logging-moving-nodes.adoc +++ b/logging/config/cluster-logging-moving-nodes.adoc @@ -1,7 +1,7 @@ :_content-type: ASSEMBLY :context: cluster-logging-moving [id="cluster-logging-moving"] -= Moving {logging} resources with node selectors += Moving OpenShift Logging resources with node selectors include::_attributes/common-attributes.adoc[] toc::[] diff --git a/logging/config/cluster-logging-storage-considerations.adoc b/logging/config/cluster-logging-storage-considerations.adoc index 83d432692e54..88c1fe9f11d7 100644 --- a/logging/config/cluster-logging-storage-considerations.adoc +++ b/logging/config/cluster-logging-storage-considerations.adoc @@ -1,14 +1,16 @@ :_content-type: ASSEMBLY :context: cluster-logging-storage [id="cluster-logging-storage"] -= Configuring {logging} storage += Configuring OpenShift Logging storage include::_attributes/common-attributes.adoc[] toc::[] -Elasticsearch is a memory-intensive application. The default {logging} installation deploys 16G of memory for both memory requests and memory limits. -The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the {product-title} cluster to run with the recommended or higher memory. Each Elasticsearch node can operate with a lower memory setting, though this is not recommended for production environments. +Elasticsearch is a memory-intensive application. The default OpenShift Logging installation deploys 16G of memory for both memory requests and memory limits. 
+The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the +{product-title} cluster to run with the recommended or higher memory. Each Elasticsearch node can operate with a lower +memory setting, though this is not recommended for production environments. // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference diff --git a/logging/config/cluster-logging-tolerations.adoc b/logging/config/cluster-logging-tolerations.adoc index 96fa378927cd..42dbcf06fa3b 100644 --- a/logging/config/cluster-logging-tolerations.adoc +++ b/logging/config/cluster-logging-tolerations.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can use taints and tolerations to ensure that {logging} pods run +You can use taints and tolerations to ensure that OpenShift Logging pods run on specific nodes and that no other workload can run on those nodes. Taints and tolerations are simple `key:value` pair. A taint on a node @@ -15,7 +15,7 @@ instructs the node to repel all pods that do not tolerate the taint. The `key` is any string, up to 253 characters and the `value` is any string up to 63 characters. The string must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores. -.Sample {logging} CR with tolerations +.Sample OpenShift Logging CR with tolerations [source,yaml] ---- apiVersion: "logging.openshift.io/v1" diff --git a/logging/config/cluster-logging-visualizer.adoc b/logging/config/cluster-logging-visualizer.adoc index f1d0670fe26f..c5921ecf68a7 100644 --- a/logging/config/cluster-logging-visualizer.adoc +++ b/logging/config/cluster-logging-visualizer.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -{product-title} uses Kibana to display the log data collected by the {logging}. 
+{product-title} uses Kibana to display the log data collected by OpenShift Logging. You can scale Kibana for redundancy and configure the CPU and memory for your Kibana nodes. @@ -18,3 +18,4 @@ You can scale Kibana for redundancy and configure the CPU and memory for your Ki include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+1] include::modules/cluster-logging-kibana-scaling.adoc[leveloffset=+1] + diff --git a/logging/dedicated-cluster-logging.adoc b/logging/dedicated-cluster-logging.adoc index 9da78a1e6091..f2fffe83cba6 100644 --- a/logging/dedicated-cluster-logging.adoc +++ b/logging/dedicated-cluster-logging.adoc @@ -1,16 +1,18 @@ :_content-type: ASSEMBLY :context: dedicated-cluster-logging [id="dedicated-cluster-logging"] -= Configuring the {logging-title} += Configuring OpenShift Logging in {product-title} include::_attributes/common-attributes.adoc[] toc::[] -As a cluster administrator, you can deploy the {logging} to aggregate logs for a range of services. +As a cluster administrator, you can deploy OpenShift Logging +to aggregate logs for a range of services. {product-title} clusters can perform logging tasks using the OpenShift Elasticsearch Operator. -The {logging} is configurable using a `ClusterLogging` custom resource (CR) + +OpenShift Logging is configurable using a `ClusterLogging` custom resource (CR) deployed in the `openshift-logging` project namespace. 
The Red Hat OpenShift Logging Operator watches for changes to `ClusterLogging` CR, creates diff --git a/logging/troubleshooting/cluster-logging-alerts.adoc b/logging/troubleshooting/cluster-logging-alerts.adoc index 83055bbc563c..ad48a4ef4214 100644 --- a/logging/troubleshooting/cluster-logging-alerts.adoc +++ b/logging/troubleshooting/cluster-logging-alerts.adoc @@ -1,7 +1,7 @@ :_content-type: ASSEMBLY :context: cluster-logging-alerts [id="cluster-logging-alerts"] -= Understanding {logging} alerts += Understanding OpenShift Logging alerts include::_attributes/common-attributes.adoc[] toc::[] diff --git a/logging/troubleshooting/cluster-logging-cluster-status.adoc b/logging/troubleshooting/cluster-logging-cluster-status.adoc index 22b4c854a6a3..2d06ead546a1 100644 --- a/logging/troubleshooting/cluster-logging-cluster-status.adoc +++ b/logging/troubleshooting/cluster-logging-cluster-status.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can view the status of the Red Hat OpenShift Logging Operator and for a number of {logging} components. +You can view the status of the Red Hat OpenShift Logging Operator and for a number of OpenShift Logging components. // The following include statements pull in the module files that comprise // the assembly. 
Include any combination of concept, procedure, or reference @@ -17,3 +17,5 @@ You can view the status of the Red Hat OpenShift Logging Operator and for a numb include::modules/cluster-logging-clo-status.adoc[leveloffset=+1] include::modules/cluster-logging-clo-status-comp.adoc[leveloffset=+1] + + diff --git a/logging/troubleshooting/cluster-logging-must-gather.adoc b/logging/troubleshooting/cluster-logging-must-gather.adoc index e344ef126c1a..08455ea3caa1 100644 --- a/logging/troubleshooting/cluster-logging-must-gather.adoc +++ b/logging/troubleshooting/cluster-logging-must-gather.adoc @@ -8,7 +8,7 @@ toc::[] When opening a support case, it is helpful to provide debugging information about your cluster to Red Hat Support. -The xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[`must-gather` tool] enables you to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. +The xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[`must-gather` tool] enables you to collect diagnostic information for project-level resources, cluster-level resources, and each of the OpenShift Logging components. For prompt support, supply diagnostic information for both {product-title} and OpenShift Logging. @@ -22,6 +22,6 @@ include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+1] [id="cluster-logging-must-gather-prereqs"] == Prerequisites -* The {logging} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. 
include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+1] diff --git a/logging/viewing-resource-logs.adoc b/logging/viewing-resource-logs.adoc index d0d19b48b600..099ebf8bd310 100644 --- a/logging/viewing-resource-logs.adoc +++ b/logging/viewing-resource-logs.adoc @@ -6,11 +6,12 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can view the logs for various resources, such as builds, deployments, and pods by using the OpenShift CLI (oc) and the web console. +You can view the logs for various resources, such as builds, deployments, and pods by using the OpenShift CLI (oc) and the web console. [NOTE] ==== -Resource logs are a default feature that provides limited log viewing capability. To enhance your log retrieving and viewing experience, it is recommended that you install xref:../logging/cluster-logging.adoc#cluster-logging[OpenShift Logging]. The {logging} aggregates all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs, into a dedicated log store. You can then query, discover, and visualize your log data through the xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer-using[Kibana interface]. Resource logs do not access the {logging} log store. +Resource logs are a default feature that provides limited log viewing capability. To enhance your log retrieving and viewing experience, it is recommended that you install xref:../logging/cluster-logging.adoc#cluster-logging[OpenShift Logging]. OpenShift Logging aggregates all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs, into a dedicated log store. You can then query, discover, and visualize your log data through the xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer-using[Kibana interface]. Resource logs do not access the OpenShift Logging log store. 
==== include::modules/viewing-resource-logs-cli-console.adoc[leveloffset=+1] + diff --git a/modules/cluster-logging-about-collector.adoc b/modules/cluster-logging-about-collector.adoc index 4b6251b5841d..8ab29c0d9edd 100644 --- a/modules/cluster-logging-about-collector.adoc +++ b/modules/cluster-logging-about-collector.adoc @@ -6,7 +6,7 @@ [id="cluster-logging-about-collector_{context}"] = About the logging collector -The {logging-title} collects container and node logs. +{product-title} uses Fluentd to collect container and node logs. By default, the log collector uses the following sources: diff --git a/modules/cluster-logging-about-components.adoc b/modules/cluster-logging-about-components.adoc index 280541ef4eef..960b74b81721 100644 --- a/modules/cluster-logging-about-components.adoc +++ b/modules/cluster-logging-about-components.adoc @@ -10,12 +10,13 @@ endif::[] :_content-type: CONCEPT [id="cluster-logging-about-components_{context}"] -= About {logging} components += About OpenShift Logging components -The {logging} components include a collector deployed to each node in the {product-title} cluster -that collects all node and container logs and writes them to a log store. You can use a centralized web UI to create rich visualizations and dashboards with the aggregated data. +The OpenShift Logging components include a collector deployed to each node in the {product-title} cluster +that collects all node and container logs and writes them to a log store. You can use a centralized web UI +to create rich visualizations and dashboards with the aggregated data. -The major components of the {logging} are: +The major components of OpenShift Logging are: * collection - This is the component that collects logs from the cluster, formats them, and forwards them to the log store. The current implementation is Fluentd. * log store - This is where the logs are stored. The default implementation is Elasticsearch. 
You can use the default Elasticsearch log store or forward logs to external log stores. The default log store is optimized and tested for short-term storage. diff --git a/modules/cluster-logging-about-crd.adoc b/modules/cluster-logging-about-crd.adoc index c9c88b56483e..383eb5de634a 100644 --- a/modules/cluster-logging-about-crd.adoc +++ b/modules/cluster-logging-about-crd.adoc @@ -6,11 +6,11 @@ [id="cluster-logging-configuring-crd_{context}"] = About the ClusterLogging custom resource -To make changes to your {logging} environment, create and modify the `ClusterLogging` custom resource (CR). +To make changes to your OpenShift Logging environment, create and modify the `ClusterLogging` custom resource (CR). Instructions for creating or modifying a CR are provided in this documentation as appropriate. -The following example shows a typical custom resource for the {logging}. +The following example shows a typical custom resource for OpenShift Logging. [id="efk-logging-configuring-about-sample_{context}"] .Sample `ClusterLogging` custom resource (CR) diff --git a/modules/cluster-logging-about-logstore.adoc b/modules/cluster-logging-about-logstore.adoc index d51ab1623e43..c90ae77c0acd 100644 --- a/modules/cluster-logging-about-logstore.adoc +++ b/modules/cluster-logging-about-logstore.adoc @@ -6,9 +6,9 @@ [id="cluster-logging-about-logstore_{context}"] = About the log store -By default, {product-title} uses link:https://www.elastic.co/products/elasticsearch[Elasticsearch (ES)] to store log data. Optionally you can use the Log Forwarder API to forward logs to an external store. Several types of store are supported, including fluentd, rsyslog, kafka and others. +By default, {product-title} uses link:https://www.elastic.co/products/elasticsearch[Elasticsearch (ES)] to store log data. Optionally, you can use the log forwarding features to forward logs to external log stores using Fluentd protocols, syslog protocols, or the {product-title} Log Forwarding API. 
-The {logging} Elasticsearch instance is optimized and tested for short term storage, approximately seven days. If you want to retain your logs over a longer term, it is recommended you move the data to a third-party storage system. +The OpenShift Logging Elasticsearch instance is optimized and tested for short term storage, approximately seven days. If you want to retain your logs over a longer term, it is recommended you move the data to a third-party storage system. Elasticsearch organizes the log data from Fluentd into datastores, or _indices_, then subdivides each index into multiple pieces called _shards_, which it spreads across a set of Elasticsearch nodes in an Elasticsearch cluster. You can configure Elasticsearch to make copies of the shards, called _replicas_, which Elasticsearch also spreads across the Elasticsearch nodes. The `ClusterLogging` custom resource (CR) allows you to specify how the shards are replicated to provide data redundancy and resilience to failure. You can also specify how long the different types of logs are retained using a retention policy in the `ClusterLogging` CR. @@ -23,7 +23,8 @@ See the link:https://www.elastic.co/guide/en/elasticsearch/guide/current/hardwar [NOTE] ==== -A highly-available Elasticsearch environment requires at least three Elasticsearch nodes, each on a different host. +A highly-available Elasticsearch environment requires at least three Elasticsearch nodes, +each on a different host. ==== Role-based access control (RBAC) applied on the Elasticsearch indices enables the controlled access of the logs to the developers. Administrators can access all logs and developers can access only the logs in their projects. 
diff --git a/modules/cluster-logging-about.adoc b/modules/cluster-logging-about.adoc index 33839af95a00..a18b89a0d8a6 100644 --- a/modules/cluster-logging-about.adoc +++ b/modules/cluster-logging-about.adoc @@ -9,18 +9,19 @@ :_content-type: CONCEPT [id="cluster-logging-about_{context}"] -= About deploying the {logging-title} += About deploying OpenShift Logging ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -{product-title} cluster administrators can deploy the {logging} using -the {product-title} web console or CLI to install the OpenShift Elasticsearch +{product-title} cluster administrators can deploy OpenShift Logging using +the {product-title} web console or CLI to install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. When the operators are installed, you create -a `ClusterLogging` custom resource (CR) to schedule {logging} pods and -other resources necessary to support the {logging}. The operators are -responsible for deploying, upgrading, and maintaining the {logging}. +a `ClusterLogging` custom resource (CR) to schedule OpenShift Logging pods and +other resources necessary to support OpenShift Logging. The operators are +responsible for deploying, upgrading, and maintaining OpenShift Logging. endif::openshift-enterprise,openshift-webscale,openshift-origin[] -The `ClusterLogging` CR defines a complete {logging} environment that includes all the components -of the logging stack to collect, store and visualize logs. The Red Hat OpenShift Logging Operator watches the {logging} CR and adjusts the logging deployment accordingly. +The `ClusterLogging` CR defines a complete OpenShift Logging environment that includes all the components +of the logging stack to collect, store and visualize logs. The Red Hat OpenShift Logging Operator watches the OpenShift Logging +CR and adjusts the logging deployment accordingly. 
Administrators and application developers can view the logs of the projects for which they have view access. diff --git a/modules/cluster-logging-clo-status-comp.adoc b/modules/cluster-logging-clo-status-comp.adoc index c31d3b5355fe..88f2b0a53017 100644 --- a/modules/cluster-logging-clo-status-comp.adoc +++ b/modules/cluster-logging-clo-status-comp.adoc @@ -4,13 +4,13 @@ :_content-type: PROCEDURE [id="cluster-logging-clo-status-example_{context}"] -= Viewing the status of {logging} components += Viewing the status of OpenShift Logging components -You can view the status for a number of {logging} components. +You can view the status for a number of OpenShift Logging components. .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure @@ -21,7 +21,7 @@ You can view the status for a number of {logging} components. $ oc project openshift-logging ---- -. View the status of the {logging-title} environment: +. View the status of the OpenShift Logging environment: + [source,terminal] ---- @@ -41,7 +41,7 @@ Conditions: Available True MinimumReplicasAvailable Progressing True NewReplicaSetAvailable -.... +.... Events: Type Reason Age From Message @@ -49,7 +49,7 @@ Events: Normal ScalingReplicaSet 62m deployment-controller Scaled up replica set cluster-logging-operator-574b8987df to 1---- ---- -. View the status of the {logging} replica set: +. View the status of the OpenShift Logging replica set: .. Get the name of a replica set: + diff --git a/modules/cluster-logging-clo-status.adoc b/modules/cluster-logging-clo-status.adoc index e2bbee792fa0..d1553dda267e 100644 --- a/modules/cluster-logging-clo-status.adoc +++ b/modules/cluster-logging-clo-status.adoc @@ -10,7 +10,7 @@ You can view the status of your Red Hat OpenShift Logging Operator. .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. 
.Procedure diff --git a/modules/cluster-logging-collector-alerts-viewing.adoc b/modules/cluster-logging-collector-alerts-viewing.adoc index 49dea5b1b13a..7282f4323e66 100644 --- a/modules/cluster-logging-collector-alerts-viewing.adoc +++ b/modules/cluster-logging-collector-alerts-viewing.adoc @@ -14,7 +14,7 @@ Alerts are shown in the {product-title} web console, on the *Alerts* tab of the .Procedure -To view the {logging} and other {product-title} alerts: +To view OpenShift Logging and other {product-title} alerts: . In the {product-title} console, click *Observe* → *Alerting*. diff --git a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-cloudwatch.adoc index 6f13227f8b4b..6082d3b742cf 100644 --- a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc +++ b/modules/cluster-logging-collector-log-forward-cloudwatch.adoc @@ -2,7 +2,7 @@ [id="cluster-logging-collector-log-forward-cloudwatch_{context}"] = Forwarding logs to Amazon CloudWatch -You can forward logs to Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). You can forward logs to CloudWatch in addition to, or instead of, the default {logging} managed Elasticsearch log store. +You can forward logs to Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). You can forward logs to CloudWatch in addition to, or instead of, the default OpenShift Logging-managed Elasticsearch log store. To configure log forwarding to CloudWatch, you must create a `ClusterLogForwarder` custom resource (CR) with an output for CloudWatch, and a pipeline that uses the output. 
diff --git a/modules/cluster-logging-collector-log-forward-syslog.adoc b/modules/cluster-logging-collector-log-forward-syslog.adoc index 831a0f4e7e3c..c28fb94c9ad0 100644 --- a/modules/cluster-logging-collector-log-forward-syslog.adoc +++ b/modules/cluster-logging-collector-log-forward-syslog.adoc @@ -6,7 +6,6 @@ You can use the *syslog* link:https://tools.ietf.org/html/rfc3164[RFC3164] or li To configure log forwarding using the *syslog* protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the syslog servers, and pipelines that use those outputs. The syslog output can use a UDP, TCP, or TLS connection. -//SME-Feedback-Req: Is the below note accurate? [NOTE] ==== Alternately, you can use a config map to forward logs using the *syslog* RFC3164 protocols. However, this method is deprecated in {product-title} and will be removed in a future release. diff --git a/modules/cluster-logging-collector-log-forwarding-about.adoc b/modules/cluster-logging-collector-log-forwarding-about.adoc index 53d447248d00..cb788f87df2b 100644 --- a/modules/cluster-logging-collector-log-forwarding-about.adoc +++ b/modules/cluster-logging-collector-log-forwarding-about.adoc @@ -47,13 +47,13 @@ Note the following: * If a `ClusterLogForwarder` CR object exists, logs are not forwarded to the default Elasticsearch instance, unless there is a pipeline with the `default` output. -* By default, the {logging} sends container and infrastructure logs to the default internal Elasticsearch log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, do not configure the Log Forwarding API. +* By default, OpenShift Logging sends container and infrastructure logs to the default internal Elasticsearch log store defined in the `ClusterLogging` custom resource. 
However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, do not configure the Log Forwarding API. * If you do not define a pipeline for a log type, the logs of the undefined types are dropped. For example, if you specify a pipeline for the `application` and `audit` types, but do not specify a pipeline for the `infrastructure` type, `infrastructure` logs are dropped. * You can use multiple types of outputs in the `ClusterLogForwarder` custom resource (CR) to send logs to servers that support different protocols. -* The internal {product-title} Elasticsearch instance does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. The {logging} does not comply with those regulations. +* The internal {product-title} Elasticsearch instance does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. OpenShift Logging does not comply with those regulations. The following example forwards the audit logs to a secure external Elasticsearch instance, the infrastructure logs to an insecure external Elasticsearch instance, the application logs to a Kafka broker, and the application logs from the `my-apps-logs` project to the internal Elasticsearch instance. 
diff --git a/modules/cluster-logging-collector-pod-location.adoc b/modules/cluster-logging-collector-pod-location.adoc index 47d350339f6d..024b026bb759 100644 --- a/modules/cluster-logging-collector-pod-location.adoc +++ b/modules/cluster-logging-collector-pod-location.adoc @@ -14,7 +14,7 @@ You can view the Fluentd logging collector pods and the corresponding nodes that [source,terminal] ---- -$ oc get pods --selector component=collector -o wide -n openshift-logging +$ oc get pods --selector component=fluentd -o wide -n openshift-logging ---- .Example output diff --git a/modules/cluster-logging-collector-tolerations.adoc b/modules/cluster-logging-collector-tolerations.adoc index e9fca7e0e5f4..57088f58202a 100644 --- a/modules/cluster-logging-collector-tolerations.adoc +++ b/modules/cluster-logging-collector-tolerations.adoc @@ -25,7 +25,7 @@ tolerations: .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-collector-tuning.adoc b/modules/cluster-logging-collector-tuning.adoc index ecc256f63b0a..6f33e7e8c843 100644 --- a/modules/cluster-logging-collector-tuning.adoc +++ b/modules/cluster-logging-collector-tuning.adoc @@ -6,7 +6,7 @@ [id="cluster-logging-collector-tuning_{context}"] = Advanced configuration for the log forwarder -The {logging-title} includes multiple Fluentd parameters that you can use for tuning the performance of the Fluentd log forwarder. With these parameters, you can change the following Fluentd behaviors: +OpenShift Logging includes multiple Fluentd parameters that you can use for tuning the performance of the Fluentd log forwarder. 
With these parameters, you can change the following Fluentd behaviors: * Chunk and chunk buffer sizes * Chunk flushing behavior diff --git a/modules/cluster-logging-configuring-image-about.adoc b/modules/cluster-logging-configuring-image-about.adoc index f0b6522dba2c..fe62d4cb1999 100644 --- a/modules/cluster-logging-configuring-image-about.adoc +++ b/modules/cluster-logging-configuring-image-about.adoc @@ -3,9 +3,10 @@ // * logging/cluster-logging-configuring.adoc [id="cluster-logging-configuring-image-about_{context}"] -= Understanding {logging} component images += Understanding OpenShift Logging component images -There are several components in the {logging-title}, each one implemented with one or more images. Each image is specified by an environment variable +There are several components in OpenShift Logging, each one implemented with one +or more images. Each image is specified by an environment variable defined in the *cluster-logging-operator* deployment in the *openshift-logging* project and should not be changed. You can view the images by running the following command: diff --git a/modules/cluster-logging-cpu-memory.adoc b/modules/cluster-logging-cpu-memory.adoc index 0971d084f6af..743ceba5d6ff 100644 --- a/modules/cluster-logging-cpu-memory.adoc +++ b/modules/cluster-logging-cpu-memory.adoc @@ -6,7 +6,7 @@ [id="cluster-logging-memory-limits_{context}"] = Configuring CPU and memory limits -The {logging} components allow for adjustments to both the CPU and memory limits. +The OpenShift Logging components allow for adjustments to both the CPU and memory limits. 
.Procedure diff --git a/modules/cluster-logging-deploy-cli.adoc b/modules/cluster-logging-deploy-cli.adoc index eb5ec7fd68fd..9e747b1808d7 100644 --- a/modules/cluster-logging-deploy-cli.adoc +++ b/modules/cluster-logging-deploy-cli.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="cluster-logging-deploy-cli_{context}"] -= Installing the {logging-title} using the CLI += Installing OpenShift Logging using the CLI You can use the {product-title} CLI to install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. @@ -282,7 +282,7 @@ openshift-logging clusterlogging.5.1.0-202 [NOTE] ==== This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and -configuring {logging} components for information on modifications you can make to your OpenShift Logging cluster. +configuring OpenShift Logging components for information on modifications you can make to your OpenShift Logging cluster. ==== + ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] @@ -381,7 +381,7 @@ For example: $ oc create -f olo-instance.yaml ---- + -This creates the {logging} components, the `Elasticsearch` custom resource and components, and the Kibana interface. +This creates the OpenShift Logging components, the `Elasticsearch` custom resource and components, and the Kibana interface. . Verify the installation by listing the pods in the *openshift-logging* project. 
+ diff --git a/modules/cluster-logging-deploy-console.adoc b/modules/cluster-logging-deploy-console.adoc index 6f6674b939cc..c962071298ef 100644 --- a/modules/cluster-logging-deploy-console.adoc +++ b/modules/cluster-logging-deploy-console.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="cluster-logging-deploy-console_{context}"] -= Installing the {logging-title} using the web console += Installing OpenShift Logging using the web console You can use the {product-title} web console to install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. @@ -124,7 +124,7 @@ You might have to refresh the page to load the data. [NOTE] ==== This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and -configuring {logging} components for information on modifications you can make to your OpenShift Logging cluster. +configuring OpenShift Logging components for information on modifications you can make to your OpenShift Logging cluster. ==== + ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] @@ -209,7 +209,7 @@ elasticsearch-cdm-x6kdekli-3 0/1 1 0 6m44s The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. ==== -.. Click *Create*. This creates the {logging} components, the `Elasticsearch` custom resource and components, and the Kibana interface. +.. Click *Create*. This creates the OpenShift Logging components, the `Elasticsearch` custom resource and components, and the Kibana interface. . 
Verify the installation: diff --git a/modules/cluster-logging-deploy-label.adoc b/modules/cluster-logging-deploy-label.adoc index 38a3aa28704e..3884178e265e 100644 --- a/modules/cluster-logging-deploy-label.adoc +++ b/modules/cluster-logging-deploy-label.adoc @@ -21,7 +21,7 @@ The following also works: $ oc label nodes 10.10.0.{100..119} elasticsearch-fluentd=true ---- -Labeling nodes in groups paces the daemon sets used by the {logging}, helping to avoid contention on shared resources such as the image registry. +Labeling nodes in groups paces the daemon sets used by OpenShift logging, helping to avoid contention on shared resources such as the image registry. [NOTE] ==== diff --git a/modules/cluster-logging-deploy-multitenant.adoc b/modules/cluster-logging-deploy-multitenant.adoc index e5da687de69c..45c35aa7498c 100644 --- a/modules/cluster-logging-deploy-multitenant.adoc +++ b/modules/cluster-logging-deploy-multitenant.adoc @@ -8,7 +8,7 @@ Your cluster network provider might enforce network isolation. If so, you must allow network traffic between the projects that contain the operators deployed by OpenShift Logging. -Network isolation blocks network traffic between pods or services that are in different projects. The {logging} installs the _OpenShift Elasticsearch Operator_ in the `openshift-operators-redhat` project and the _Red Hat OpenShift Logging Operator_ in the `openshift-logging` project. Therefore, you must allow traffic between these two projects. +Network isolation blocks network traffic between pods or services that are in different projects. OpenShift Logging installs the _OpenShift Elasticsearch Operator_ in the `openshift-operators-redhat` project and the _Red Hat OpenShift Logging Operator_ in the `openshift-logging` project. Therefore, you must allow traffic between these two projects. {product-title} offers two supported choices for the default Container Network Interface (CNI) network provider, OpenShift SDN and OVN-Kubernetes. 
These two providers implement various network isolation policies. diff --git a/modules/cluster-logging-deploy-storage-considerations.adoc b/modules/cluster-logging-deploy-storage-considerations.adoc index 220e59386b62..06507dea9b31 100644 --- a/modules/cluster-logging-deploy-storage-considerations.adoc +++ b/modules/cluster-logging-deploy-storage-considerations.adoc @@ -3,7 +3,7 @@ // * logging/cluster-logging-deploy.adoc [id="cluster-logging-deploy-storage-considerations_{context}"] -= Storage considerations for the {logging-title} += Storage considerations for OpenShift Logging and {product-title} //// An Elasticsearch index is a collection of primary shards and their corresponding replica shards. This is how Elasticsearch implements high availability internally, so there is little requirement to use hardware based mirroring RAID variants. RAID 0 can still be used to increase overall disk performance. diff --git a/modules/cluster-logging-deploying-about.adoc b/modules/cluster-logging-deploying-about.adoc index 61b76ea32cea..081ce1a9b246 100644 --- a/modules/cluster-logging-deploying-about.adoc +++ b/modules/cluster-logging-deploying-about.adoc @@ -5,20 +5,21 @@ :_content-type: CONCEPT [id="cluster-logging-deploying-about_{context}"] -= About deploying and configuring the {logging-title} += About deploying and configuring OpenShift Logging -The {logging} is designed to be used with the default configuration, which is tuned for small to medium sized {product-title} clusters. +OpenShift Logging is designed to be used with the default configuration, which is tuned for small to medium sized {product-title} clusters. -The installation instructions that follow include a sample `ClusterLogging` custom resource (CR), which you can use to create a {logging} instance and configure your {logging} environment. 
+The installation instructions that follow include a sample `ClusterLogging` custom resource (CR), which you can use to create an OpenShift Logging instance +and configure your OpenShift Logging environment. -If you want to use the default {logging} install, you can use the sample CR directly. +If you want to use the default OpenShift Logging install, you can use the sample CR directly. If you want to customize your deployment, make changes to the sample CR as needed. The following describes the configurations you can make when installing your OpenShift Logging instance or modify after installation. See the Configuring sections for more information on working with each component, including modifications you can make outside of the `ClusterLogging` custom resource. [id="cluster-logging-deploy-about-config_{context}"] -== Configuring and Tuning the {logging} +== Configuring and Tuning OpenShift Logging -You can configure your {logging} by modifying the `ClusterLogging` custom resource deployed +You can configure your OpenShift Logging environment by modifying the `ClusterLogging` custom resource deployed in the `openshift-logging` project. You can modify any of the following components upon install or after install: diff --git a/modules/cluster-logging-elasticsearch-audit.adoc b/modules/cluster-logging-elasticsearch-audit.adoc index d08144f52214..41cbe8d1eae4 100644 --- a/modules/cluster-logging-elasticsearch-audit.adoc +++ b/modules/cluster-logging-elasticsearch-audit.adoc @@ -12,7 +12,7 @@ To send the audit logs to the default internal Elasticsearch log store, for exam [IMPORTANT] ==== -The internal {product-title} Elasticsearch log store does not provide secure storage for audit logs. Verify that the system to which you forward audit logs complies with your organizational and governmental regulations and is properly secured. The {logging-title} does not comply with those regulations. 
+The internal {product-title} Elasticsearch log store does not provide secure storage for audit logs. Verify that the system to which you forward audit logs complies with your organizational and governmental regulations and is properly secured. OpenShift Logging does not comply with those regulations. ==== .Procedure diff --git a/modules/cluster-logging-elasticsearch-exposing.adoc b/modules/cluster-logging-elasticsearch-exposing.adoc index dd10d90f7b13..cb0a0541fbe8 100644 --- a/modules/cluster-logging-elasticsearch-exposing.adoc +++ b/modules/cluster-logging-elasticsearch-exposing.adoc @@ -6,9 +6,12 @@ [id="cluster-logging-elasticsearch-exposing_{context}"] = Exposing the log store service as a route -By default, the log store that is deployed with the {logging-title} is not accessible from outside the logging cluster. You can enable a route with re-encryption termination for external access to the log store service for those tools that access its data. +By default, the log store that is deployed with OpenShift Logging is not +accessible from outside the logging cluster. You can enable a route with re-encryption termination +for external access to the log store service for those tools that access its data. -Externally, you can access the log store by creating a reencrypt route, your {product-title} token and the installed log store CA certificate. Then, access a node that hosts the log store service with a cURL request that contains: +Externally, you can access the log store by creating a reencrypt route, your {product-title} token and the installed +log store CA certificate. Then, access a node that hosts the log store service with a cURL request that contains: * The `Authorization: Bearer ${token}` * The Elasticsearch reencrypt route and an link:https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html[Elasticsearch API request]. 
@@ -56,7 +59,7 @@ $ oc exec elasticsearch-cdm-oplnhinv-1-5746475887-fj2f8 -n openshift-logging -- .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. * You must have access to the project to be able to access to the logs. diff --git a/modules/cluster-logging-elasticsearch-ha.adoc b/modules/cluster-logging-elasticsearch-ha.adoc index 912bdfa488f5..fb8e6c19a727 100644 --- a/modules/cluster-logging-elasticsearch-ha.adoc +++ b/modules/cluster-logging-elasticsearch-ha.adoc @@ -10,7 +10,7 @@ You can define how Elasticsearch shards are replicated across data nodes in the .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc b/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc index af78950d38e2..39635eaaa415 100644 --- a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc +++ b/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc @@ -15,8 +15,8 @@ When using emptyDir, if log storage is restarted or redeployed, you will lose da ==== .Prerequisites -//Find & replace the below according to SME feedback. -* The {logging-title} and Elasticsearch must be installed. + +* OpenShift Logging and Elasticsearch must be installed. 
.Procedure @@ -28,6 +28,8 @@ When using emptyDir, if log storage is restarted or redeployed, you will lose da logStore: type: "elasticsearch" elasticsearch: - nodeCount: 3 + nodeCount: 3 storage: {} ---- + + diff --git a/modules/cluster-logging-elasticsearch-retention.adoc b/modules/cluster-logging-elasticsearch-retention.adoc index 2673a9bfe041..14ea1a65ee5e 100644 --- a/modules/cluster-logging-elasticsearch-retention.adoc +++ b/modules/cluster-logging-elasticsearch-retention.adoc @@ -19,8 +19,8 @@ Elasticsearch rolls over an index, moving the current index and creating a new i Elasticsearch deletes the rolled-over indices based on the retention policy you configure. If you do not create a retention policy for any log sources, logs are deleted after seven days by default. .Prerequisites -//SME Feedback Req: There are a few instances of these for prereqs. Should OpenShift Logging here be the Red Hat OpenShift Logging Operator or the logging product name? -* The {logging-title} and the OpenShift Elasticsearch Operator must be installed. + +* OpenShift Logging and the OpenShift Elasticsearch Operator must be installed. .Procedure diff --git a/modules/cluster-logging-elasticsearch-storage.adoc b/modules/cluster-logging-elasticsearch-storage.adoc index e0a7cc1242e4..1053f88b9c89 100644 --- a/modules/cluster-logging-elasticsearch-storage.adoc +++ b/modules/cluster-logging-elasticsearch-storage.adoc @@ -17,8 +17,8 @@ occur. ==== .Prerequisites - -* The {logging-title} and Elasticsearch must be installed. + +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-elasticsearch-tolerations.adoc b/modules/cluster-logging-elasticsearch-tolerations.adoc index acaad1605c63..6926e7c9d82f 100644 --- a/modules/cluster-logging-elasticsearch-tolerations.adoc +++ b/modules/cluster-logging-elasticsearch-tolerations.adoc @@ -26,7 +26,7 @@ tolerations: .Prerequisites -* The {logging-title} and Elasticsearch must be installed. 
+* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-eventrouter-about.adoc b/modules/cluster-logging-eventrouter-about.adoc index 690662b9f531..cbb23609c756 100644 --- a/modules/cluster-logging-eventrouter-about.adoc +++ b/modules/cluster-logging-eventrouter-about.adoc @@ -6,7 +6,7 @@ [id="cluster-logging-eventrouter-about_{context}"] = About event routing -The Event Router is a pod that watches {product-title} events so they can be collected by the {logging-title}. -The Event Router collects events from all projects and writes them to `STDOUT`. Fluentd collects those events and forwards them into the {product-title} Elasticsearch instance. Elasticsearch indexes the events to the `infra` index. +The Event Router is a pod that watches {product-title} events so they can be collected by OpenShift Logging. +The Event Router collects events from all projects and writes them to `STDOUT`. Fluentd collects those events and forwards them into the {product-title} Elasticsearch instance. Elasticsearch indexes the events to the `infra` index. You must manually deploy the Event Router. diff --git a/modules/cluster-logging-eventrouter-deploy.adoc b/modules/cluster-logging-eventrouter-deploy.adoc index 29eb1c21a8b8..0971dc46e3df 100644 --- a/modules/cluster-logging-eventrouter-deploy.adoc +++ b/modules/cluster-logging-eventrouter-deploy.adoc @@ -14,7 +14,7 @@ The following Template object creates the service account, cluster role, and clu * You need proper permissions to create service accounts and update cluster role bindings. For example, you can run the following template with a user that has the *cluster-admin* role. -* The {logging-title} must be installed. +* OpenShift Logging must be installed. 
.Procedure diff --git a/modules/cluster-logging-forwarding-about.adoc b/modules/cluster-logging-forwarding-about.adoc index efe67f586b8f..e29a55044ded 100644 --- a/modules/cluster-logging-forwarding-about.adoc +++ b/modules/cluster-logging-forwarding-about.adoc @@ -6,4 +6,5 @@ [id="cluster-logging-forwarding-about_{context}"] = About log forwarding -By default, the {logging-title} sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource (CR). If you want to forward logs to other log aggregators, you can use the log forwarding features to send logs to specific endpoints within or outside your cluster. +By default, OpenShift Logging sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource (CR). If you want to forward logs to other log aggregators, you can use the log forwarding features to send logs to specific endpoints within or outside your cluster. + diff --git a/modules/cluster-logging-kibana-tolerations.adoc b/modules/cluster-logging-kibana-tolerations.adoc index f081f4e51af0..a281c09a06c1 100644 --- a/modules/cluster-logging-kibana-tolerations.adoc +++ b/modules/cluster-logging-kibana-tolerations.adoc @@ -16,7 +16,7 @@ that is not on other pods ensures only the Kibana pod can run on that node. .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-log-store-status-viewing.adoc b/modules/cluster-logging-log-store-status-viewing.adoc index 7f47ba5cacd6..02e5b83e0649 100644 --- a/modules/cluster-logging-log-store-status-viewing.adoc +++ b/modules/cluster-logging-log-store-status-viewing.adoc @@ -10,7 +10,7 @@ You can view the status of your log store. .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. 
.Procedure diff --git a/modules/cluster-logging-logstore-limits.adoc b/modules/cluster-logging-logstore-limits.adoc index d2573f82af0a..b5c929cfbf36 100644 --- a/modules/cluster-logging-logstore-limits.adoc +++ b/modules/cluster-logging-logstore-limits.adoc @@ -20,7 +20,7 @@ For production use, you should have no less than the default 16Gi allocated to e .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-maintenance-support-about.adoc b/modules/cluster-logging-maintenance-support-about.adoc index f71a4c5c6520..a0b9203d5255 100644 --- a/modules/cluster-logging-maintenance-support-about.adoc +++ b/modules/cluster-logging-maintenance-support-about.adoc @@ -4,9 +4,9 @@ :_content-type: CONCEPT [id="cluster-logging-maintenance-support-about_{context}"] -= About unsupported configurations += About unsupported configurations -The supported way of configuring the {logging-title} is by configuring it using the options described in this documentation. Do not use other configurations, as they are unsupported. Configuration paradigms might change across {product-title} releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. If you use configurations other than those described in this documentation, your changes will disappear because the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator reconcile any differences. The Operators reverse everything to the defined state by default and by design. +The supported way of configuring OpenShift Logging is by configuring it using the options described in this documentation. Do not use other configurations, as they are unsupported. Configuration paradigms might change across {product-title} releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. 
If you use configurations other than those described in this documentation, your changes will disappear because the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator reconcile any differences. The Operators reverse everything to the defined state by default and by design. [NOTE] ==== diff --git a/modules/cluster-logging-manual-rollout-rolling.adoc b/modules/cluster-logging-manual-rollout-rolling.adoc index 5f73d675f2f4..763b8c85dfdc 100644 --- a/modules/cluster-logging-manual-rollout-rolling.adoc +++ b/modules/cluster-logging-manual-rollout-rolling.adoc @@ -12,7 +12,7 @@ Also, a rolling restart is recommended if the nodes on which an Elasticsearch po .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-must-gather-about.adoc b/modules/cluster-logging-must-gather-about.adoc index e8abfa298832..cce8f926c31d 100644 --- a/modules/cluster-logging-must-gather-about.adoc +++ b/modules/cluster-logging-must-gather-about.adoc @@ -8,7 +8,7 @@ The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues. 
-For your {logging}, `must-gather` collects the following information: +For your OpenShift Logging environment, `must-gather` collects the following information: * Project-level resources, including pods, configuration maps, service accounts, roles, role bindings, and events at the project level * Cluster-level resources, including nodes, roles, and role bindings at the cluster level diff --git a/modules/cluster-logging-must-gather-collecting.adoc b/modules/cluster-logging-must-gather-collecting.adoc index 1747f51ffa6b..32254b9a968c 100644 --- a/modules/cluster-logging-must-gather-collecting.adoc +++ b/modules/cluster-logging-must-gather-collecting.adoc @@ -6,15 +6,15 @@ [id="cluster-logging-must-gather-collecting_{context}"] = Collecting OpenShift Logging data -You can use the `oc adm must-gather` CLI command to collect information about your {logging}. +You can use the `oc adm must-gather` CLI command to collect information about your OpenShift Logging environment. .Procedure -To collect {logging} information with `must-gather`: +To collect OpenShift Logging information with `must-gather`: . Navigate to the directory where you want to store the `must-gather` information. -. Run the `oc adm must-gather` command against the OpenShift Logging image: +. 
Run the `oc adm must-gather` command against the OpenShift Logging image: + ifndef::openshift-origin[] [source,terminal] diff --git a/modules/cluster-logging-release-notes-5.2.0.adoc b/modules/cluster-logging-release-notes-5.2.0.adoc deleted file mode 100644 index fbf6c5399d05..000000000000 --- a/modules/cluster-logging-release-notes-5.2.0.adoc +++ /dev/null @@ -1,109 +0,0 @@ - -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - -[id="cluster-logging-release-notes-5-2-0"] -= {logging-title-uc} 5.2.0 - -The following advisories are available for {logging} 5.2.x: - -* link:https://access.redhat.com/errata/RHBA-2021:3550[RHBA-2021:3550 OpenShift Logging Bug Fix Release 5.2.1] -* link:https://access.redhat.com/errata/RHBA-2021:3393[RHBA-2021:3393 OpenShift Logging Bug Fix Release 5.2.0] - -[id="openshift-logging-5-2-0-new-features-and-enhancements"] -=== New features and enhancements - -* With this update, you can forward log data to Amazon CloudWatch, which provides application and infrastructure monitoring. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch]. (link:https://issues.redhat.com/browse/LOG-1173[LOG-1173]) - -* With this update, you can forward log data to Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-loki_cluster-logging-external[Forwarding logs to Loki]. (link:https://issues.redhat.com/browse/LOG-684[LOG-684]) - -* With this update, if you use the Fluentd forward protocol to forward log data over a TLS-encrypted connection, now you can use a password-encrypted private key file and specify the passphrase in the Cluster Log Forwarder configuration. 
For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol]. (link:https://issues.redhat.com/browse/LOG-1525[LOG-1525]) - -* This enhancement enables you to use a username and password to authenticate a log forwarding connection to an external Elasticsearch instance. For example, if you cannot use mutual TLS (mTLS) because a third-party operates the Elasticsearch instance, you can use HTTP or HTTPS and set a secret that contains the username and password. For more information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-es_cluster-logging-external[Forwarding logs to an external Elasticsearch instance]. (link:https://issues.redhat.com/browse/LOG-1022[LOG-1022]) - -* With this update, you can collect OVN network policy audit logs for forwarding to a logging server. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collecting-ovn-audit-logs_cluster-logging-external[Collecting OVN network policy audit logs]. (link:https://issues.redhat.com/browse/LOG-1526[LOG-1526]) - -* By default, the data model introduced in {product-title} 4.5 gave logs from different namespaces a single index in common. This change made it harder to see which namespaces produced the most logs. -+ -The current release adds namespace metrics to the *Logging* dashboard in the {product-title} console. With these metrics, you can see which namespaces produce logs and how many logs each namespace produces for a given timestamp. -+ -To see these metrics, open the *Administrator* perspective in the {product-title} web console, and navigate to *Observe* -> *Dashboards* -> *Logging/Elasticsearch*. 
(link:https://issues.redhat.com/browse/LOG-1680[LOG-1680]) - -* The current release, OpenShift Logging 5.2, enables two new metrics: For a given timestamp or duration, you can see the total logs produced or logged by individual containers, and the total logs collected by the collector. These metrics are labeled by namespace, pod, and container name so that you can see how many logs each namespace and pod collects and produces. (link:https://issues.redhat.com/browse/LOG-1213[LOG-1213]) - -[id="openshift-logging-5-2-0-bug-fixes"] -=== Bug fixes - -* Before this update, when the OpenShift Elasticsearch Operator created index management cronjobs, it added the `POLICY_MAPPING` environment variable twice, which caused the apiserver to report the duplication. This update fixes the issue so that the `POLICY_MAPPING` environment variable is set only once per cronjob, and there is no duplication for the apiserver to report. (link:https://issues.redhat.com/browse/LOG-1130[LOG-1130]) - -* Before this update, suspending an Elasticsearch cluster to zero nodes did not suspend the index-management cronjobs, which put these cronjobs into maximum backoff. Then, after unsuspending the Elasticsearch cluster, these cronjobs stayed halted due to maximum backoff reached. This update resolves the issue by suspending the cronjobs and the cluster. (link:https://issues.redhat.com/browse/LOG-1268[LOG-1268]) - -* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers was missing the "chart namespace" label and provided the incorrect metric name, `fluentd_input_status_total_bytes_logged`. With this update, the chart shows the namespace label and the correct metric name, `log_logged_bytes_total`. (link:https://issues.redhat.com/browse/LOG-1271[LOG-1271]) - -* Before this update, if an index management cronjob terminated with an error, it did not report the error exit code: instead, its job status was "complete." 
This update resolves the issue by reporting the error exit codes of index management cronjobs that terminate with errors. (link:https://issues.redhat.com/browse/LOG-1273[LOG-1273]) - -* The `priorityclasses.v1beta1.scheduling.k8s.io` was removed in 1.22 and replaced by `priorityclasses.v1.scheduling.k8s.io` (`v1beta1` was replaced by `v1`). Before this update, `APIRemovedInNextReleaseInUse` alerts were generated for `priorityclasses` because `v1beta1` was still present. This update resolves the issue by replacing `v1beta1` with `v1`. The alert is no longer generated. (link:https://issues.redhat.com/browse/LOG-1385[LOG-1385]) - -* Previously, the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator did not have the annotation that was required for them to appear in the {product-title} web console list of operators that can run in a disconnected environment. This update adds the `operators.openshift.io/infrastructure-features: '["Disconnected"]'` annotation to these two operators so that they appear in the list of operators that run in disconnected environments. (link:https://issues.redhat.com/browse/LOG-1420[LOG-1420]) - -* Before this update, Red Hat OpenShift Logging Operator pods were scheduled on CPU cores that were reserved for customer workloads on performance-optimized single-node clusters. With this update, cluster logging operator pods are scheduled on the correct CPU cores. (link:https://issues.redhat.com/browse/LOG-1440[LOG-1440]) - -* Before this update, some log entries had unrecognized UTF-8 bytes, which caused Elasticsearch to reject the messages and block the entire buffered payload. With this update, rejected payloads drop the invalid log entries and resubmit the remaining entries to resolve the issue. 
(link:https://issues.redhat.com/browse/LOG-1499[LOG-1499]) - -* Before this update, the `kibana-proxy` pod sometimes entered the `CrashLoopBackoff` state and logged the following message: `Invalid configuration: cookie_secret must be 16, 24, or 32 bytes to create an AES cipher when pass_access_token == true or cookie_refresh != 0, but is 29 bytes.` The exact number of bytes could vary. With this update, the generation of the Kibana session secret has been corrected, and the kibana-proxy pod no longer enters a `CrashLoopBackoff` state due to this error. (link:https://issues.redhat.com/browse/LOG-1446[LOG-1446]) - -* Before this update, the AWS CloudWatch Fluentd plug-in logged its AWS API calls to the Fluentd log at all log levels, consuming additional {product-title} node resources. With this update, the AWS CloudWatch Fluentd plug-in logs AWS API calls only at the "debug" and "trace" log levels. This way, at the default "warn" log level, Fluentd does not consume extra node resources. (link:https://issues.redhat.com/browse/LOG-1071[LOG-1071]) - -* Before this update, the Elasticsearch OpenDistro security plug-in caused user index migrations to fail. This update resolves the issue by providing a newer version of the plug-in. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1276[LOG-1276]) - -* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers lacked data points. This update resolves the issue, and the dashboard displays all data points. (link:https://issues.redhat.com/browse/LOG-1353[LOG-1353]) - -* Before this update, if you were tuning the performance of the Fluentd log forwarder by adjusting the `chunkLimitSize` and `totalLimitSize` values, the `Setting queued_chunks_limit_size for each buffer to` message reported values that were too low. The current update fixes this issue so that this message reports the correct values. 
(link:https://issues.redhat.com/browse/LOG-1411[LOG-1411]) - -* Before this update, the Kibana OpenDistro security plug-in caused user index migrations to fail. This update resolves the issue by providing a newer version of the plug-in. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1558[LOG-1558]) - -* Before this update, using a namespace input filter prevented logs in that namespace from appearing in other inputs. With this update, logs are sent to all inputs that can accept them. (link:https://issues.redhat.com/browse/LOG-1570[LOG-1570]) - -* Before this update, a missing license file for the `viaq/logerr` dependency caused license scanners to abort without success. With this update, the `viaq/logerr` dependency is licensed under Apache 2.0 and the license scanners run successfully. (link:https://issues.redhat.com/browse/LOG-1590[LOG-1590]) - -* Before this update, an incorrect brew tag for `curator5` within the `elasticsearch-operator-bundle` build pipeline caused the pull of an image pinned to a dummy SHA1. With this update, the build pipeline uses the `logging-curator5-rhel8` reference for `curator5`, enabling index management cronjobs to pull the correct image from `registry.redhat.io`. (link:https://issues.redhat.com/browse/LOG-1624[LOG-1624]) - -* Before this update, an issue with the `ServiceAccount` permissions caused errors such as `no permissions for [indices:admin/aliases/get]`. With this update, a permission fix resolves the issue. (link:https://issues.redhat.com/browse/LOG-1657[LOG-1657]) - -* Before this update, the Custom Resource Definition (CRD) for the Red Hat OpenShift Logging Operator was missing the Loki output type, which caused the admission controller to reject the `ClusterLogForwarder` custom resource object. With this update, the CRD includes Loki as an output type so that administrators can configure `ClusterLogForwarder` to send logs to a Loki server. 
(link:https://issues.redhat.com/browse/LOG-1683[LOG-1683]) - -* Before this update, OpenShift Elasticsearch Operator reconciliation of the `ServiceAccounts` overwrote third-party-owned fields that contained secrets. This issue caused memory and CPU spikes due to frequent recreation of secrets. This update resolves the issue. Now, the OpenShift Elasticsearch Operator does not overwrite third-party-owned fields. (link:https://issues.redhat.com/browse/LOG-1714[LOG-1714]) - -* Before this update, in the `ClusterLogging` custom resource (CR) definition, if you specified a `flush_interval` value but did not set `flush_mode` to `interval`, the Red Hat OpenShift Logging Operator generated a Fluentd configuration. However, the Fluentd collector generated an error at runtime. With this update, the Red Hat OpenShift Logging Operator validates the `ClusterLogging` CR definition and only generates the Fluentd configuration if both fields are specified. (link:https://issues.redhat.com/browse/LOG-1723[LOG-1723]) - -[id="openshift-logging-5-2-0-known-issues"] -=== Known issues - -* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to an external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. (link:https://issues.redhat.com/browse/LOG-1652[LOG-1652]) -+ -As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering: -+ -[source,terminal] ----- -$ oc delete pod -l component=collector ----- - -[id="openshift-logging-5-2-0-deprecated-removed-features"] -== Deprecated and removed features - -Some features available in previous releases have been deprecated or removed. 
- -Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -[id="openshift-logging-5-2-0-legacy-forwarding"] -=== Forwarding logs using the legacy Fluentd and legacy syslog methods have been deprecated - -From {product-title} 4.6 to the present, forwarding logs by using the following legacy methods have been deprecated and will be removed in a future release: - -* Forwarding logs using the legacy Fluentd method -* Forwarding logs using the legacy syslog method - -Instead, use the following non-legacy methods: - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] diff --git a/modules/cluster-logging-release-notes-5.2.z.adoc b/modules/cluster-logging-release-notes-5.2.z.adoc index 2a81425a207b..0f026699cf92 100644 --- a/modules/cluster-logging-release-notes-5.2.z.adoc +++ b/modules/cluster-logging-release-notes-5.2.z.adoc @@ -1,7 +1,4 @@ //Z-stream Release Notes by Version -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - [id="cluster-logging-release-notes-5-2-8"] = OpenShift Logging 5.2.8 diff --git a/modules/cluster-logging-release-notes-5.3.0.adoc b/modules/cluster-logging-release-notes-5.3.0.adoc deleted file mode 100644 index 0c73d30726a2..000000000000 --- a/modules/cluster-logging-release-notes-5.3.0.adoc +++ /dev/null @@ -1,54 +0,0 @@ - -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - -[id="cluster-logging-release-notes-5-3-0"] -= {logging-title-uc} 5.3.0 -The following advisories are available for {logging} 5.3.x: - 
 -[id="openshift-logging-5-3-0-new-features-and-enhancements"] -=== New features and enhancements -* With this update, authorization requirements for Log Forwarding have been relaxed. Outputs may now be configured with SASL, username/password, or TLS. - -[id="openshift-logging-5-3-0-bug-fixes"] -=== Bug fixes -* Before this update, application logs were not correctly configured to forward to the proper CloudWatch stream with multi-line error detection enabled. (link:https://issues.redhat.com/browse/LOG-1939[LOG-1939]) - -* Before this update, a name change of the deployed collector in the 5.3 release caused the alert 'fluentnodedown' to be generated. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918]) - -* Before this update, a regression introduced in a prior release configuration caused the collector to flush its buffered messages before shutdown, creating a delay in the termination and restart of collector Pods. With this update, Fluentd no longer flushes buffers at shutdown, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1735[LOG-1735]) - -* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. With this update, a log entry's "level" value is set either from a parsed JSON message that has a "level" field or by applying a regex against the message field to extract a match. (link:https://issues.redhat.com/browse/LOG-1199[LOG-1199]) - -[id="openshift-logging-5-3-0-known-issues"] -=== Known issues -* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to an external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. 
(link:https://issues.redhat.com/browse/LOG-1652[LOG-1652]) -+ -As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering: -+ -[source,terminal] ----- -$ oc delete pod -l component=collector ----- - -[id="openshift-logging-5-3-0-deprecated-removed-features"] -== Deprecated and removed features -Some features available in previous releases have been deprecated or removed. - -Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -[id="openshift-logging-5-3-0-legacy-forwarding"] -=== Forwarding logs using the legacy Fluentd and legacy syslog methods have been removed - -In OpenShift Logging 5.3, the legacy methods of forwarding logs to Syslog and Fluentd are removed. Bug fixes and support are provided through the end of the OpenShift Logging 5.2 life cycle, after which no new feature enhancements are made. - -Instead, use the following non-legacy methods: - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] - -[id="openshift-logging-5-3-0-legacy-forwarding-config"] -=== Configuration mechanisms for legacy forwarding methods have been removed - -In OpenShift Logging 5.3, the legacy configuration mechanism for log forwarding is removed: You cannot forward logs using the legacy Fluentd method and legacy Syslog method. Use the standard log forwarding methods instead. 
diff --git a/modules/cluster-logging-release-notes-5.3.z.adoc b/modules/cluster-logging-release-notes-5.3.z.adoc index b48f6fdc29ce..d7e24b3f4358 100644 --- a/modules/cluster-logging-release-notes-5.3.z.adoc +++ b/modules/cluster-logging-release-notes-5.3.z.adoc @@ -1,6 +1,4 @@ //Z-stream Release Notes by Version -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc [id="cluster-logging-release-notes-5-3-4"] = OpenShift Logging 5.3.4 diff --git a/modules/cluster-logging-release-notes-5.4.0.adoc b/modules/cluster-logging-release-notes-5.4.0.adoc deleted file mode 100644 index f2b44bf08dfa..000000000000 --- a/modules/cluster-logging-release-notes-5.4.0.adoc +++ /dev/null @@ -1,34 +0,0 @@ - -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - -[id="cluster-logging-release-notes-5-4-0"] -= {logging-title-uc} 5.4.0 -The following advisories are available for {logging} 5.4.x: - - - -[id="openshift-logging-5-4-0-new-features-and-enhancements"] -=== New features and enhancements - - - -[id="openshift-logging-5-4-0-bug-fixes"] -=== Bug fixes - - - -[id="openshift-logging-5-4-0-known-issues"] -=== Known issues - - - -[id="openshift-logging-5-2-0-deprecated-removed-features"] -== Deprecated and removed features - -== CVEs -[id="openshift-logging-5-4-0-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -[content] diff --git a/modules/cluster-logging-supported-versions.adoc b/modules/cluster-logging-supported-versions.adoc new file mode 100644 index 000000000000..d05b411f70dc --- /dev/null +++ b/modules/cluster-logging-supported-versions.adoc @@ -0,0 +1,10 @@ + +.{product-title} version support for Red Hat OpenShift Logging (RHOL) +[options="header"] +|==== +|OpenShift |4.7 |4.8 |4.9 +|RHOL 5.0|X |X | +|RHOL 5.1|X |X | +|RHOL 5.2|X |X |X +|RHOL 5.3| | |X +|==== diff --git a/modules/cluster-logging-uninstall.adoc b/modules/cluster-logging-uninstall.adoc index 860d6db70e7e..f6afaef6cb9f 100644 --- 
a/modules/cluster-logging-uninstall.adoc +++ b/modules/cluster-logging-uninstall.adoc @@ -4,16 +4,16 @@ :_content-type: PROCEDURE [id="cluster-logging-uninstall_{context}"] -= Uninstalling the {logging-title} += Uninstalling OpenShift Logging from {product-title} -You can stop log aggregation by deleting the `ClusterLogging` custom resource (CR). After deleting the CR, there are other {logging} components that remain, which you can optionally remove. +You can stop log aggregation by deleting the `ClusterLogging` custom resource (CR). After deleting the CR, there are other OpenShift Logging components that remain, which you can optionally remove. Deleting the `ClusterLogging` CR does not remove the persistent volume claims (PVCs). To preserve or delete the remaining PVCs, persistent volumes (PVs), and associated data, you must take further action. .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-visualizer-kibana.adoc b/modules/cluster-logging-visualizer-kibana.adoc index 03e0d513b7fd..0b7fdd5eabcb 100644 --- a/modules/cluster-logging-visualizer-kibana.adoc +++ b/modules/cluster-logging-visualizer-kibana.adoc @@ -10,7 +10,7 @@ You view cluster logs in the Kibana web console. The methods for viewing and vis .Prerequisites -* The {logging-title} and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. * Kibana index patterns must exist. 
diff --git a/modules/infrastructure-moving-logging.adoc b/modules/infrastructure-moving-logging.adoc index a34c912f2f6a..9cc31c0ddf34 100644 --- a/modules/infrastructure-moving-logging.adoc +++ b/modules/infrastructure-moving-logging.adoc @@ -7,13 +7,13 @@ [id="infrastructure-moving-logging_{context}"] = Moving OpenShift Logging resources -You can configure the Cluster Logging Operator to deploy the pods for {logging} components, such as Elasticsearch and Kibana, to different nodes. You cannot move the Cluster Logging Operator pod from its installed location. +You can configure the Cluster Logging Operator to deploy the pods for OpenShift Logging components, such as Elasticsearch and Kibana, to different nodes. You cannot move the Cluster Logging Operator pod from its installed location. For example, you can move the Elasticsearch pods to a separate node because of high CPU, memory, and disk requirements. .Prerequisites -* The {logging-title} and Elasticsearch must be installed. These features are not installed by default. +* OpenShift Logging and Elasticsearch must be installed. These features are not installed by default. .Procedure