From 3353f1b686e243c76478c012246d96d4f1c35a15 Mon Sep 17 00:00:00 2001 From: Rolfe Dlugy-Hegwer Date: Mon, 18 Jan 2021 11:29:17 -0500 Subject: [PATCH] RHDEVDOCS-2493 Logging: Change name from "cluster logging" to "OpenShift Logging" --- ...ter-logging-configuring-node-selector.adoc | 6 +- .../cluster-logging-elasticsearch-admin.adoc | 2 +- ...luster-logging-exported-fields-docker.adoc | 2 +- ...cluster-logging-uninstall-cluster-ops.adoc | 8 +-- logging/cluster-logging-deploying.adoc | 10 ++-- logging/cluster-logging-eventrouter.adoc | 2 +- logging/cluster-logging-external.adoc | 2 +- logging/cluster-logging-uninstall.adoc | 4 +- logging/cluster-logging-upgrading.adoc | 6 +- logging/cluster-logging-visualizer.adoc | 2 +- logging/cluster-logging.adoc | 12 ++-- .../cluster-logging-configuring-cr.adoc | 2 +- .../config/cluster-logging-configuring.adoc | 10 ++-- logging/config/cluster-logging-memory.adoc | 4 +- .../config/cluster-logging-moving-nodes.adoc | 6 +- ...luster-logging-storage-considerations.adoc | 4 +- .../config/cluster-logging-tolerations.adoc | 6 +- .../config/cluster-logging-visualizer.adoc | 2 +- logging/dedicated-cluster-deploying.adoc | 2 +- logging/dedicated-cluster-logging.adoc | 10 ++-- .../cluster-logging-alerts.adoc | 2 +- .../cluster-logging-cluster-status.adoc | 4 +- .../cluster-logging-must-gather.adoc | 6 +- .../planning-migration-3-to-4.adoc | 8 +-- modules/cluster-logging-about-components.adoc | 6 +- modules/cluster-logging-about-crd.adoc | 4 +- modules/cluster-logging-about-logstore.adoc | 2 +- modules/cluster-logging-about.adoc | 16 ++--- modules/cluster-logging-clo-status-comp.adoc | 10 ++-- modules/cluster-logging-clo-status.adoc | 8 +-- ...ster-logging-collector-alerts-viewing.adoc | 2 +- modules/cluster-logging-collector-envvar.adoc | 2 +- ...ogging-collector-log-forwarding-about.adoc | 2 +- ...cluster-logging-collector-tolerations.adoc | 2 +- modules/cluster-logging-collector-tuning.adoc | 2 +- ...uster-logging-configuring-image-about.adoc | 4 +- modules/cluster-logging-cpu-memory.adoc | 2 +- .../cluster-logging-curator-delete-index.adoc | 2 +- modules/cluster-logging-curator-schedule.adoc | 4 +- .../cluster-logging-curator-troubleshoot.adoc | 4 +- modules/cluster-logging-deploy-cli.adoc | 24 ++++---- modules/cluster-logging-deploy-console.adoc | 20 +++---- .../cluster-logging-deploy-multitenant.adoc | 4 +- ...logging-deploy-storage-considerations.adoc | 2 +- modules/cluster-logging-deploying-about.adoc | 16 ++--- .../cluster-logging-elasticsearch-audit.adoc | 2 +- ...luster-logging-elasticsearch-exposing.adoc | 4 +- modules/cluster-logging-elasticsearch-ha.adoc | 2 +- ...lasticsearch-persistent-storage-empty.adoc | 2 +- ...uster-logging-elasticsearch-retention.adoc | 60 +++++++++---------- ...cluster-logging-elasticsearch-storage.adoc | 2 +- ...ter-logging-elasticsearch-tolerations.adoc | 4 +- .../cluster-logging-eventrouter-about.adoc | 2 +- .../cluster-logging-eventrouter-deploy.adoc | 4 +- ...uster-logging-exported-fields-aushape.adoc | 2 +- ...ter-logging-exported-fields-container.adoc | 2 +- ...er-logging-exported-fields-kubernetes.adoc | 2 +- ...cluster-logging-exported-fields-ovirt.adoc | 2 +- ...uster-logging-exported-fields-systemd.adoc | 2 +- .../cluster-logging-exported-fields-tlog.adoc | 2 +- modules/cluster-logging-forwarding-about.adoc | 2 +- .../cluster-logging-kibana-tolerations.adoc | 2 +- ...luster-logging-log-forwarding-disable.adoc | 4 +- ...ster-logging-log-store-status-viewing.adoc | 2 +- modules/cluster-logging-logstore-limits.adoc | 
2 +- ...ter-logging-maintenance-support-about.adoc | 4 +- ...luster-logging-manual-rollout-rolling.adoc | 2 +- .../cluster-logging-must-gather-about.adoc | 4 +- ...luster-logging-must-gather-collecting.adoc | 8 +-- modules/cluster-logging-systemd-scaling.adoc | 4 +- modules/cluster-logging-uninstall.adoc | 12 ++-- modules/cluster-logging-updating-logging.adoc | 10 ++-- .../cluster-logging-viewing-logs-console.adoc | 2 +- modules/cluster-logging-viewing-logs.adoc | 2 +- .../cluster-logging-visualizer-kibana.adoc | 2 +- modules/dedicated-cluster-install-deploy.adoc | 24 ++++---- modules/gathering-data-specific-features.adoc | 8 +-- modules/infrastructure-moving-logging.adoc | 6 +- modules/jaeger-install-elasticsearch.adoc | 2 +- ...nodes-cluster-overcommit-buffer-chunk.adoc | 2 +- modules/nodes-pods-priority-about.adoc | 2 +- .../nw-configure-ingress-access-logging.adoc | 2 +- .../security-monitoring-cluster-logging.adoc | 2 +- ...-find-logs-knative-serving-components.adoc | 4 +- ...r-logging-find-logs-services-deployed.adoc | 4 +- ...gging-operator-component-certificates.adoc | 2 +- .../security-monitoring.adoc | 2 +- .../cluster-logging-serverless.adoc | 2 +- .../virt-openshift-cluster-monitoring.adoc | 4 +- welcome/index.adoc | 6 +- 90 files changed, 240 insertions(+), 248 deletions(-) diff --git a/_unused_topics/cluster-logging-configuring-node-selector.adoc b/_unused_topics/cluster-logging-configuring-node-selector.adoc index 2348b69f8733..35d910e88e96 100644 --- a/_unused_topics/cluster-logging-configuring-node-selector.adoc +++ b/_unused_topics/cluster-logging-configuring-node-selector.adoc @@ -3,13 +3,13 @@ // * logging/cluster-logging-elasticsearch.adoc [id="cluster-logging-configuring-node-selector_{context}"] -= Specifying a node for cluster logging components using node selectors += Specifying a node for OpenShift Logging components using node selectors -Each component specification allows the component to target a specific node. +Each component specification allows the component to target a specific node. .Procedure -. Edit the Cluster Logging Custom Resource (CR) in the `openshift-logging` project: +. Edit the Cluster Logging custom resource (CR) in the `openshift-logging` project: + ---- $ oc edit ClusterLogging instance diff --git a/_unused_topics/cluster-logging-elasticsearch-admin.adoc b/_unused_topics/cluster-logging-elasticsearch-admin.adoc index 933f61925063..cdde0bc9e0f2 100644 --- a/_unused_topics/cluster-logging-elasticsearch-admin.adoc +++ b/_unused_topics/cluster-logging-elasticsearch-admin.adoc @@ -11,7 +11,7 @@ administrative operations on Elasticsearch are provided within the [NOTE] ==== -To confirm whether or not your cluster logging installation provides these, run: +To confirm whether or not your OpenShift Logging installation provides these, run: ---- $ oc describe secret elasticsearch -n openshift-logging ---- diff --git a/_unused_topics/cluster-logging-exported-fields-docker.adoc b/_unused_topics/cluster-logging-exported-fields-docker.adoc index e9c1117db444..26d77f062ca0 100644 --- a/_unused_topics/cluster-logging-exported-fields-docker.adoc +++ b/_unused_topics/cluster-logging-exported-fields-docker.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-exported-fields-container_{context}"] = Container exported fields -These are the Docker fields exported by the {product-title} cluster logging available for searching from Elasticsearch and Kibana. +These are the Docker fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. 
Namespace for docker container-specific metadata. The docker.container_id is the Docker container ID. diff --git a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc b/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc index 18c5e5a2ee31..ec4c0d37eac0 100644 --- a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc +++ b/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc @@ -5,15 +5,15 @@ [id="cluster-logging-uninstall-ops_{context}"] = Uninstall the infra cluster -You can uninstall the infra cluster from the {product-title} cluster logging. +You can uninstall the infra cluster from OpenShift Logging. After uninstalling, Fluentd no longer splits logs. .Procedure To uninstall the infra cluster: -. +. -. +. -. \ No newline at end of file +. diff --git a/logging/cluster-logging-deploying.adoc b/logging/cluster-logging-deploying.adoc index b5d5d3ff9584..e27a9f1f2c6c 100644 --- a/logging/cluster-logging-deploying.adoc +++ b/logging/cluster-logging-deploying.adoc @@ -1,19 +1,19 @@ :context: cluster-logging-deploying [id="cluster-logging-deploying"] -= Installing cluster logging += Installing OpenShift Logging include::modules/common-attributes.adoc[] toc::[] -You can install cluster logging by deploying +You can install OpenShift Logging by deploying the Elasticsearch and Cluster Logging Operators. The Elasticsearch Operator -creates and manages the Elasticsearch cluster used by cluster logging. +creates and manages the Elasticsearch cluster used by OpenShift Logging. The Cluster Logging Operator creates and manages the components of the logging stack. -The process for deploying cluster logging to {product-title} involves: +The process for deploying OpenShift Logging to {product-title} involves: -* Reviewing the xref:../logging/config/cluster-logging-storage-considerations#cluster-logging-storage[cluster logging storage considerations]. +* Reviewing the xref:../logging/config/cluster-logging-storage-considerations#cluster-logging-storage[OpenShift Logging storage considerations]. * Installing the Elasticsearch Operator and Cluster Logging Operator using the {product-title} xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-console_cluster-logging-deploying[web console] or xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-cli_cluster-logging-deploying[CLI]. diff --git a/logging/cluster-logging-eventrouter.adoc b/logging/cluster-logging-eventrouter.adoc index cd17feed9a17..5e116049668d 100644 --- a/logging/cluster-logging-eventrouter.adoc +++ b/logging/cluster-logging-eventrouter.adoc @@ -5,7 +5,7 @@ include::modules/common-attributes.adoc[] toc::[] -The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by cluster logging. You must manually deploy the Event Router. +The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by OpenShift Logging. You must manually deploy the Event Router. The Event Router collects events from all projects and writes them to `STDOUT`. Fluentd collects those events and forwards them into the {product-title} Elasticsearch instance. Elasticsearch indexes the events to the `infra` index. 
diff --git a/logging/cluster-logging-external.adoc b/logging/cluster-logging-external.adoc index eea4a6226f26..2cab0ccaebca 100644 --- a/logging/cluster-logging-external.adoc +++ b/logging/cluster-logging-external.adoc @@ -6,7 +6,7 @@ include::modules/common-attributes.adoc[] toc::[] -By default, {product-title} cluster logging sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource. If you want to forward logs to other log aggregators, you can use the {product-title} Log Forwarding API to send container, infrastructure, and audit logs to specific endpoints within or outside your cluster. You can send different types of logs to different systems, allowing you to control who in your organization can access each type. Optional TLS support ensures that you can send logs using secure communication as required by your organization. +By default, OpenShift Logging sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource. If you want to forward logs to other log aggregators, you can use the {product-title} Log Forwarding API to send container, infrastructure, and audit logs to specific endpoints within or outside your cluster. You can send different types of logs to different systems, allowing you to control who in your organization can access each type. Optional TLS support ensures that you can send logs using secure communication as required by your organization. When you forward logs externally, the Cluster Logging Operator creates or modifies a Fluentd config map to send logs using your desired protocols. You are responsible for configuring the protocol on the external log aggregator. diff --git a/logging/cluster-logging-uninstall.adoc b/logging/cluster-logging-uninstall.adoc index f731276002f3..d345387649ce 100644 --- a/logging/cluster-logging-uninstall.adoc +++ b/logging/cluster-logging-uninstall.adoc @@ -1,11 +1,11 @@ :context: cluster-logging-uninstall [id="cluster-logging-uninstall"] -= Uninstalling Cluster Logging += Uninstalling OpenShift Logging include::modules/common-attributes.adoc[] toc::[] -You can remove cluster logging from your {product-title} cluster. +You can remove OpenShift Logging from your {product-title} cluster. // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference diff --git a/logging/cluster-logging-upgrading.adoc b/logging/cluster-logging-upgrading.adoc index 2d4138b32449..ba0a25beeb1d 100644 --- a/logging/cluster-logging-upgrading.adoc +++ b/logging/cluster-logging-upgrading.adoc @@ -1,6 +1,6 @@ :context: cluster-logging-upgrading [id="cluster-logging-upgrading"] -= Updating cluster logging += Updating OpenShift Logging include::modules/common-attributes.adoc[] toc::[] @@ -9,14 +9,14 @@ toc::[] After updating the {product-title} cluster from 4.6 to 4.7, you can then update the Elasticsearch Operator and Cluster Logging Operator from 4.6 to 4.7. -Cluster logging 4.5 introduces a new Elasticsearch version, Elasticsearch 6.8.1, and an enhanced security plug-in, Open Distro for Elasticsearch. The new Elasticsearch version introduces a new Elasticsearch data model, where the Elasticsearch data is indexed only by type: infrastructure, application, and audit. Previously, data was indexed by type (infrastructure and application) and project. 
+OpenShift Logging 4.5 introduces a new Elasticsearch version, Elasticsearch 6.8.1, and an enhanced security plug-in, Open Distro for Elasticsearch. The new Elasticsearch version introduces a new Elasticsearch data model, where the Elasticsearch data is indexed only by type: infrastructure, application, and audit. Previously, data was indexed by type (infrastructure and application) and project. [IMPORTANT] ==== Because of the new data model, the update does not migrate existing custom Kibana index patterns and visualizations into the new version. You must re-create your Kibana index patterns and visualizations to match the new indices after updating. ==== -Due to the nature of these changes, you are not required to update your cluster logging to 4.6. However, when you update to {product-title} 4.7, you must update cluster logging to 4.7 at that time. +Due to the nature of these changes, you are not required to update your OpenShift Logging to 4.6. However, when you update to {product-title} 4.7, you must update OpenShift Logging to 4.7 at that time. // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference diff --git a/logging/cluster-logging-visualizer.adoc b/logging/cluster-logging-visualizer.adoc index 463271e4b5d0..aece33068d8e 100644 --- a/logging/cluster-logging-visualizer.adoc +++ b/logging/cluster-logging-visualizer.adoc @@ -5,7 +5,7 @@ include::modules/common-attributes.adoc[] toc::[] -{product-title} cluster logging includes a web console for visualizing collected log data. Currently, {product-title} deploys the Kibana console for visualization. +OpenShift Logging includes a web console for visualizing collected log data. Currently, {product-title} deploys the Kibana console for visualization. Using the log visualizer, you can do the following with your data: diff --git a/logging/cluster-logging.adoc b/logging/cluster-logging.adoc index d8f63ee328ab..d88e3059bdef 100644 --- a/logging/cluster-logging.adoc +++ b/logging/cluster-logging.adoc @@ -1,6 +1,6 @@ :context: cluster-logging [id="cluster-logging"] -= Understanding cluster logging += Understanding OpenShift Logging include::modules/common-attributes.adoc[] toc::[] @@ -8,11 +8,11 @@ toc::[] ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As a cluster administrator, you can deploy cluster logging to +As a cluster administrator, you can deploy OpenShift Logging to aggregate all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs. -Cluster logging aggregates these logs from throughout your cluster and stores them in a default log store. You can xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer[use the Kibana web console to visualize log data]. +OpenShift Logging aggregates these logs from throughout your cluster and stores them in a default log store. You can xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer[use the Kibana web console to visualize log data]. -Cluster logging aggregates the following types of logs: +OpenShift Logging aggregates the following types of logs: * `application` - Container logs generated by user applications running in the cluster, except infrastructure container applications. * `infrastructure` - Logs generated by infrastructure components running in the cluster and {product-title} nodes, such as journal logs. 
Infrastructure components are pods that run in the `openshift*`, `kube*`, or `default` projects. @@ -25,10 +25,10 @@ Because the internal {product-title} Elasticsearch log store does not provide se endif::[] ifdef::openshift-dedicated[] -As an administrator, you can deploy cluster logging to +As an administrator, you can deploy OpenShift Logging to aggregate logs for a range of {product-title} services. -Cluster logging runs on worker nodes. As an +OpenShift Logging runs on worker nodes. As an administrator, you can monitor resource consumption in the console and via Prometheus and Grafana. Due to the high work load required for logging, more worker nodes may be required for your environment. diff --git a/logging/config/cluster-logging-configuring-cr.adoc b/logging/config/cluster-logging-configuring-cr.adoc index 9c777f2a086f..d9cee6332e5e 100644 --- a/logging/config/cluster-logging-configuring-cr.adoc +++ b/logging/config/cluster-logging-configuring-cr.adoc @@ -5,7 +5,7 @@ include::modules/common-attributes.adoc[] toc::[] -To configure {product-title} cluster logging, you customize the `ClusterLogging` custom resource (CR). +To configure OpenShift Logging, you customize the `ClusterLogging` custom resource (CR). // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference diff --git a/logging/config/cluster-logging-configuring.adoc b/logging/config/cluster-logging-configuring.adoc index 756a682a7211..4f8bad4961ad 100644 --- a/logging/config/cluster-logging-configuring.adoc +++ b/logging/config/cluster-logging-configuring.adoc @@ -1,17 +1,17 @@ :context: cluster-logging-configuring [id="cluster-logging-configuring"] -= Configuring cluster logging += Configuring OpenShift Logging include::modules/common-attributes.adoc[] toc::[] -Cluster logging is configurable using a `ClusterLogging` custom resource (CR) deployed +OpenShift Logging is configurable using a `ClusterLogging` custom resource (CR) deployed in the `openshift-logging` project. The Cluster Logging Operator watches for changes to `ClusterLogging` CR, creates any missing logging components, and adjusts the logging environment accordingly. -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource definition (CRD), which defines a complete cluster logging environment +The `ClusterLogging` CR is based on the `ClusterLogging` custom resource definition (CRD), which defines a complete OpenShift Logging environment and includes all the components of the logging stack to collect, store and visualize logs. .Sample `ClusterLogging` custom resource (CR) @@ -52,9 +52,9 @@ spec: resources: null type: kibana ---- -You can configure the following for cluster logging: +You can configure the following for OpenShift Logging: -* You can overwrite the image for each cluster logging component by modifying the appropriate +* You can overwrite the image for each OpenShift Logging component by modifying the appropriate environment variable in the `cluster-logging-operator` Deployment. * You can specify specific nodes for the logging components using node selectors. 
diff --git a/logging/config/cluster-logging-memory.adoc b/logging/config/cluster-logging-memory.adoc index 80b35a7b44ba..1728701244b2 100644 --- a/logging/config/cluster-logging-memory.adoc +++ b/logging/config/cluster-logging-memory.adoc @@ -1,12 +1,12 @@ :context: cluster-logging-memory [id="cluster-logging-memory"] -= Configuring CPU and memory limits for cluster logging components += Configuring CPU and memory limits for OpenShift Logging components include::modules/common-attributes.adoc[] toc::[] -You can configure both the CPU and memory limits for each of the cluster logging components as needed. +You can configure both the CPU and memory limits for each of the OpenShift Logging components as needed. // The following include statements pull in the module files that comprise diff --git a/logging/config/cluster-logging-moving-nodes.adoc b/logging/config/cluster-logging-moving-nodes.adoc index 69d548f08fc8..c50bdd20a892 100644 --- a/logging/config/cluster-logging-moving-nodes.adoc +++ b/logging/config/cluster-logging-moving-nodes.adoc @@ -1,6 +1,6 @@ :context: cluster-logging-moving [id="cluster-logging-moving"] -= Moving the cluster logging resources with node selectors += Moving OpenShift Logging resources with node selectors include::modules/common-attributes.adoc[] toc::[] @@ -9,7 +9,7 @@ toc::[] -You can use node selectors to deploy the Elasticsearch, Kibana, and Curator pods to different nodes. +You can use node selectors to deploy the Elasticsearch, Kibana, and Curator pods to different nodes. // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference @@ -17,5 +17,3 @@ You can use node selectors to deploy the Elasticsearch, Kibana, and Curator pods // assemblies. include::modules/infrastructure-moving-logging.adoc[leveloffset=+1] - - diff --git a/logging/config/cluster-logging-storage-considerations.adoc b/logging/config/cluster-logging-storage-considerations.adoc index 5f96f2879cfa..88a44b2b263f 100644 --- a/logging/config/cluster-logging-storage-considerations.adoc +++ b/logging/config/cluster-logging-storage-considerations.adoc @@ -1,12 +1,12 @@ :context: cluster-logging-storage [id="cluster-logging-storage"] -= Configuring cluster logging storage += Configuring OpenShift Logging storage include::modules/common-attributes.adoc[] toc::[] -Elasticsearch is a memory-intensive application. The default cluster logging installation deploys 16G of memory for both memory requests and memory limits. +Elasticsearch is a memory-intensive application. The default OpenShift Logging installation deploys 16G of memory for both memory requests and memory limits. The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the {product-title} cluster to run with the recommended or higher memory. Each Elasticsearch node can operate with a lower memory setting, though this is not recommended for production environments. 
diff --git a/logging/config/cluster-logging-tolerations.adoc b/logging/config/cluster-logging-tolerations.adoc index df08d2af0c90..ea4d8b427823 100644 --- a/logging/config/cluster-logging-tolerations.adoc +++ b/logging/config/cluster-logging-tolerations.adoc @@ -1,11 +1,11 @@ :context: cluster-logging-tolerations [id="cluster-logging-tolerations"] -= Using tolerations to control cluster logging pod placement += Using tolerations to control OpenShift Logging pod placement include::modules/common-attributes.adoc[] toc::[] -You can use taints and tolerations to ensure that cluster logging pods run +You can use taints and tolerations to ensure that OpenShift Logging pods run on specific nodes and that no other workload can run on those nodes. Taints and tolerations are simple `key:value` pair. A taint on a node @@ -14,7 +14,7 @@ instructs the node to repel all pods that do not tolerate the taint. The `key` is any string, up to 253 characters and the `value` is any string up to 63 characters. The string must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores. -.Sample cluster logging CR with tolerations +.Sample OpenShift Logging CR with tolerations [source,yaml] ---- apiVersion: "logging.openshift.io/v1" diff --git a/logging/config/cluster-logging-visualizer.adoc b/logging/config/cluster-logging-visualizer.adoc index f071222b07e8..62435b5ed05b 100644 --- a/logging/config/cluster-logging-visualizer.adoc +++ b/logging/config/cluster-logging-visualizer.adoc @@ -5,7 +5,7 @@ include::modules/common-attributes.adoc[] toc::[] -{product-title} uses Kibana to display the log data collected by cluster logging. +{product-title} uses Kibana to display the log data collected by OpenShift Logging. You can scale Kibana for redundancy and configure the CPU and memory for your Kibana nodes. diff --git a/logging/dedicated-cluster-deploying.adoc b/logging/dedicated-cluster-deploying.adoc index 252406bfb963..c14f7ba2b03c 100644 --- a/logging/dedicated-cluster-deploying.adoc +++ b/logging/dedicated-cluster-deploying.adoc @@ -1,6 +1,6 @@ :context: dedicated-cluster-deploying [id="dedicated-cluster-deploying"] -= Installing the Cluster Logging and Elasticsearch Operators += Installing the Cluster Logging Operator and Elasticsearch Operator include::modules/common-attributes.adoc[] toc::[] diff --git a/logging/dedicated-cluster-logging.adoc b/logging/dedicated-cluster-logging.adoc index 98b8b70c8bb0..3ea419676ce0 100644 --- a/logging/dedicated-cluster-logging.adoc +++ b/logging/dedicated-cluster-logging.adoc @@ -1,23 +1,23 @@ :context: dedicated-cluster-logging [id="dedicated-cluster-logging"] -= Configuring cluster logging in {product-title} += Configuring OpenShift Logging in {product-title} include::modules/common-attributes.adoc[] -As a cluster administrator, you can deploy cluster logging +As a cluster administrator, you can deploy OpenShift Logging to aggregate logs for a range of services. {product-title} clusters can perform logging tasks using the Elasticsearch -Operator. Cluster logging is configured through the Curator tool to retain logs +Operator. OpenShift Logging is configured through the Curator tool to retain logs for two days. -Cluster logging is configurable using a `ClusterLogging` custom resource (CR) +OpenShift Logging is configurable using a `ClusterLogging` custom resource (CR) deployed in the `openshift-logging` project namespace. 
The Cluster Logging Operator watches for changes to `ClusterLogging` CR, creates any missing logging components, and adjusts the logging environment accordingly. The `ClusterLogging` CR is based on the `ClusterLogging` custom resource -definition (CRD), which defines a complete cluster logging environment and +definition (CRD), which defines a complete OpenShift Logging environment and includes all the components of the logging stack to collect, store and visualize logs. diff --git a/logging/troubleshooting/cluster-logging-alerts.adoc b/logging/troubleshooting/cluster-logging-alerts.adoc index e80f7faacd2e..e89adadedc82 100644 --- a/logging/troubleshooting/cluster-logging-alerts.adoc +++ b/logging/troubleshooting/cluster-logging-alerts.adoc @@ -1,6 +1,6 @@ :context: cluster-logging-alerts [id="cluster-logging-alerts"] -= Understanding cluster logging alerts += Understanding OpenShift Logging alerts include::modules/common-attributes.adoc[] toc::[] diff --git a/logging/troubleshooting/cluster-logging-cluster-status.adoc b/logging/troubleshooting/cluster-logging-cluster-status.adoc index d7e3990f7942..54bbf58e1e50 100644 --- a/logging/troubleshooting/cluster-logging-cluster-status.adoc +++ b/logging/troubleshooting/cluster-logging-cluster-status.adoc @@ -1,11 +1,11 @@ :context: cluster-logging-cluster-status [id="cluster-logging-cluster-status"] -= Viewing cluster logging status += Viewing OpenShift Logging status include::modules/common-attributes.adoc[] toc::[] -You can view the status of the Cluster Logging Operator and for a number of cluster logging components. +You can view the status of the Cluster Logging Operator and for a number of OpenShift Logging components. // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference diff --git a/logging/troubleshooting/cluster-logging-must-gather.adoc b/logging/troubleshooting/cluster-logging-must-gather.adoc index 977c15cf1f51..742f1a008456 100644 --- a/logging/troubleshooting/cluster-logging-must-gather.adoc +++ b/logging/troubleshooting/cluster-logging-must-gather.adoc @@ -9,9 +9,9 @@ toc::[] When opening a support case, it is helpful to provide debugging information about your cluster to Red Hat Support. -The xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[`must-gather` tool] enables you to collect diagnostic information for project-level resources, cluster-level resources, and each of the cluster logging components. +The xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[`must-gather` tool] enables you to collect diagnostic information for project-level resources, cluster-level resources, and each of the OpenShift Logging components. -For prompt support, supply diagnostic information for both {product-title} and cluster logging. +For prompt support, supply diagnostic information for both {product-title} and OpenShift Logging. [NOTE] ==== @@ -23,6 +23,6 @@ include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+1] [id="cluster-logging-must-gather-prereqs"] == Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. 
include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+1] diff --git a/migration/migrating_3_4/planning-migration-3-to-4.adoc b/migration/migrating_3_4/planning-migration-3-to-4.adoc index 57176499d264..463a6e0387d2 100644 --- a/migration/migrating_3_4/planning-migration-3-to-4.adoc +++ b/migration/migrating_3_4/planning-migration-3-to-4.adoc @@ -157,18 +157,18 @@ endif::[] Review the following logging changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. [discrete] -==== Deploying cluster logging +==== Deploying OpenShift Logging -{product-title} 4 provides a simple deployment mechanism for cluster logging, by using a Cluster Logging custom resource. +{product-title} 4 provides a simple deployment mechanism for OpenShift Logging, by using a Cluster Logging custom resource. -For more information, see xref:../../logging/cluster-logging-deploying.adoc#cluster-logging-deploying_cluster-logging-deploying[Installing cluster logging]. +For more information, see xref:../../logging/cluster-logging-deploying.adoc#cluster-logging-deploying_cluster-logging-deploying[Installing OpenShift Logging]. [discrete] ==== Aggregated logging data You cannot transition your aggregate logging data from {product-title} 3.11 into your new {product-title} 4 cluster. -For more information, see xref:../../logging/cluster-logging.adoc#cluster-logging-about_cluster-logging[About cluster logging]. +For more information, see xref:../../logging/cluster-logging.adoc#cluster-logging-about_cluster-logging[About OpenShift Logging]. [discrete] ==== Unsupported logging configurations diff --git a/modules/cluster-logging-about-components.adoc b/modules/cluster-logging-about-components.adoc index 40692fcb6e07..4a6e5ba34515 100644 --- a/modules/cluster-logging-about-components.adoc +++ b/modules/cluster-logging-about-components.adoc @@ -9,13 +9,13 @@ ifeval::["{context}" == "virt-openshift-cluster-monitoring"] endif::[] [id="cluster-logging-about-components_{context}"] -= About cluster logging components += About OpenShift Logging components -The cluster logging components include a collector deployed to each node in the {product-title} cluster +The OpenShift Logging components include a collector deployed to each node in the {product-title} cluster that collects all node and container logs and writes them to a log store. You can use a centralized web UI to create rich visualizations and dashboards with the aggregated data. -The major components of cluster logging are: +The major components of OpenShift Logging are: * collection - This is the component that collects logs from the cluster, formats them, and forwards them to the log store. The current implementation is Fluentd. * log store - This is where the logs are stored. The default implementation is Elasticsearch. You can use the default Elasticsearch log store or forward logs to external log stores. The default log store is optimized and tested for short-term storage. diff --git a/modules/cluster-logging-about-crd.adoc b/modules/cluster-logging-about-crd.adoc index 7e236a17377c..8e4b336399a1 100644 --- a/modules/cluster-logging-about-crd.adoc +++ b/modules/cluster-logging-about-crd.adoc @@ -5,10 +5,10 @@ [id="cluster-logging-configuring-crd_{context}"] = About the `ClusterLogging` custom resource -To make changes to your cluster logging environment, create and modify the `ClusterLogging` custom resource (CR). 
+To make changes to your OpenShift Logging environment, create and modify the `ClusterLogging` custom resource (CR). Instructions for creating or modifying a CR are provided in this documentation as appropriate. -The following is an example of a typical custom resource for cluster logging. +The following is an example of a typical custom resource for OpenShift Logging. [id="efk-logging-configuring-about-sample_{context}"] .Sample `ClusterLogging` CR diff --git a/modules/cluster-logging-about-logstore.adoc b/modules/cluster-logging-about-logstore.adoc index 9a301679e1a8..29acccde96c4 100644 --- a/modules/cluster-logging-about-logstore.adoc +++ b/modules/cluster-logging-about-logstore.adoc @@ -7,7 +7,7 @@ By default, {product-title} uses link:https://www.elastic.co/products/elasticsearch[Elasticsearch (ES)] to store log data. Optionally, you can use the log forwarding features to forward logs to external log stores using Fluentd protocols, syslog protocols, or the {product-title} Log Forwarding API. -The cluster logging Elasticsearch instance is optimized and tested for short term storage, approximately seven days. If you want to retain your logs over a longer term, it is recommended you move the data to a third-party storage system. +The OpenShift Logging Elasticsearch instance is optimized and tested for short term storage, approximately seven days. If you want to retain your logs over a longer term, it is recommended you move the data to a third-party storage system. Elasticsearch organizes the log data from Fluentd into datastores, or _indices_, then subdivides each index into multiple pieces called _shards_, which it spreads across a set of Elasticsearch nodes in an Elasticsearch cluster. You can configure Elasticsearch to make copies of the shards, called _replicas_, which Elasticsearch also spreads across the Elasticsearch nodes. The `ClusterLogging` custom resource (CR) allows you to specify how the shards are replicated to provide data redundancy and resilience to failure. You can also specify how long the different types of logs are retained using a retention policy in the `ClusterLogging` CR. diff --git a/modules/cluster-logging-about.adoc b/modules/cluster-logging-about.adoc index 05cc838a0332..23f57ae09054 100644 --- a/modules/cluster-logging-about.adoc +++ b/modules/cluster-logging-about.adoc @@ -9,15 +9,15 @@ [id="cluster-logging-about_{context}"] -= About deploying cluster logging += About deploying OpenShift Logging ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -{product-title} cluster administrators can deploy cluster logging using +{product-title} cluster administrators can deploy OpenShift Logging using the {product-title} web console or CLI to install the Elasticsearch Operator and Cluster Logging Operator. When the operators are installed, you create -a `ClusterLogging` custom resource (CR) to schedule cluster logging pods and -other resources necessary to support cluster logging. The operators are -responsible for deploying, upgrading, and maintaining cluster logging. +a `ClusterLogging` custom resource (CR) to schedule OpenShift Logging pods and +other resources necessary to support OpenShift Logging. The operators are +responsible for deploying, upgrading, and maintaining OpenShift Logging. 
endif::openshift-enterprise,openshift-webscale,openshift-origin[] ifdef::openshift-dedicated[] @@ -25,11 +25,11 @@ ifdef::openshift-dedicated[] Elasticsearch Operator by using the {product-title} web console and can configure logging in the `openshift-logging` namespace. Configuring logging will deploy Elasticsearch, Fluentd, and Kibana in the `openshift-logging` namespace. The operators are -responsible for deploying, upgrading, and maintaining cluster logging. +responsible for deploying, upgrading, and maintaining OpenShift Logging. endif::openshift-dedicated[] -The `ClusterLogging` CR defines a complete cluster logging environment that includes all the components -of the logging stack to collect, store and visualize logs. The Cluster Logging Operator watches the Cluster Logging +The `ClusterLogging` CR defines a complete OpenShift Logging environment that includes all the components +of the logging stack to collect, store and visualize logs. The Cluster Logging Operator watches the OpenShift Logging CR and adjusts the logging deployment accordingly. Administrators and application developers can view the logs of the projects for which they have view access. diff --git a/modules/cluster-logging-clo-status-comp.adoc b/modules/cluster-logging-clo-status-comp.adoc index 4bbaebc7892f..9729a90292be 100644 --- a/modules/cluster-logging-clo-status-comp.adoc +++ b/modules/cluster-logging-clo-status-comp.adoc @@ -3,13 +3,13 @@ // * logging/cluster-logging-cluster-status.adoc [id="cluster-logging-clo-status-example_{context}"] -= Viewing the status of cluster logging components += Viewing the status of OpenShift Logging components -You can view the status for a number of cluster logging components. +You can view the status for a number of OpenShift Logging components. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure @@ -20,7 +20,7 @@ You can view the status for a number of cluster logging components. $ oc project openshift-logging ---- -. View the status of the cluster logging environment: +. View the status of the OpenShift Logging environment: + [source,terminal] ---- @@ -48,7 +48,7 @@ Events: Normal ScalingReplicaSet 62m deployment-controller Scaled up replica set cluster-logging-operator-574b8987df to 1---- ---- -. View the status of the cluster logging replica set: +. View the status of the OpenShift Logging replica set: .. Get the name of a replica set: + diff --git a/modules/cluster-logging-clo-status.adoc b/modules/cluster-logging-clo-status.adoc index 73fc5e22c6c3..86803df1d706 100644 --- a/modules/cluster-logging-clo-status.adoc +++ b/modules/cluster-logging-clo-status.adoc @@ -9,7 +9,7 @@ You can view the status of your Cluster Logging Operator. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure @@ -20,9 +20,9 @@ You can view the status of your Cluster Logging Operator. $ oc project openshift-logging ---- -. To view the cluster logging status: +. To view the OpenShift Logging status: -.. Get the cluster logging status: +.. Get the OpenShift Logging status: + [source,terminal] ---- @@ -113,7 +113,7 @@ visualization: <4> [id="cluster-logging-clo-status-message_{context}"] == Example condition messages -The following are examples of some condition messages from the `Status.Nodes` section of the cluster logging instance. 
+The following are examples of some condition messages from the `Status.Nodes` section of the OpenShift Logging instance. // https://github.com/openshift/elasticsearch-operator/pull/92 diff --git a/modules/cluster-logging-collector-alerts-viewing.adoc b/modules/cluster-logging-collector-alerts-viewing.adoc index de8ce4fda364..a63a7401f8ad 100644 --- a/modules/cluster-logging-collector-alerts-viewing.adoc +++ b/modules/cluster-logging-collector-alerts-viewing.adoc @@ -13,7 +13,7 @@ Alerts are shown in the {product-title} web console, on the *Alerts* tab of the .Procedure -To view cluster logging and other {product-title} alerts: +To view OpenShift Logging and other {product-title} alerts: . In the {product-title} console, click *Monitoring* → *Alerting*. diff --git a/modules/cluster-logging-collector-envvar.adoc b/modules/cluster-logging-collector-envvar.adoc index 2ca0f3bd8dfe..ccc0fe7ed8ab 100644 --- a/modules/cluster-logging-collector-envvar.adoc +++ b/modules/cluster-logging-collector-envvar.adoc @@ -13,7 +13,7 @@ available environment variables. .Prerequisite -* Set cluster logging to the unmanaged state. Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. +* Set OpenShift Logging to the unmanaged state. Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. .Procedure diff --git a/modules/cluster-logging-collector-log-forwarding-about.adoc b/modules/cluster-logging-collector-log-forwarding-about.adoc index 95cefe3364de..9c8b55aa3a6c 100644 --- a/modules/cluster-logging-collector-log-forwarding-about.adoc +++ b/modules/cluster-logging-collector-log-forwarding-about.adoc @@ -49,7 +49,7 @@ Note the following: * You can use multiple types of outputs in the `ClusterLogForwarder` custom resource (CR) to send logs to servers that support different protocols. -* The internal {product-title} Elasticsearch instance does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. {product-title} cluster logging does not comply with those regulations. +* The internal {product-title} Elasticsearch instance does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. OpenShift Logging does not comply with those regulations. * You are responsible for creating and maintaining any additional configurations that external destinations might require, such as keys and secrets, service accounts, port openings, or global proxy configuration. diff --git a/modules/cluster-logging-collector-tolerations.adoc b/modules/cluster-logging-collector-tolerations.adoc index 40c46a4e63bd..ae2a49462830 100644 --- a/modules/cluster-logging-collector-tolerations.adoc +++ b/modules/cluster-logging-collector-tolerations.adoc @@ -24,7 +24,7 @@ tolerations: .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. 
.Procedure diff --git a/modules/cluster-logging-collector-tuning.adoc b/modules/cluster-logging-collector-tuning.adoc index 8c181469595f..7985f96cc150 100644 --- a/modules/cluster-logging-collector-tuning.adoc +++ b/modules/cluster-logging-collector-tuning.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-collector-tuning_{context}"] = Advanced configuration for the log forwarder -Cluster logging includes multiple Fluentd parameters that you can use for tuning the performance of the Fluentd log forwarder. With these parameters, you can change the following Fluentd behaviors: +OpenShift Logging includes multiple Fluentd parameters that you can use for tuning the performance of the Fluentd log forwarder. With these parameters, you can change the following Fluentd behaviors: * the size of Fluentd chunks and chunk buffer * the Fluentd chunk flushing behavior diff --git a/modules/cluster-logging-configuring-image-about.adoc b/modules/cluster-logging-configuring-image-about.adoc index 2c1d77c51cb8..e4b10cca24a0 100644 --- a/modules/cluster-logging-configuring-image-about.adoc +++ b/modules/cluster-logging-configuring-image-about.adoc @@ -3,9 +3,9 @@ // * logging/cluster-logging-configuring.adoc [id="cluster-logging-configuring-image-about_{context}"] -= Understanding the cluster logging component images += Understanding OpenShift Logging component images -There are several components in cluster logging, each one implemented with one +There are several components in OpenShift Logging, each one implemented with one or more images. Each image is specified by an environment variable defined in the *cluster-logging-operator* deployment in the *openshift-logging* project and should not be changed. diff --git a/modules/cluster-logging-cpu-memory.adoc b/modules/cluster-logging-cpu-memory.adoc index 2fecd5ffe449..e0de402fa9a3 100644 --- a/modules/cluster-logging-cpu-memory.adoc +++ b/modules/cluster-logging-cpu-memory.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-memory-limits_{context}"] = Configuring CPU and memory limits -The cluster logging components allow for adjustments to both the CPU and memory limits. +The OpenShift Logging components allow for adjustments to both the CPU and memory limits. .Procedure diff --git a/modules/cluster-logging-curator-delete-index.adoc b/modules/cluster-logging-curator-delete-index.adoc index c2703500626a..e94ee769b296 100644 --- a/modules/cluster-logging-curator-delete-index.adoc +++ b/modules/cluster-logging-curator-delete-index.adoc @@ -13,7 +13,7 @@ You can configure Curator to delete Elasticsearch data that uses the data model .Prerequisite -* Cluster logging must be installed. +* OpenShift Logging must be installed. .Procedure diff --git a/modules/cluster-logging-curator-schedule.adoc b/modules/cluster-logging-curator-schedule.adoc index 2c908675eac7..38248079ae72 100644 --- a/modules/cluster-logging-curator-schedule.adoc +++ b/modules/cluster-logging-curator-schedule.adoc @@ -6,11 +6,11 @@ = Configuring the Curator schedule You can specify the schedule for Curator using the `Cluster Logging` custom resource -created by the cluster logging installation. +created by the OpenShift Logging installation. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. 
.Procedure diff --git a/modules/cluster-logging-curator-troubleshoot.adoc b/modules/cluster-logging-curator-troubleshoot.adoc index 915cba182ddc..1375aebf4cbd 100644 --- a/modules/cluster-logging-curator-troubleshoot.adoc +++ b/modules/cluster-logging-curator-troubleshoot.adoc @@ -10,7 +10,7 @@ provide a reason, you could increase the log level and trigger a new job, instea .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure @@ -35,7 +35,7 @@ The default value is INFO. + [NOTE] ==== -Cluster logging uses the {product-title} custom environment variable `CURATOR_SCRIPT_LOG_LEVEL` in {product-title} wrapper scripts (`run.sh` and `convert.py`). +OpenShift Logging uses the {product-title} custom environment variable `CURATOR_SCRIPT_LOG_LEVEL` in {product-title} wrapper scripts (`run.sh` and `convert.py`). The environment variable takes the same values as `CURATOR_LOG_LEVEL` for script debugging, as needed. ==== diff --git a/modules/cluster-logging-deploy-cli.adoc b/modules/cluster-logging-deploy-cli.adoc index 94d82f259eac..c7872f37e67b 100644 --- a/modules/cluster-logging-deploy-cli.adoc +++ b/modules/cluster-logging-deploy-cli.adoc @@ -3,9 +3,9 @@ // * logging/cluster-logging-deploying.adoc [id="cluster-logging-deploy-cli_{context}"] -= Installing cluster logging using the CLI += Installing OpenShift Logging using the CLI -You can use the {product-title} CLI to install the Elasticsearch and Cluster Logging operators. +You can use the {product-title} CLI to install the Elasticsearch and Cluster Logging Operators. .Prerequisites @@ -181,7 +181,7 @@ There should be an Elasticsearch Operator in each Namespace. The version number . Install the Cluster Logging Operator by creating the following objects: -.. Create an Operator Group object YAML file (for example, `clo-og.yaml`) for the Cluster Logging operator: +.. Create an Operator Group object YAML file (for example, `clo-og.yaml`) for the Cluster Logging Operator: + [source,yaml] ---- @@ -258,18 +258,18 @@ oc get csv -n openshift-logging ---- NAMESPACE NAME DISPLAY VERSION REPLACES PHASE ... -openshift-logging clusterlogging.4.7.0-202007012112.p0 Cluster Logging 4.7.0-202007012112.p0 Succeeded +openshift-logging clusterlogging.4.7.0-202007012112.p0 OpenShift Logging 4.7.0-202007012112.p0 Succeeded ... ---- -. Create a Cluster Logging instance: +. Create a OpenShift Logging instance: .. Create an instance object YAML file (for example, `clo-instance.yaml`) for the Cluster Logging Operator: + [NOTE] ==== -This default Cluster Logging configuration should support a wide array of environments. Review the topics on tuning and -configuring the Cluster Logging components for information on modifications you can make to your Cluster Logging cluster. +This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and +configuring OpenShift Logging components for information on modifications you can make to your OpenShift Logging cluster. ==== + ifdef::openshift-dedicated[] @@ -362,12 +362,12 @@ spec: fluentd: {} ---- <1> The name must be `instance`. -<2> The cluster logging management state. In some cases, if you change the cluster logging defaults, you must set this to `Unmanaged`. -However, an unmanaged deployment does not receive updates until cluster logging is placed back into a managed state. Placing a deployment back into a managed state might revert any modifications you made. 
+<2> The OpenShift Logging management state. In some cases, if you change the OpenShift Logging defaults, you must set this to `Unmanaged`. +However, an unmanaged deployment does not receive updates until OpenShift Logging is placed back into a managed state. Placing a deployment back into a managed state might revert any modifications you made. <3> Settings for configuring Elasticsearch. Using the custom resource (CR), you can configure shard replication policy and persistent storage. <4> Specify the length of time that Elasticsearch should retain each log source. Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `7d` for seven days. Logs older than the `maxAge` are deleted. You must specify a retention policy for each log source or the Elasticsearch indices will not be created for that source. <5> Specify the number of Elasticsearch nodes. See the note that follows this list. -<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, {product-title} deploys cluster logging with ephemeral storage only. +<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, {product-title} deploys OpenShift Logging with ephemeral storage only. <7> Settings for configuring Kibana. Using the CR, you can scale Kibana for redundancy and configure the CPU and memory for your Kibana pods. For more information, see *Configuring Kibana*. <8> Settings for configuring the Curator schedule. Curator is used to remove data that is in the Elasticsearch index format prior to {product-title} 4.5 and will be removed in a later release. <9> Settings for configuring Fluentd. Using the CR, you can configure Fluentd CPU and memory limits. For more information, see *Configuring Fluentd*. @@ -411,11 +411,11 @@ For example: $ oc create -f clo-instance.yaml ---- + -This creates the Cluster Logging components, the `Elasticsearch` custom resource and components, and the Kibana interface. +This creates the OpenShift Logging components, the `Elasticsearch` custom resource and components, and the Kibana interface. . Verify the install by listing the pods in the *openshift-logging* project. + -You should see several pods for Cluster Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: +You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: + [source,terminal] ---- diff --git a/modules/cluster-logging-deploy-console.adoc b/modules/cluster-logging-deploy-console.adoc index e01fb91f6c96..a47a24a620e9 100644 --- a/modules/cluster-logging-deploy-console.adoc +++ b/modules/cluster-logging-deploy-console.adoc @@ -3,9 +3,9 @@ // * logging/cluster-logging-deploying.adoc [id="cluster-logging-deploy-console_{context}"] -= Installing cluster logging using the web console += Installing OpenShift Logging using the web console -You can use the {product-title} web console to install the Elasticsearch and Cluster Logging operators. +You can use the {product-title} web console to install the Elasticsearch and Cluster Logging Operators. .Prerequisites @@ -90,7 +90,7 @@ the *Status* column for any errors or failures. * Switch to the *Workloads* → *Pods* page and check the logs in any pods in the `openshift-logging` project that are reporting issues. -. 
Create a cluster logging instance: +. Create an OpenShift Logging instance: .. Switch to the *Administration* -> *Custom Resource Definitions* page. @@ -106,8 +106,8 @@ You might have to refresh the page to load the data. + [NOTE] ==== -This default cluster logging configuration should support a wide array of environments. Review the topics on tuning and -configuring the cluster logging components for information on modifications you can make to your cluster logging cluster. +This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and +configuring OpenShift Logging components for information on modifications you can make to your OpenShift Logging cluster. ==== + ifdef::openshift-dedicated[] @@ -200,12 +200,12 @@ spec: fluentd: {} ---- <1> The name must be `instance`. -<2> The cluster logging management state. In some cases, if you change the cluster logging defaults, you must set this to `Unmanaged`. -However, an unmanaged deployment does not receive updates until the cluster logging is placed back into a managed state. +<2> The OpenShift Logging management state. In some cases, if you change the OpenShift Logging defaults, you must set this to `Unmanaged`. +However, an unmanaged deployment does not receive updates until OpenShift Logging is placed back into a managed state. <3> Settings for configuring Elasticsearch. Using the CR, you can configure shard replication policy and persistent storage. <4> Specify the length of time that Elasticsearch should retain each log source. Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `7d` for seven days. Logs older than the `maxAge` are deleted. You must specify a retention policy for each log source or the Elasticsearch indices will not be created for that source. <5> Specify the number of Elasticsearch nodes. See the note that follows this list. -<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, {product-title} deploys cluster logging with ephemeral storage only. +<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, {product-title} deploys OpenShift Logging with ephemeral storage only. <7> Settings for configuring Kibana. Using the CR, you can scale Kibana for redundancy and configure the CPU and memory for your Kibana nodes. For more information, see *Configuring Kibana*. <8> Settings for configuring the Curator schedule. Curator is used to remove data that is in the Elasticsearch index format prior to {product-title} 4.5 and will be removed in a later release. <9> Settings for configuring Fluentd. Using the CR, you can configure Fluentd CPU and memory limits. For more information, see *Configuring Fluentd*. @@ -235,7 +235,7 @@ elasticsearch-cdm-x6kdekli-3 0/1 1 0 6m44s The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. ==== -.. Click *Create*. This creates the Cluster Logging components, the `Elasticsearch` custom resource and components, and the Kibana interface. +.. Click *Create*. This creates the OpenShift Logging components, the `Elasticsearch` custom resource and components, and the Kibana interface. .
Verify the install: @@ -243,7 +243,7 @@ The number of primary shards for the index templates is equal to the number of E .. Select the *openshift-logging* project. + -You should see several pods for cluster logging, Elasticsearch, Fluentd, and Kibana similar to the following list: +You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: + * cluster-logging-operator-cb795f8dc-xkckc * elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz diff --git a/modules/cluster-logging-deploy-multitenant.adoc b/modules/cluster-logging-deploy-multitenant.adoc index 95a8c9a1cfcc..b42e5da57c57 100644 --- a/modules/cluster-logging-deploy-multitenant.adoc +++ b/modules/cluster-logging-deploy-multitenant.adoc @@ -3,9 +3,9 @@ // * logging/cluster-logging-deploying.adoc [id="cluster-logging-deploy-multitenant_{context}"] -= Installing cluster logging into a multitenant network += Installing OpenShift Logging into a multitenant network -If you are deploying cluster logging into a cluster that uses multitenant isolation mode, projects are isolated from other projects. As a result, network traffic is not allowed between pods or services in different projects. +If you are deploying OpenShift Logging into a cluster that uses multitenant isolation mode, projects are isolated from other projects. As a result, network traffic is not allowed between pods or services in different projects. Because the Elasticsearch Operator and the Cluster Logging Operator are installed in different projects, you must explicitly allow access between the `openshift-operators-redhat` and `openshift-logging` projects. How you allow this access depends on how you configured multitenant isolation mode. diff --git a/modules/cluster-logging-deploy-storage-considerations.adoc b/modules/cluster-logging-deploy-storage-considerations.adoc index 098b4d36b5d0..f5cdf1e7182c 100644 --- a/modules/cluster-logging-deploy-storage-considerations.adoc +++ b/modules/cluster-logging-deploy-storage-considerations.adoc @@ -3,7 +3,7 @@ // * logging/cluster-logging-deploy.adoc [id="cluster-logging-deploy-storage-considerations_{context}"] -= Storage considerations for cluster logging and {product-title} += Storage considerations for OpenShift Logging and {product-title} //// An Elasticsearch index is a collection of primary shards and its corresponding replica diff --git a/modules/cluster-logging-deploying-about.adoc b/modules/cluster-logging-deploying-about.adoc index cb7b865f803f..cd2493e871ae 100644 --- a/modules/cluster-logging-deploying-about.adoc +++ b/modules/cluster-logging-deploying-about.adoc @@ -3,21 +3,21 @@ // * logging/cluster-logging-deploying-about.adoc [id="cluster-logging-deploying-about_{context}"] -= About deploying and configuring cluster logging += About deploying and configuring OpenShift Logging -{product-title} cluster logging is designed to be used with the default configuration, which is tuned for small to medium sized {product-title} clusters. +OpenShift Logging is designed to be used with the default configuration, which is tuned for small to medium sized {product-title} clusters. -The installation instructions that follow include a sample `ClusterLogging` custom resource (CR), which you can use to create a cluster logging instance -and configure your cluster logging environment. +The installation instructions that follow include a sample `ClusterLogging` custom resource (CR), which you can use to create an OpenShift Logging instance +and configure your OpenShift Logging environment.
-If you want to use the default cluster logging install, you can use the sample CR directly. +If you want to use the default OpenShift Logging install, you can use the sample CR directly. -If you want to customize your deployment, make changes to the sample CR as needed. The following describes the configurations you can make when installing your cluster logging instance or modify after installation. See the Configuring sections for more information on working with each component, including modifications you can make outside of the `ClusterLogging` custom resource. +If you want to customize your deployment, make changes to the sample CR as needed. The following describes the configurations you can make when installing your OpenShift Logging instance or modify after installation. See the Configuring sections for more information on working with each component, including modifications you can make outside of the `ClusterLogging` custom resource. [id="cluster-logging-deploy-about-config_{context}"] -== Configuring and Tuning Cluster Logging +== Configuring and Tuning OpenShift Logging -You can configure your cluster logging environment by modifying the `ClusterLogging` custom resource deployed +You can configure your OpenShift Logging environment by modifying the `ClusterLogging` custom resource deployed in the `openshift-logging` project. You can modify any of the following components upon install or after install: diff --git a/modules/cluster-logging-elasticsearch-audit.adoc b/modules/cluster-logging-elasticsearch-audit.adoc index c2702a492776..7d243ddf6f55 100644 --- a/modules/cluster-logging-elasticsearch-audit.adoc +++ b/modules/cluster-logging-elasticsearch-audit.adoc @@ -11,7 +11,7 @@ If you want to send the audit logs to the internal log store, for example to vie [IMPORTANT] ==== -The internal {product-title} Elasticsearch log store does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. {product-title} cluster logging does not comply with those regulations. +The internal {product-title} Elasticsearch log store does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. OpenShift Logging does not comply with those regulations. ==== .Procedure diff --git a/modules/cluster-logging-elasticsearch-exposing.adoc b/modules/cluster-logging-elasticsearch-exposing.adoc index ff326fa707ea..517305fa8a59 100644 --- a/modules/cluster-logging-elasticsearch-exposing.adoc +++ b/modules/cluster-logging-elasticsearch-exposing.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-elasticsearch-exposing_{context}"] = Exposing the log store service as a route -By default, the log store that is deployed with cluster logging is not +By default, the log store that is deployed with OpenShift Logging is not accessible from outside the logging cluster. You can enable a route with re-encryption termination for external access to the log store service for those tools that access its data. @@ -58,7 +58,7 @@ $ oc exec elasticsearch-cdm-oplnhinv-1-5746475887-fj2f8 -n openshift-logging -- .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. * You must have access to the project in order to be able to access to the logs. 
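For illustration only, a minimal sketch of querying the log store after the route described in the exposing module above is created. The route name `elasticsearch` and the `_cat/indices` query are assumptions for this sketch, not values taken from the module:

[source,terminal]
----
# Hypothetical example: query the exposed log store from outside the cluster.
# Assumes a re-encrypt route named "elasticsearch" exists in the openshift-logging project.
$ token=$(oc whoami -t)
$ routeES=$(oc get route elasticsearch -n openshift-logging -o jsonpath='{.spec.host}')
$ curl --tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://${routeES}/_cat/indices?v"
----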
diff --git a/modules/cluster-logging-elasticsearch-ha.adoc b/modules/cluster-logging-elasticsearch-ha.adoc index 0d5a46abdae6..ac05bfd0a299 100644 --- a/modules/cluster-logging-elasticsearch-ha.adoc +++ b/modules/cluster-logging-elasticsearch-ha.adoc @@ -9,7 +9,7 @@ You can define how Elasticsearch shards are replicated across data nodes in the .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc b/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc index 933bea78f758..1b78a8e59497 100644 --- a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc +++ b/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc @@ -15,7 +15,7 @@ When using emptyDir, if log storage is restarted or redeployed, you will lose da .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-elasticsearch-retention.adoc b/modules/cluster-logging-elasticsearch-retention.adoc index 5b0d382b584b..d34c36818a06 100644 --- a/modules/cluster-logging-elasticsearch-retention.adoc +++ b/modules/cluster-logging-elasticsearch-retention.adoc @@ -5,38 +5,33 @@ [id="cluster-logging-elasticsearch-retention_{context}"] = Configuring log retention time -You can specify how long the default Elasticsearch log store keeps indices -using a separate _retention policy_ for each of the three log sources: -infrastructure logs, application logs, and audit logs. The retention policy, -which you configure using the `maxAge` parameter in the Cluster Logging Custom -Resource (CR), is considered for the Elasticsearch roll over schedule and -determines when Elasticsearch deletes the rolled-over indices. +You can specify how long the default Elasticsearch log store keeps indices using a separate _retention policy_ for each of the three log sources: infrastructure logs, application logs, and audit logs. The retention policy, which you configure using the `maxAge` parameter in the `ClusterLogging` custom resource (CR), is considered for the Elasticsearch rollover schedule and determines when Elasticsearch deletes the rolled-over indices. -Elasticsearch rolls over an index, moving the current index and creating a new +Elasticsearch rolls over an index, moving the current index and creating a new index, when an index matches any of the following conditions: * The index is older than the `rollover.maxAge` value in the `Elasticsearch` CR. * The index size is greater than 40 GB × the number of primary shards. * The index doc count is greater than 40960 KB × the number of primary shards. -Elasticsearch deletes the rolled-over indices are deleted based on the +Elasticsearch deletes the rolled-over indices based on the retention policy you configure. -If you do not create a retention policy for any of the log sources, logs -are deleted after seven days by default. +If you do not create a retention policy for any of the log sources, logs +are deleted after seven days by default. [IMPORTANT] ==== -If you do not specify a retention policy for all three log sources, only logs -from the sources with a retention policy are stored.
For example, if you -set a retention policy for the infrastructure and applicaiton logs, but do not -set a retention policy for audit logs, the audit logs will not be retained -and there will be no *audit-* index in Elasticsearch or Kibana. +If you do not specify a retention policy for all three log sources, only logs +from the sources with a retention policy are stored. For example, if you +set a retention policy for the infrastructure and applicaiton logs, but do not +set a retention policy for audit logs, the audit logs will not be retained +and there will be no *audit-* index in Elasticsearch or Kibana. ==== .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure @@ -64,17 +59,17 @@ spec: nodeCount: 3 ... ---- -<1> Specify the time that Elasticsearch should retain each log source. Enter an -integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). -For example, `1d` for one day. Logs older than the `maxAge` are deleted. -By default, logs are retained for seven days. +<1> Specify the time that Elasticsearch should retain each log source. Enter an +integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). +For example, `1d` for one day. Logs older than the `maxAge` are deleted. +By default, logs are retained for seven days. . You can verify the settings in the `Elasticsearch` custom resource (CR). + For example, the Cluster Logging Operator updated the following -`Elasticsearch` CR to configure a retention policy that includes settings -to roll over active indices for the infrastructure logs every eight hours and -the rolled-ver indices are deleted seven days after rollover. {product-title} checks +`Elasticsearch` CR to configure a retention policy that includes settings +to roll over active indices for the infrastructure logs every eight hours and +the rolled-ver indices are deleted seven days after rollover. {product-title} checks every 15 minutes to determine if the indices need to be rolled over. + [source,yaml] @@ -98,22 +93,22 @@ spec: pollInterval: 15m <4> ... ---- -<1> For each log source, the retention policy indicates when to delete and -rollover logs for that source. -<2> When {product-title} deletes the rolled-over indices. This setting +<1> For each log source, the retention policy indicates when to delete and +rollover logs for that source. +<2> When {product-title} deletes the rolled-over indices. This setting is the `maxAge` you set in the `ClusterLogging` CR. -<3> The index age for {product-title} to consider when rolling over the indices. -This value is determined from the `maxAge` you set in the `ClusterLogging` CR. -<4> When {product-title} checks if the indices should be rolled over. +<3> The index age for {product-title} to consider when rolling over the indices. +This value is determined from the `maxAge` you set in the `ClusterLogging` CR. +<4> When {product-title} checks if the indices should be rolled over. This setting is the default and cannot be changed. + [NOTE] ==== -Modifying the `Elasticsearch` CR is not supported. All changes to the retention +Modifying the `Elasticsearch` CR is not supported. All changes to the retention policies must be made in the `ClusterLogging` CR. -==== +==== + -The Elasticsearch Operator deploys a cron job to roll over indices for each +The Elasticsearch Operator deploys a cron job to roll over indices for each mapping using the defined policy, scheduled using the `pollInterval`. 
+ [source,terminal] @@ -132,4 +127,3 @@ elasticsearch-rollover-app */15 * * * * False 0 elasticsearch-rollover-audit */15 * * * * False 0 27s elasticsearch-rollover-infra */15 * * * * False 0 27s ---- - diff --git a/modules/cluster-logging-elasticsearch-storage.adoc b/modules/cluster-logging-elasticsearch-storage.adoc index 0f4933d00b0f..3ebc847945bd 100644 --- a/modules/cluster-logging-elasticsearch-storage.adoc +++ b/modules/cluster-logging-elasticsearch-storage.adoc @@ -17,7 +17,7 @@ occur. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-elasticsearch-tolerations.adoc b/modules/cluster-logging-elasticsearch-tolerations.adoc index 4a7f9c5f80f3..ae4ed73dc2cc 100644 --- a/modules/cluster-logging-elasticsearch-tolerations.adoc +++ b/modules/cluster-logging-elasticsearch-tolerations.adoc @@ -25,11 +25,11 @@ tolerations: .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure -. Use the following command to add a taint to a node where you want to schedule the cluster logging pods: +. Use the following command to add a taint to a node where you want to schedule the OpenShift Logging pods: + [source,terminal] ---- diff --git a/modules/cluster-logging-eventrouter-about.adoc b/modules/cluster-logging-eventrouter-about.adoc index 3c4ef6e9ccbb..79719c38718e 100644 --- a/modules/cluster-logging-eventrouter-about.adoc +++ b/modules/cluster-logging-eventrouter-about.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-eventrouter-about_{context}"] = About event routing -The Event Router is a pod that watches {product-title} events so they can be collected by cluster logging. +The Event Router is a pod that watches {product-title} events so they can be collected by OpenShift Logging. The Event Router collects events from all projects and writes them to `STDOUT`. Fluentd collects those events and forwards them into the {product-title} Elasticsearch instance. Elasticsearch indexes the events to the `infra` index. You must manually deploy the Event Router. diff --git a/modules/cluster-logging-eventrouter-deploy.adoc b/modules/cluster-logging-eventrouter-deploy.adoc index 6c20db7b2a39..7a5c6508b407 100644 --- a/modules/cluster-logging-eventrouter-deploy.adoc +++ b/modules/cluster-logging-eventrouter-deploy.adoc @@ -13,7 +13,7 @@ The following Template object creates the service account, cluster role, and clu * You need proper permissions to create service accounts and update cluster role bindings. For example, you can run the following template with a user that has the *cluster-admin* role. -* Cluster logging must be installed. +* OpenShift Logging must be installed. .Procedure @@ -26,7 +26,7 @@ apiVersion: v1 metadata: name: eventrouter-template annotations: - description: "A pod forwarding kubernetes events to cluster logging stack." + description: "A pod forwarding kubernetes events to OpenShift Logging stack." 
tags: "events,EFK,logging,cluster-logging" objects: - kind: ServiceAccount <1> diff --git a/modules/cluster-logging-exported-fields-aushape.adoc b/modules/cluster-logging-exported-fields-aushape.adoc index ad735d1cbcde..3223ed28b26e 100644 --- a/modules/cluster-logging-exported-fields-aushape.adoc +++ b/modules/cluster-logging-exported-fields-aushape.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-exported-fields-aushape_{context}"] = Aushape exported fields -These are the Aushape fields exported by the {product-title} cluster logging available for searching +These are the Aushape fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. Audit events converted with Aushape. For more information, see diff --git a/modules/cluster-logging-exported-fields-container.adoc b/modules/cluster-logging-exported-fields-container.adoc index d122c79f491e..d893b804f0cc 100644 --- a/modules/cluster-logging-exported-fields-container.adoc +++ b/modules/cluster-logging-exported-fields-container.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-exported-fields-container_{context}"] = Container exported fields -These are the Docker fields exported by the {product-title} cluster logging available for searching from Elasticsearch and Kibana. +These are the Docker fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. Namespace for docker container-specific metadata. The docker.container_id is the Docker container ID. diff --git a/modules/cluster-logging-exported-fields-kubernetes.adoc b/modules/cluster-logging-exported-fields-kubernetes.adoc index 4dfa66e7d978..fcb5db82fd47 100644 --- a/modules/cluster-logging-exported-fields-kubernetes.adoc +++ b/modules/cluster-logging-exported-fields-kubernetes.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-exported-fields-kubernetes_{context}"] = Kubernetes exported fields -These are the Kubernetes fields exported by the {product-title} cluster logging available for searching +These are the Kubernetes fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. The namespace for Kubernetes-specific metadata. The `kubernetes.pod_name` is the diff --git a/modules/cluster-logging-exported-fields-ovirt.adoc b/modules/cluster-logging-exported-fields-ovirt.adoc index 6a2e4698984d..d05611540b7a 100644 --- a/modules/cluster-logging-exported-fields-ovirt.adoc +++ b/modules/cluster-logging-exported-fields-ovirt.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-exported-fields-ovirt_{context}"] = oVirt exported fields -These are the oVirt fields exported by the {product-title} cluster logging available for searching +These are the oVirt fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. Namespace for oVirt metadata. diff --git a/modules/cluster-logging-exported-fields-systemd.adoc b/modules/cluster-logging-exported-fields-systemd.adoc index 759980d8f8ac..4e74873b5f1d 100644 --- a/modules/cluster-logging-exported-fields-systemd.adoc +++ b/modules/cluster-logging-exported-fields-systemd.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-exported-fields-systemd_{context}"] = `systemd` exported fields -These are the `systemd` fields exported by the {product-title} cluster logging available for searching +These are the `systemd` fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. Contains common fields specific to `systemd` journal. 
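As a rough illustration of how these exported fields can be used, the following hypothetical query searches the log store for records from one pod by its `kubernetes.pod_name` field. The `app-*` index pattern, the route name, and the pod name are assumptions for this sketch, not values taken from these modules:

[source,terminal]
----
# Hypothetical search on an exported field; index pattern, route, and pod name are placeholders.
$ routeES=$(oc get route elasticsearch -n openshift-logging -o jsonpath='{.spec.host}')
$ curl --tlsv1.2 --insecure -H "Authorization: Bearer $(oc whoami -t)" \
  "https://${routeES}/app-*/_search?q=kubernetes.pod_name:my-app-pod&size=5&pretty"
----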
diff --git a/modules/cluster-logging-exported-fields-tlog.adoc b/modules/cluster-logging-exported-fields-tlog.adoc index e58011d69634..82724afc1591 100644 --- a/modules/cluster-logging-exported-fields-tlog.adoc +++ b/modules/cluster-logging-exported-fields-tlog.adoc @@ -5,7 +5,7 @@ [id="cluster-logging-exported-fields-tlog_{context}"] = Tlog exported fields -These are the Tlog fields exported by the {product-title} cluster logging system and available for searching +These are the Tlog fields exported by the OpenShift Logging system and available for searching from Elasticsearch and Kibana. Tlog terminal I/O recording messages. For more information see diff --git a/modules/cluster-logging-forwarding-about.adoc b/modules/cluster-logging-forwarding-about.adoc index c533f34491df..628257362fc5 100644 --- a/modules/cluster-logging-forwarding-about.adoc +++ b/modules/cluster-logging-forwarding-about.adoc @@ -5,5 +5,5 @@ [id="cluster-logging-forwarding-about_{context}"] = About log forwarding -By default, {product-title} cluster logging sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource (CR). If you want to forward logs to other log aggregators, you can use the log forwarding features to send logs to specific endpoints within or outside your cluster. +By default, OpenShift Logging sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource (CR). If you want to forward logs to other log aggregators, you can use the log forwarding features to send logs to specific endpoints within or outside your cluster. diff --git a/modules/cluster-logging-kibana-tolerations.adoc b/modules/cluster-logging-kibana-tolerations.adoc index 8cf929d7d1fb..b9cbb7c94010 100644 --- a/modules/cluster-logging-kibana-tolerations.adoc +++ b/modules/cluster-logging-kibana-tolerations.adoc @@ -15,7 +15,7 @@ that is not on other pods ensures only the Kibana pod can run on that node. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-log-forwarding-disable.adoc b/modules/cluster-logging-log-forwarding-disable.adoc index 206cad4d246f..680ea9b95686 100644 --- a/modules/cluster-logging-log-forwarding-disable.adoc +++ b/modules/cluster-logging-log-forwarding-disable.adoc @@ -9,14 +9,14 @@ To disable the Log Forwarding feature, remove the `clusterlogging.openshift.io/l [IMPORTANT] ==== -You cannot disable Log Forwarding by setting the `disableDefaultForwarding` to `false` in the `ClusterLogForwarder` CR. This prevents cluster logging from sending logs to the specified endpoints *and* to default internal {product-title} Elasticsearch instance. +You cannot disable Log Forwarding by setting the `disableDefaultForwarding` to `false` in the `ClusterLogForwarder` CR. This prevents OpenShift Logging from sending logs to the specified endpoints *and* to the default internal {product-title} Elasticsearch instance. ==== .Procedure To disable the Log Forwarding feature: -. Edit the Cluster Logging CR in the `openshift-logging` project: +.
Edit the OpenShift Logging CR in the `openshift-logging` project: + [source,terminal] ---- diff --git a/modules/cluster-logging-log-store-status-viewing.adoc b/modules/cluster-logging-log-store-status-viewing.adoc index 1ab2f14a8e9e..b0a0eb128f97 100644 --- a/modules/cluster-logging-log-store-status-viewing.adoc +++ b/modules/cluster-logging-log-store-status-viewing.adoc @@ -9,7 +9,7 @@ You can view the status of your log store. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-logstore-limits.adoc b/modules/cluster-logging-logstore-limits.adoc index e31432035265..791db3ed3b00 100644 --- a/modules/cluster-logging-logstore-limits.adoc +++ b/modules/cluster-logging-logstore-limits.adoc @@ -14,7 +14,7 @@ For production use, you should have no less than the default 16Gi allocated to e .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-maintenance-support-about.adoc b/modules/cluster-logging-maintenance-support-about.adoc index 86391f327833..1da46f588ae9 100644 --- a/modules/cluster-logging-maintenance-support-about.adoc +++ b/modules/cluster-logging-maintenance-support-about.adoc @@ -5,9 +5,9 @@ [id="cluster-logging-maintenance-support-about_{context}"] = About unsupported configurations -The supported way of configuring cluster logging is by configuring it using the options described in this documentation. Do not use other configurations, as they are unsupported. Configuration paradigms might change across {product-title} releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. If you use configurations other than those described in this documentation, your changes will disappear because the Elasticsearch Operator and Cluster Logging Operator reconcile any differences. The Operators reverse everything to the defined state by default and by design. +The supported way of configuring OpenShift Logging is by configuring it using the options described in this documentation. Do not use other configurations, as they are unsupported. Configuration paradigms might change across {product-title} releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. If you use configurations other than those described in this documentation, your changes will disappear because the Elasticsearch Operator and Cluster Logging Operator reconcile any differences. The Operators reverse everything to the defined state by default and by design. [NOTE] ==== -If you _must_ perform configurations not described in the {product-title} documentation, you _must_ set your Cluster Logging Operator or Elasticsearch Operator to *Unmanaged*. An unmanaged cluster logging environment is _not supported_ and does not receive updates until you return cluster logging to *Managed*. +If you _must_ perform configurations not described in the {product-title} documentation, you _must_ set your Cluster Logging Operator or Elasticsearch Operator to *Unmanaged*. An unmanaged OpenShift Logging environment is _not supported_ and does not receive updates until you return OpenShift Logging to *Managed*. 
==== diff --git a/modules/cluster-logging-manual-rollout-rolling.adoc b/modules/cluster-logging-manual-rollout-rolling.adoc index 35f09fbb9e6d..13208aa1bd1d 100644 --- a/modules/cluster-logging-manual-rollout-rolling.adoc +++ b/modules/cluster-logging-manual-rollout-rolling.adoc @@ -13,7 +13,7 @@ runs requires a reboot. .Prerequisite -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. * Install the {product-title} link:https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch#es_util[*es_util*] tool diff --git a/modules/cluster-logging-must-gather-about.adoc b/modules/cluster-logging-must-gather-about.adoc index fd42bc04e4f9..4ab38170a3ca 100644 --- a/modules/cluster-logging-must-gather-about.adoc +++ b/modules/cluster-logging-must-gather-about.adoc @@ -7,11 +7,11 @@ The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues. -For your cluster logging environment, `must-gather` collects the following information: +For your OpenShift Logging environment, `must-gather` collects the following information: * project-level resources, including pods, configuration maps, service accounts, roles, role bindings, and events at the project level * cluster-level resources, including nodes, roles, and role bindings at the cluster level -* cluster logging resources in the `openshift-logging` and `openshift-operators-redhat` namespaces, including health status for the log collector, the log store, the curator, and the log visualizer +* OpenShift Logging resources in the `openshift-logging` and `openshift-operators-redhat` namespaces, including health status for the log collector, the log store, the curator, and the log visualizer When you run `oc adm must-gather`, a new pod is created on the cluster. The data is collected on that pod and saved in a new directory that starts with `must-gather.local`. This directory is created in the current working directory. diff --git a/modules/cluster-logging-must-gather-collecting.adoc b/modules/cluster-logging-must-gather-collecting.adoc index 8915e761a5aa..0ba6694bd737 100644 --- a/modules/cluster-logging-must-gather-collecting.adoc +++ b/modules/cluster-logging-must-gather-collecting.adoc @@ -3,17 +3,17 @@ // * logging/troubleshooting/cluster-logging-must-gather.adoc [id="cluster-logging-must-gather-collecting_{context}"] -= Collecting cluster logging data += Collecting OpenShift Logging data -You can use the `oc adm must-gather` CLI command to collect information about your cluster logging environment. +You can use the `oc adm must-gather` CLI command to collect information about your OpenShift Logging environment. .Procedure -To collect cluster logging information with `must-gather`: +To collect OpenShift Logging information with `must-gather`: . Navigate to the directory where you want to store the `must-gather` information. -. Run the `oc adm must-gather` command against the cluster logging image: +. 
Run the `oc adm must-gather` command against the OpenShift Logging image: + ifndef::openshift-origin[] [source,terminal] diff --git a/modules/cluster-logging-systemd-scaling.adoc b/modules/cluster-logging-systemd-scaling.adoc index dc392285a981..36756995230d 100644 --- a/modules/cluster-logging-systemd-scaling.adoc +++ b/modules/cluster-logging-systemd-scaling.adoc @@ -3,14 +3,14 @@ // * logging/config/cluster-logging-systemd [id="cluster-logging-systemd-scaling_{context}"] -= Configuring systemd-journald for cluster logging += Configuring systemd-journald for OpenShift Logging As you scale up your project, the default logging environment might need some adjustments. For example, if you are missing logs, you might have to increase the rate limits for journald. You can adjust the number of messages to retain for a specified period of time to ensure that -cluster logging does not use excessive resources without dropping logs. +OpenShift Logging does not use excessive resources without dropping logs. You can also determine if you want the logs compressed, how long to retain logs, how or if the logs are stored, and other settings. diff --git a/modules/cluster-logging-uninstall.adoc b/modules/cluster-logging-uninstall.adoc index 88ac67f50680..021371971c81 100644 --- a/modules/cluster-logging-uninstall.adoc +++ b/modules/cluster-logging-uninstall.adoc @@ -3,17 +3,17 @@ // * logging/cluster-logging-uninstall.adoc [id="cluster-logging-uninstall_{context}"] -= Uninstalling cluster logging from {product-title} += Uninstalling OpenShift Logging from {product-title} -You can stop log aggregation by deleting the `ClusterLogging` custom resource (CR). However, after deleting the CR there are other cluster logging components that remain, which you can optionally remove. +You can stop log aggregation by deleting the `ClusterLogging` custom resource (CR). However, after deleting the CR there are other OpenShift Logging components that remain, which you can optionally remove. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure -To remove cluster logging: +To remove OpenShift Logging: . Use the {product-title} web console to remove the `ClusterLogging` CR: @@ -43,7 +43,7 @@ To remove cluster logging: .. Click the Options menu {kebab} next to the Elasticsearch Operator and select *Uninstall Operator*. -. Optional: Remove the Cluster Logging and Elasticsearch projects. +. Optional: Remove the OpenShift Logging and Elasticsearch projects. .. Switch to the *Home* -> *Projects* page. @@ -60,7 +60,7 @@ Do not delete the `openshift-operators-redhat` project if other global operators .. Confirm the deletion by typing `openshift-operators-redhat` in the dialog box and click *Delete*. -. Optional: Remove any Cluster Logging persistent volume claims (PVC): +. Optional: Remove any OpenShift Logging persistent volume claims (PVC): .. Switch to the *Storage* -> *Persistent Volume Claims* page. 
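As a hedged aside to the console steps above, the same optional persistent volume claim cleanup can be sketched from the CLI; the PVC name below is a placeholder, not a value from the module:

[source,terminal]
----
# Hypothetical CLI equivalent of the optional console cleanup:
# list the remaining PVCs, then delete the ones left behind by Elasticsearch.
$ oc get pvc -n openshift-logging
$ oc delete pvc <pvc_name> -n openshift-logging
----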
diff --git a/modules/cluster-logging-updating-logging.adoc b/modules/cluster-logging-updating-logging.adoc index 6174295a4229..482a856a0978 100644 --- a/modules/cluster-logging-updating-logging.adoc +++ b/modules/cluster-logging-updating-logging.adoc @@ -3,9 +3,9 @@ // * logging/cluster-logging-upgrading.adoc [id="cluster-logging-updating-logging_{context}"] -= Updating cluster logging += Updating OpenShift Logging -After updating the {product-title} cluster, you can update cluster logging from 4.6 to 4.7 by changing the subscription for the Elasticsearch Operator and the Cluster Logging Operator. +After updating the {product-title} cluster, you can update OpenShift Logging from 4.6 to 4.7 by changing the subscription for the Elasticsearch Operator and the Cluster Logging Operator. When you update: @@ -18,14 +18,14 @@ If you update the Cluster Logging Operator before the Elasticsearch Operator, Ki [IMPORTANT] ==== -If your cluster logging version is prior to 4.5, you must upgrade cluster logging to 4.6 before updating to 4.7. +If your OpenShift Logging version is prior to 4.5, you must upgrade OpenShift Logging to 4.6 before updating to 4.7. ==== .Prerequisites * Update the {product-title} cluster from 4.6 to 4.7. -* Make sure the cluster logging status is healthy: +* Make sure the OpenShift Logging status is healthy: + ** All pods are `ready`. ** The Elasticsearch cluster is healthy. @@ -77,7 +77,7 @@ The Cluster Logging Operator is shown as 4.7. For example: + [source,terminal] ---- -Cluster Logging +OpenShift Logging 4.7.0-202007012112.p0 provided by Red Hat, Inc ---- diff --git a/modules/cluster-logging-viewing-logs-console.adoc b/modules/cluster-logging-viewing-logs-console.adoc index 9a6e4bcddf80..d09ce5ae39a6 100644 --- a/modules/cluster-logging-viewing-logs-console.adoc +++ b/modules/cluster-logging-viewing-logs-console.adoc @@ -9,7 +9,7 @@ You can view cluster logs in the {product-title} web console . .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-viewing-logs.adoc b/modules/cluster-logging-viewing-logs.adoc index 56461838a041..0186389257aa 100644 --- a/modules/cluster-logging-viewing-logs.adoc +++ b/modules/cluster-logging-viewing-logs.adoc @@ -9,7 +9,7 @@ You can view cluster logs in the CLI. .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. .Procedure diff --git a/modules/cluster-logging-visualizer-kibana.adoc b/modules/cluster-logging-visualizer-kibana.adoc index 48e3c484b905..4fdf2593d47f 100644 --- a/modules/cluster-logging-visualizer-kibana.adoc +++ b/modules/cluster-logging-visualizer-kibana.adoc @@ -9,7 +9,7 @@ You view cluster logs in the Kibana web console. The methods for viewing and vis .Prerequisites -* Cluster logging and Elasticsearch must be installed. +* OpenShift Logging and Elasticsearch must be installed. * Kibana index patterns must exist. 
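As a hedged complement to the console verification above, a quick CLI check of the installed operator versions after the update; the exact ClusterServiceVersion names vary by release, so treat this only as a sketch:

[source,terminal]
----
# Hypothetical post-update check: list the ClusterServiceVersions for the
# Cluster Logging and Elasticsearch Operators and confirm they report 4.7.
$ oc get csv -n openshift-logging
$ oc get csv -n openshift-operators-redhat
----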
diff --git a/modules/dedicated-cluster-install-deploy.adoc b/modules/dedicated-cluster-install-deploy.adoc index c64908de7ebc..bbb408007833 100644 --- a/modules/dedicated-cluster-install-deploy.adoc +++ b/modules/dedicated-cluster-install-deploy.adoc @@ -4,23 +4,23 @@ [id="dedicated-cluster-install-deploy"] -= Installing the Cluster Logging and Elasticsearch Operators += Installing OpenShift Logging and Elasticsearch Operators -You can use the {product-title} console to install cluster logging by deploying instances of -the Cluster Logging and Elasticsearch Operators. The Cluster Logging Operator +You can use the {product-title} console to install OpenShift Logging by deploying instances of +the Cluster Logging Operator and the Elasticsearch Operator. The Cluster Logging Operator creates and manages the components of the logging stack. The Elasticsearch Operator -creates and manages the Elasticsearch cluster used by cluster logging. +creates and manages the Elasticsearch cluster used by OpenShift Logging. [NOTE] ==== -The {product-title} cluster logging solution requires that you install both the +The OpenShift Logging solution requires that you install both the Cluster Logging Operator and Elasticsearch Operator. When you deploy an instance of the Cluster Logging Operator, it also deploys an instance of the Elasticsearch Operator. ==== Your OpenShift Dedicated cluster includes 600 GiB of persistent storage that is -exclusively available for deploying Elasticsearch for cluster logging. +exclusively available for deploying Elasticsearch for OpenShift Logging. Elasticsearch is a memory-intensive application. Each Elasticsearch node needs 8G of memory for both memory requests and limits. Each Elasticsearch node can @@ -67,7 +67,7 @@ the *Status* column for any errors or failures. * Switch to the *Workloads* → *Pods* page and check the logs in each Pod in the `openshift-logging` project that is reporting issues. -. Create and deploy a cluster logging instance: +. Create and deploy an OpenShift Logging instance: .. Switch to the *Operators* → *Installed Operators* page. @@ -78,7 +78,7 @@ the *Status* column for any errors or failures. radio button and paste the following YAML definition into the window that displays. + -.Cluster Logging Custom Resource (CR) +.Cluster Logging custom resource (CR) [source,yaml] ---- apiVersion: "logging.openshift.io/v1" @@ -122,15 +122,15 @@ spec: ---- .. Click *Create* to deploy the logging instance, which creates the Cluster -Logging and Elasticsearch Custom Resources. +Logging and Elasticsearch custom resources. -. Verify that the pods for the Cluster Logging instance deployed: +. Verify that the pods for the OpenShift Logging instance deployed: .. Switch to the *Workloads* → *Pods* page. .. Select the *openshift-logging* project. + -You should see several pods for cluster logging, Elasticsearch, Fluentd, and Kibana similar to the following list: +You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: + * cluster-logging-operator-cb795f8dc-xkckc * elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz @@ -144,5 +144,5 @@ You should see several pods for cluster logging, Elasticsearch, Fluentd, and Kib * fluentd-zqgqx * kibana-7fb4fd4cc9-bvt4p -. Access the Cluster Logging interface, *Kibana*, from the *Monitoring* → +. Access the OpenShift Logging interface, *Kibana*, from the *Monitoring* → *Logging* page of the {product-title} web console.
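A hedged CLI spot check that mirrors the console verification in the module above; the resource name `instance` matches the sample CR, and everything else is generic:

[source,terminal]
----
# Hypothetical verification from the CLI: list the logging pods and inspect
# the ClusterLogging instance that the console steps created.
$ oc get pods -n openshift-logging
$ oc get clusterlogging instance -n openshift-logging -o yaml
----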
diff --git a/modules/gathering-data-specific-features.adoc b/modules/gathering-data-specific-features.adoc index 76b9a27776e9..45244fdf06a3 100644 --- a/modules/gathering-data-specific-features.adoc +++ b/modules/gathering-data-specific-features.adoc @@ -42,7 +42,7 @@ ifndef::openshift-origin[] |Data collection for Red Hat OpenShift Container Storage. |`registry.redhat.io/openshift4/ose-cluster-logging-operator` -|Data collection for Red Hat OpenShift cluster logging. +|Data collection for OpenShift Logging. |=== @@ -71,7 +71,7 @@ ifdef::openshift-origin[] |Data collection for OpenShift Container Storage. |`quay.io/openshift/origin-cluster-logging-operator` -|Data collection for Red Hat OpenShift cluster logging. +|Data collection for OpenShift Logging. |=== @@ -107,7 +107,7 @@ $ oc adm must-gather \ <1> The default {product-title} `must-gather` image <2> The must-gather image for {VirtProductName} + -You can use the `must-gather` tool with additional arguments to gather data that is specifically related to cluster logging and the Cluster Logging Operator in your cluster. For cluster logging, run the following command: +You can use the `must-gather` tool with additional arguments to gather data that is specifically related to OpenShift Logging and the Cluster Logging Operator in your cluster. For OpenShift Logging, run the following command: + [source,terminal] ---- @@ -115,7 +115,7 @@ $ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/clust -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') ---- + -.Example `must-gather` output for cluster logging +.Example `must-gather` output for OpenShift Logging [%collapsible] ==== [source,terminal] diff --git a/modules/infrastructure-moving-logging.adoc b/modules/infrastructure-moving-logging.adoc index 507c3cc6b540..d50cb8a8464a 100644 --- a/modules/infrastructure-moving-logging.adoc +++ b/modules/infrastructure-moving-logging.adoc @@ -4,9 +4,9 @@ // * logging/cluster-logging-moving.adoc [id="infrastructure-moving-logging_{context}"] -= Moving the cluster logging resources += Moving OpenShift Logging resources -You can configure the Cluster Logging Operator to deploy the pods for any or all of the Cluster Logging components, Elasticsearch, Kibana, and Curator to different nodes. You cannot move the Cluster Logging Operator pod from its installed location. +You can configure the Cluster Logging Operator to deploy the pods for any or all of the OpenShift Logging components, Elasticsearch, Kibana, and Curator to different nodes. You cannot move the Cluster Logging Operator pod from its installed location. For example, you can move the Elasticsearch pods to a separate node because of high CPU, memory, and disk requirements. @@ -17,7 +17,7 @@ You should set your machine set to use at least 6 replicas. .Prerequisites -* Cluster logging and Elasticsearch must be installed. These features are not installed by default. +* OpenShift Logging and Elasticsearch must be installed. These features are not installed by default. .Procedure diff --git a/modules/jaeger-install-elasticsearch.adoc b/modules/jaeger-install-elasticsearch.adoc index 2485e1def307..0943c4734567 100644 --- a/modules/jaeger-install-elasticsearch.adoc +++ b/modules/jaeger-install-elasticsearch.adoc @@ -20,7 +20,7 @@ Do not install Community versions of the Operators. 
Community Operators are not [NOTE] ==== -If you have already installed the Elasticsearch Operator as part of OpenShift cluster logging, you do not need to install the Elasticsearch Operator again. The Jaeger Operator will create the Elasticsearch instance using the installed Elasticsearch Operator. +If you have already installed the Elasticsearch Operator as part of OpenShift Logging, you do not need to install the Elasticsearch Operator again. The Jaeger Operator will create the Elasticsearch instance using the installed Elasticsearch Operator. ==== .Procedure diff --git a/modules/nodes-cluster-overcommit-buffer-chunk.adoc b/modules/nodes-cluster-overcommit-buffer-chunk.adoc index 1f5e12261c6f..dbec0821a566 100644 --- a/modules/nodes-cluster-overcommit-buffer-chunk.adoc +++ b/modules/nodes-cluster-overcommit-buffer-chunk.adoc @@ -15,7 +15,7 @@ Fluentd file buffering stores records in _chunks_. Chunks are stored in _buffers [NOTE] ==== To modify the `FILE_BUFFER_LIMIT` or `BUFFER_SIZE_LIMIT` parameters -in the Fluentd daemonset as described below, you must set cluster logging to the unmanaged state. +in the Fluentd daemonset as described below, you must set OpenShift Logging to the unmanaged state. Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. ==== diff --git a/modules/nodes-pods-priority-about.adoc b/modules/nodes-pods-priority-about.adoc index 1184ee11af94..049a3bd334e2 100644 --- a/modules/nodes-pods-priority-about.adoc +++ b/modules/nodes-pods-priority-about.adoc @@ -50,7 +50,7 @@ A number of critical components include the `system-cluster-critical` priority c ==== If you upgrade your existing cluster, the priority of your existing pods is effectively zero. However, existing pods with the `scheduler.alpha.kubernetes.io/critical-pod` annotation are automatically converted to `system-cluster-critical` class. -Fluentd cluster logging pods with the annotation are converted to the `cluster-logging` priority class. +Fluentd OpenShift Logging pods with the annotation are converted to the `cluster-logging` priority class. ==== [id="admin-guide-priority-preemption-names_{context}"] diff --git a/modules/nw-configure-ingress-access-logging.adoc b/modules/nw-configure-ingress-access-logging.adoc index 26aacbdef324..95b41b8ff36d 100644 --- a/modules/nw-configure-ingress-access-logging.adoc +++ b/modules/nw-configure-ingress-access-logging.adoc @@ -9,7 +9,7 @@ You can configure the Ingress Controller to enable access logs. If you have clus Container logging is useful to enable access logs on low-traffic clusters when there is no existing Syslog logging infrastructure, or for short-term use while diagnosing problems with the Ingress Controller. -Syslog is needed for high-traffic clusters where access logs could exceed the cluster logging stack's capacity, or for environments where any logging solution needs to integrate with an existing Syslog logging infrastructure. The Syslog use-cases can overlap. +Syslog is needed for high-traffic clusters where access logs could exceed the OpenShift Logging stack's capacity, or for environments where any logging solution needs to integrate with an existing Syslog logging infrastructure. The Syslog use-cases can overlap. 
.Prerequisites diff --git a/modules/security-monitoring-cluster-logging.adoc b/modules/security-monitoring-cluster-logging.adoc index c0a636333c60..4f97b8ca636e 100644 --- a/modules/security-monitoring-cluster-logging.adoc +++ b/modules/security-monitoring-cluster-logging.adoc @@ -14,5 +14,5 @@ access to logs: To save your logs for further audit and analysis, you can enable the `cluster-logging` add-on feature to collect, manage, and view system, container, and audit logs. -You can deploy, manage, and upgrade cluster logging through the Elasticsearch Operator +You can deploy, manage, and upgrade OpenShift Logging through the Elasticsearch Operator and Cluster Logging Operator. diff --git a/modules/serverless-using-cluster-logging-find-logs-knative-serving-components.adoc b/modules/serverless-using-cluster-logging-find-logs-knative-serving-components.adoc index 1224332dbfc7..ddb671b9d515 100644 --- a/modules/serverless-using-cluster-logging-find-logs-knative-serving-components.adoc +++ b/modules/serverless-using-cluster-logging-find-logs-knative-serving-components.adoc @@ -3,7 +3,7 @@ // serverless/cluster-logging-serverless.adoc [id="using-cluster-logging-to-find-logs-for-knative-serving-components_{context}"] -= Using cluster logging to find logs for Knative Serving components += Using OpenShift Logging to find logs for Knative Serving components .Procedure @@ -19,5 +19,5 @@ $ oc -n openshift-logging get route kibana [NOTE] ==== -Knative Serving uses structured logging by default. You can enable the parsing of these logs by customizing the cluster logging Fluentd settings. This makes the logs more searchable and enables filtering on the log level to quickly identify issues. +Knative Serving uses structured logging by default. You can enable the parsing of these logs by customizing the OpenShift Logging Fluentd settings. This makes the logs more searchable and enables filtering on the log level to quickly identify issues. ==== diff --git a/modules/serverless-using-cluster-logging-find-logs-services-deployed.adoc b/modules/serverless-using-cluster-logging-find-logs-services-deployed.adoc index 2dc172ffa403..f85a0922d625 100644 --- a/modules/serverless-using-cluster-logging-find-logs-services-deployed.adoc +++ b/modules/serverless-using-cluster-logging-find-logs-services-deployed.adoc @@ -2,9 +2,9 @@ // // serverless/cluster-logging-serverless.adoc [id="using-cluster-logging-to-find-logs-for-services-deployed-with-knative-serving_{context}"] -= Using cluster logging to find logs for services deployed with Knative Serving += Using OpenShift Logging to find logs for services deployed with Knative Serving -With OpenShift Cluster Logging, the logs that your applications write to the console are collected in Elasticsearch. The following procedure outlines how to apply these capabilities to applications deployed by using Knative Serving. +With OpenShift Logging, the logs that your applications write to the console are collected in Elasticsearch. The following procedure outlines how to apply these capabilities to applications deployed by using Knative Serving. .Procedure . 
Get the Kibana route: diff --git a/security/certificate_types_descriptions/monitoring-and-cluster-logging-operator-component-certificates.adoc b/security/certificate_types_descriptions/monitoring-and-cluster-logging-operator-component-certificates.adoc index 29e99a08e071..f5ca461b3cf4 100644 --- a/security/certificate_types_descriptions/monitoring-and-cluster-logging-operator-component-certificates.adoc +++ b/security/certificate_types_descriptions/monitoring-and-cluster-logging-operator-component-certificates.adoc @@ -1,5 +1,5 @@ [id="cert-types-monitoring-and-cluster-logging-operator-component-certificates"] -= Monitoring and cluster logging Operator component certificates += Monitoring and Cluster Logging Operator component certificates include::modules/common-attributes.adoc[] :context: cert-types-monitoring-and-cluster-logging-operator-component-certificates diff --git a/security/container_security/security-monitoring.adoc b/security/container_security/security-monitoring.adoc index 6a0154b6802f..85b2c7cb2721 100644 --- a/security/container_security/security-monitoring.adoc +++ b/security/container_security/security-monitoring.adoc @@ -23,5 +23,5 @@ include::modules/security-monitoring-audit-logging.adoc[leveloffset=+1] .Additional resources * xref:../../nodes/clusters/nodes-containers-events.adoc#nodes-containers-events[List of system events] -* xref:../../logging/cluster-logging.adoc#cluster-logging[Understanding cluster logging] +* xref:../../logging/cluster-logging.adoc#cluster-logging[Understanding OpenShift Logging] * xref:../../security/audit-log-view.adoc#audit-log-view[Viewing audit logs] diff --git a/serverless/knative_serving/cluster-logging-serverless.adoc b/serverless/knative_serving/cluster-logging-serverless.adoc index 8f3c5420781f..55aa624461ff 100644 --- a/serverless/knative_serving/cluster-logging-serverless.adoc +++ b/serverless/knative_serving/cluster-logging-serverless.adoc @@ -1,5 +1,5 @@ [id="cluster-logging-serverless"] -= Using cluster logging += Using OpenShift Logging include::modules/common-attributes.adoc[] include::modules/serverless-document-attributes.adoc[] :context: cluster-logging-serverless diff --git a/virt/logging_events_monitoring/virt-openshift-cluster-monitoring.adoc b/virt/logging_events_monitoring/virt-openshift-cluster-monitoring.adoc index 944be2779fd1..4673262870b9 100644 --- a/virt/logging_events_monitoring/virt-openshift-cluster-monitoring.adoc +++ b/virt/logging_events_monitoring/virt-openshift-cluster-monitoring.adoc @@ -9,10 +9,10 @@ toc::[] // Cluster monitoring include::modules/monitoring-about-cluster-monitoring.adoc[leveloffset=+1] -// Cluster logging +// OpenShift Logging include::modules/cluster-logging-about-components.adoc[leveloffset=+1] -For more information on cluster logging, see the xref:../../logging/cluster-logging.adoc#cluster-logging[{product-title} cluster logging] documentation. +For more information on OpenShift Logging, see the xref:../../logging/cluster-logging.adoc#cluster-logging[OpenShift Logging] documentation. 
// Telemetry include::modules/telemetry-about-telemetry.adoc[leveloffset=+1] diff --git a/welcome/index.adoc b/welcome/index.adoc index 64627d45d980..efaa3eb90c96 100644 --- a/welcome/index.adoc +++ b/welcome/index.adoc @@ -233,7 +233,7 @@ xref:../updating/updating-disconnected-cluster.adoc#updating-disconnected-cluste === Monitor the cluster -- **xref:../logging/cluster-logging.adoc#cluster-logging[Work with cluster logging]**: Learn about cluster logging and configure different cluster logging types, such as Elasticsearch, Fluentd, Kibana, and Curator. +- **xref:../logging/cluster-logging.adoc#cluster-logging[Work with OpenShift Logging]**: Learn about OpenShift Logging and configure different OpenShift Logging types, such as Elasticsearch, Fluentd, Kibana, and Curator. - **xref:../monitoring/understanding-the-monitoring-stack.adoc#understanding-the-monitoring-stack[Monitor clusters]**: Learn to xref:../monitoring/configuring-the-monitoring-stack.adoc#configuring-the-monitoring-stack[configure the monitoring stack]. @@ -274,9 +274,9 @@ endif::[] //// - **xref:../applications/pruning-objects.adoc#pruning-objects[Prune and reclaim resources]**: You can reclaim spaceby pruning unneeded Operators, groups, deployments, builds, images, registries, and cron jobs. -- **xref:../logging/cluster-logging.adoc#cluster-logging[Work with cluster logging]**: +- **xref:../logging/cluster-logging.adoc#cluster-logging[Work with OpenShift Logging]**: Learn about how to deploy and use a cluster to aggregate logs for a range of {product-title} -services. Cluster logging collects, stores, and visualizes logging data from hosts and +services. OpenShift Logging collects, stores, and visualizes logging data from hosts and applications, whether coming from multiple containers or even deleted pods. - **xref:../monitoring/understanding-the-monitoring-stack.adoc#understanding-the-monitoring-stack[Monitor clusters]**: Learn to