diff --git a/_attributes/attributes-openshift-dedicated.adoc b/_attributes/attributes-openshift-dedicated.adoc index f438ad295b1f..22994ab42eb8 100644 --- a/_attributes/attributes-openshift-dedicated.adoc +++ b/_attributes/attributes-openshift-dedicated.adoc @@ -20,8 +20,17 @@ :rhq-short: Red Hat Quay :SMProductName: Red Hat OpenShift Service Mesh :pipelines-title: Red Hat OpenShift Pipelines +//logging +:logging-title: logging subsystem for Red Hat OpenShift +:logging-title-uc: Logging subsystem for Red Hat OpenShift +:logging: logging subsystem +:logging-uc: Logging subsystem +:clo: Red Hat OpenShift Logging Operator +:loki-op: Loki Operator +:es-op: Elasticsearch Operator :logging-sd: Red Hat OpenShift Logging :log-plug: logging subsystem Console Plugin +// :ServerlessProductName: OpenShift Serverless :rh-openstack-first: Red Hat OpenStack Platform (RHOSP) :rh-openstack: RHOSP diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 15a6aa441fd0..e570490d94df 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2543,8 +2543,6 @@ Topics: Dir: config Distros: openshift-enterprise,openshift-origin Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - Name: Configuring the log store File: cluster-logging-log-store - Name: Configuring CPU and memory limits for Logging components diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml index f65cc0456ced..0662d0b4d90a 100644 --- a/_topic_maps/_topic_map_osd.yml +++ b/_topic_maps/_topic_map_osd.yml @@ -1055,8 +1055,6 @@ Topics: - Name: Configuring your Logging deployment Dir: config Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - Name: Configuring the log store File: cluster-logging-log-store - Name: Configuring CPU and memory limits for Logging components diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index 
bfeda8457d83..496b7b85c1cd 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -88,7 +88,7 @@ Topics: # File: rosa-mobb-prerequisites-tutorial - Name: Verifying Permissions for a ROSA STS Deployment File: rosa-mobb-verify-permissions-sts-deployment -- Name: Configuring the Cluster Log Forwarder for CloudWatch logs and STS +- Name: Configuring log forwarding for CloudWatch logs and STS File: cloud-experts-rosa-cloudwatch-sts - Name: Using AWS WAF and Amazon CloudFront to protect ROSA workloads File: cloud-experts-using-cloudfront-and-waf @@ -351,7 +351,7 @@ Topics: Topics: - Name: Installing the web terminal File: installing-web-terminal -# Do not have sufficient permissions to read any cluster configuration. +# Do not have sufficient permissions to read any cluster configuration. # - Name: Configuring the web terminal # File: configuring-web-terminal - Name: Using the web terminal @@ -1239,8 +1239,6 @@ Topics: - Name: Configuring your Logging deployment Dir: config Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - Name: Configuring the log store File: cluster-logging-log-store - Name: Configuring CPU and memory limits for Logging components diff --git a/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.adoc b/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.adoc index 960075c27bac..aa745cf97ca8 100644 --- a/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.adoc +++ b/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.adoc @@ -1,6 +1,6 @@ :_mod-docs-content-type: ASSEMBLY [id="cloud-experts-rosa-cloudwatch-sts"] -= Tutorial: Configuring the Cluster Log Forwarder for CloudWatch logs and STS += Configuring log forwarding for CloudWatch logs and STS include::_attributes/attributes-openshift-dedicated.adoc[] :context: cloud-experts-rosa-cloudwatch-sts @@ -18,7 +18,7 @@ toc::[] // - Connor Wooley // --- -Use this tutorial to deploy the Cluster Log 
Forwarder Operator and configure it to use Security Token Services (STS) authentication to forward logs to CloudWatch. +Use this tutorial to deploy the {clo} and configure it to use Security Token Service (STS) authentication to forward logs to CloudWatch. [id="cloud-experts-rosa-cloudwatch-sts-prerequisites"] .Prerequisites @@ -58,7 +58,7 @@ $ echo "Cluster: ${ROSA_CLUSTER_NAME}, Region: ${REGION}, OIDC Endpoint: ${OIDC_ [id="cloud-experts-rosa-cloudwatch-sts-prep-aws"] == Preparing your AWS account -. Create an Identity Access Management (IAM) policy for OpenShift Log Forwarding: +. Create an Identity and Access Management (IAM) policy for the {logging}: + [source,terminal] ---- @@ -127,7 +127,7 @@ $ aws iam attach-role-policy --role-name "${ROSA_CLUSTER_NAME}-RosaCloudWatch" \ [id="cloud-experts-rosa-cloudwatch-sts-deploy-Os"] == Deploying Operators -. Deploy the Cluster Logging Operator: +. Deploy the {clo}: + [source,terminal] ---- @@ -166,7 +166,7 @@ EOF [id="cloud-experts-rosa-cloudwatch-sts-configure-cluster-logging"] == Configuring cluster logging -. Create a cluster-log forwarding resource: +. Create a `ClusterLogForwarder` custom resource (CR): + [source,terminal] ---- @@ -197,7 +197,7 @@ $ cat << EOF | oc apply -f - EOF ---- -. Create a cluster logging resource: +. Create a `ClusterLogging` CR: + [source,terminal] ---- @@ -258,14 +258,14 @@ If this is a new cluster, you might not see a log group for `application` logs a [id="cloud-experts-rosa-cloudwatch-sts-clean-up"] == Cleaning up your resources -. Delete the cluster-log forwarding resource: +. Delete the `ClusterLogForwarder` CR: + [source,terminal] ---- $ oc delete -n openshift-logging clusterlogforwarder instance ---- -. Delete the cluster logging resource: +. 
Delete the `ClusterLogging` CR: + [source,terminal] ---- @@ -305,4 +305,4 @@ $ aws iam delete-policy --policy-arn "${POLICY_ARN}" ---- $ aws logs delete-log-group --log-group-name "rosa-${ROSA_CLUSTER_NAME}.audit" $ aws logs delete-log-group --log-group-name "rosa-${ROSA_CLUSTER_NAME}.infrastructure" ----- \ No newline at end of file +---- diff --git a/logging/cluster-logging-deploying.adoc b/logging/cluster-logging-deploying.adoc index 76aca1e6b2ab..e857ca610c78 100644 --- a/logging/cluster-logging-deploying.adoc +++ b/logging/cluster-logging-deploying.adoc @@ -43,8 +43,12 @@ include::modules/logging-install-es-operator.adoc[leveloffset=+2] [id="cluster-logging-deploying-postinstallation"] == Postinstallation tasks -If your network plugin enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the {logging} Operators]. +After you have installed the {clo}, you can configure your deployment by creating and modifying a `ClusterLogging` custom resource (CR). 
+include::modules/cluster-logging-about-crd.adoc[leveloffset=+2] +include::modules/configuring-log-storage-cr.adoc[leveloffset=+2] +include::modules/configuring-logging-collector.adoc[leveloffset=+2] +include::modules/configuring-log-visualizer.adoc[leveloffset=+2] include::modules/cluster-logging-deploy-multitenant.adoc[leveloffset=+2] [role="_additional-resources"] diff --git a/logging/cluster-logging-loki.adoc b/logging/cluster-logging-loki.adoc index 810f2d46c4e3..32817645547c 100644 --- a/logging/cluster-logging-loki.adoc +++ b/logging/cluster-logging-loki.adoc @@ -26,6 +26,8 @@ include::modules/logging-loki-cli-install.adoc[leveloffset=+1] include::modules/logging-clo-cli-install.adoc[leveloffset=+1] +include::modules/configuring-log-storage-cr.adoc[leveloffset=+1] + include::modules/logging-loki-storage.adoc[leveloffset=+1] include::modules/logging-loki-storage-aws.adoc[leveloffset=+2] @@ -79,10 +81,7 @@ xref:../authentication/using-rbac.adoc[Using RBAC to define and apply permission endif::[] include::modules/logging-loki-retention.adoc[leveloffset=+1] - -include::modules/cluster-logging-forwarding-lokistack.adoc[leveloffset=+1] - -include::modules/loki-rate-limit-errors.adoc[leveloffset=+2] +include::modules/loki-rate-limit-errors.adoc[leveloffset=+1] [role="_additional-resources"] [id="additional-resources_cluster-logging-loki"] diff --git a/logging/config/cluster-logging-configuring-cr.adoc b/logging/config/cluster-logging-configuring-cr.adoc deleted file mode 100644 index 235544734992..000000000000 --- a/logging/config/cluster-logging-configuring-cr.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-configuring-cr -[id="cluster-logging-configuring-cr"] -= About the Cluster Logging custom resource -include::_attributes/common-attributes.adoc[] - -toc::[] - -To configure {logging-title} you customize the `ClusterLogging` custom resource (CR). 
- -include::modules/cluster-logging-about-crd.adoc[leveloffset=+1] - -//// -// collecting this information here for a future PR - -If you want to specify collector resources or scheduling, you must create a `ClusterLogging` CR: - -.ClusterLogging resource example -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: audit-collector <1> - namespace: openshift-kube-apiserver <2> -spec: - collection: - type: "vector" <3> - resources: - limits: - memory: 2G -# ... ----- -<1> The name of the `ClusterLogging` CR must be the same as the `ClusterLogForwarder` CR. -<2> The namespace of the `ClusterLogging` CR must be the same as the `ClusterLogForwarder` CR. -<3> The collector type that you want to use. This example uses the Vector collector. - -[NOTE] -==== -The relevant `spec` fields for this CR in multiple log forwarder mode are the `managmentState` and `collection` fields. All other `spec` fields are ignored. -==== -//// diff --git a/logging/log_collection_forwarding/cluster-logging-collector.adoc b/logging/log_collection_forwarding/cluster-logging-collector.adoc index 01fe06bf7edd..890d09aa3174 100644 --- a/logging/log_collection_forwarding/cluster-logging-collector.adoc +++ b/logging/log_collection_forwarding/cluster-logging-collector.adoc @@ -11,6 +11,8 @@ toc::[] You can configure the CPU and memory limits for the log collector and xref:../../logging/config/cluster-logging-moving-nodes.adoc#cluster-logging-moving[move the log collector pods to specific nodes]. All supported modifications to the log collector can be performed though the `spec.collection.log.fluentd` stanza in the `ClusterLogging` custom resource (CR). 
+include::modules/configuring-logging-collector.adoc[leveloffset=+1] + include::modules/cluster-logging-collector-pod-location.adoc[leveloffset=+1] include::modules/cluster-logging-collector-limits.adoc[leveloffset=+1] diff --git a/logging/log_visualization/log-visualization.adoc b/logging/log_visualization/log-visualization.adoc index e6a2dfaf5454..22513bbbe56c 100644 --- a/logging/log_visualization/log-visualization.adoc +++ b/logging/log_visualization/log-visualization.adoc @@ -11,6 +11,8 @@ You can visualize your log data in the {product-title} web console, or the Kiban include::snippets/logging-kibana-dep-snip.adoc[] +include::modules/configuring-log-visualizer.adoc[leveloffset=+1] + [id="log-visualization-resource-logs"] == Viewing logs for a resource diff --git a/modules/cluster-logging-about-crd.adoc b/modules/cluster-logging-about-crd.adoc index a889ab364165..e05b661b4474 100644 --- a/modules/cluster-logging-about-crd.adoc +++ b/modules/cluster-logging-about-crd.adoc @@ -3,18 +3,12 @@ // * logging/cluster-logging.adoc :_mod-docs-content-type: CONCEPT -[id="cluster-logging-configuring-crd_{context}"] +[id="cluster-logging-about-crd_{context}"] = About the ClusterLogging custom resource To make changes to your {logging} environment, create and modify the `ClusterLogging` custom resource (CR). -Instructions for creating or modifying a CR are provided in this documentation as appropriate. - -The following example shows a typical custom resource for the {logging}. 
- -[id="efk-logging-configuring-about-sample_{context}"] .Sample `ClusterLogging` custom resource (CR) -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] [source,yaml] ---- apiVersion: "logging.openshift.io/v1" @@ -24,52 +18,8 @@ metadata: namespace: "openshift-logging" <2> spec: managementState: "Managed" <3> - logStore: - type: "elasticsearch" <4> - retentionPolicy: - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 - resources: - limits: - memory: 16Gi - requests: - cpu: 500m - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: <5> - type: "kibana" - kibana: - resources: - limits: - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi - replicas: 1 - collection: <6> - logs: - type: "fluentd" - fluentd: - resources: - limits: - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi +# ... ---- <1> The CR name must be `instance`. <2> The CR must be installed to the `openshift-logging` namespace. <3> The Red Hat OpenShift Logging Operator management state. When set to `unmanaged` the operator is in an unsupported state and will not get updates. -<4> Settings for the log store, including retention policy, the number of nodes, the resource requests and limits, and the storage class. -<5> Settings for the visualizer, including the resource requests and limits, and the number of pod replicas. -<6> Settings for the log collector, including the resource requests and limits. 
-endif::[] diff --git a/modules/cluster-logging-collector-log-forward-syslog.adoc b/modules/cluster-logging-collector-log-forward-syslog.adoc index dda4261cc816..c21346614b11 100644 --- a/modules/cluster-logging-collector-log-forward-syslog.adoc +++ b/modules/cluster-logging-collector-log-forward-syslog.adoc @@ -42,7 +42,7 @@ spec: procID: myproc rfc: RFC5424 severity: debug - url: 'udp://rsyslogserver.west.example.com:514' + url: 'tcp://rsyslogserver.west.example.com:514' pipelines: - name: syslog-east <8> inputRefs: <9> diff --git a/modules/cluster-logging-forwarding-lokistack.adoc b/modules/cluster-logging-forwarding-lokistack.adoc deleted file mode 100644 index dc03bcfc22f2..000000000000 --- a/modules/cluster-logging-forwarding-lokistack.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-forwarding-lokistack_{context}"] -= Forwarding logs to LokiStack - -To configure log forwarding to the LokiStack gateway, you must create a `ClusterLogging` custom resource (CR). - -.Prerequisites - -* The {logging-title-uc} version 5.5 or newer is installed on your cluster. -* The Loki Operator is installed on your cluster. 
- -.Procedure - -* Create a `ClusterLogging` custom resource (CR): -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: logging-loki - collection: - type: vector ----- diff --git a/modules/configuring-log-storage-cr.adoc b/modules/configuring-log-storage-cr.adoc new file mode 100644 index 000000000000..c02f6b2edd8b --- /dev/null +++ b/modules/configuring-log-storage-cr.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// * logging/cluster-logging-deploying.adoc +// * logging/cluster-logging-loki.adoc + +:_mod-docs-content-type: PROCEDURE +[id="configuring-log-storage-cr_{context}"] += Configuring log storage + +You can configure which log storage type your {logging} uses by modifying the `ClusterLogging` custom resource (CR). + +.Prerequisites + +* You have administrator permissions. +* You have installed the {oc-first}. +* You have installed the {clo} and an internal log store that is either the LokiStack or Elasticsearch. +* You have created a `ClusterLogging` CR. + +include::snippets/logging-elastic-dep-snip.adoc[] + +.Procedure + +. Modify the `ClusterLogging` CR `logStore` spec: ++ +.`ClusterLogging` CR example +[source,yaml] +---- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogging +metadata: +# ... +spec: +# ... + logStore: + type: <1> + elasticsearch: <2> + nodeCount: + resources: {} + storage: {} + redundancyPolicy: <3> + lokistack: <4> + name: {} +# ... +---- +<1> Specify the log store type. This can be either `lokistack` or `elasticsearch`. +<2> Optional configuration options for the Elasticsearch log store. +<3> Specify the redundancy type. This value can be `ZeroRedundancy`, `SingleRedundancy`, `MultipleRedundancy`, or `FullRedundancy`. +<4> Optional configuration options for LokiStack. + +. 
Apply the `ClusterLogging` CR by running the following command: ++ [source,terminal] ---- +$ oc apply -f <filename>.yaml +---- diff --git a/modules/configuring-log-visualizer.adoc b/modules/configuring-log-visualizer.adoc new file mode 100644 index 000000000000..797943e3e4de --- /dev/null +++ b/modules/configuring-log-visualizer.adoc @@ -0,0 +1,59 @@ +// Module included in the following assemblies: +// +// * logging/log_visualization/log-visualization.adoc +// * logging/cluster-logging-deploying.adoc + +:_mod-docs-content-type: PROCEDURE +[id="configuring-log-visualizer_{context}"] += Configuring the log visualizer + +You can configure which log visualizer type your {logging} uses by modifying the `ClusterLogging` custom resource (CR). + +.Prerequisites + +* You have administrator permissions. +* You have installed the {oc-first}. +* You have installed the {clo}. +* You have created a `ClusterLogging` CR. + +[IMPORTANT] +==== +If you want to use the {product-title} web console for visualization, you must enable the {log-plug}. See the documentation about "Log visualization with the web console". +==== + +.Procedure + +. Modify the `ClusterLogging` CR `visualization` spec: ++ +.`ClusterLogging` CR example +[source,yaml] +---- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogging +metadata: +# ... +spec: +# ... + visualization: + type: <1> + kibana: <2> + resources: {} + nodeSelector: {} + proxy: {} + replicas: {} + tolerations: {} + ocpConsole: <3> + logsLimit: {} + timeout: {} +# ... +---- +<1> The type of visualizer you want to use for your {logging}. This can be either `kibana` or `ocp-console`. The Kibana console is only compatible with deployments that use Elasticsearch log storage, while the {product-title} console is only compatible with LokiStack deployments. +<2> Optional configurations for the Kibana console. +<3> Optional configurations for the {product-title} web console. + +. 
Apply the `ClusterLogging` CR by running the following command: ++ [source,terminal] ---- +$ oc apply -f <filename>.yaml +---- diff --git a/modules/configuring-logging-collector.adoc b/modules/configuring-logging-collector.adoc new file mode 100644 index 000000000000..e0666009014f --- /dev/null +++ b/modules/configuring-logging-collector.adoc @@ -0,0 +1,47 @@ +// Module included in the following assemblies: +// +// * logging/cluster-logging-deploying.adoc +// * logging/log_collection_forwarding/cluster-logging-collector.adoc + +:_mod-docs-content-type: PROCEDURE +[id="configuring-logging-collector_{context}"] += Configuring the log collector + +You can configure which log collector type your {logging} uses by modifying the `ClusterLogging` custom resource (CR). + +include::snippets/logging-fluentd-dep-snip.adoc[] + +.Prerequisites + +* You have administrator permissions. +* You have installed the {oc-first}. +* You have installed the {clo}. +* You have created a `ClusterLogging` CR. + +.Procedure + +. Modify the `ClusterLogging` CR `collection` spec: ++ +.`ClusterLogging` CR example +[source,yaml] +---- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogging +metadata: +# ... +spec: +# ... + collection: + type: <1> + resources: {} + tolerations: {} +# ... +---- +<1> The log collector type you want to use for the {logging}. This can be `vector` or `fluentd`. + +. 
Apply the `ClusterLogging` CR by running the following command: ++ [source,terminal] ---- +$ oc apply -f <filename>.yaml +---- diff --git a/snippets/logging-elastic-dep-snip.adoc b/snippets/logging-elastic-dep-snip.adoc index 86dac1dcf7f1..cadfc6fbb2aa 100644 --- a/snippets/logging-elastic-dep-snip.adoc +++ b/snippets/logging-elastic-dep-snip.adoc @@ -4,10 +4,11 @@ // // Text snippet included in the following modules: // -// +// * configuring-log-storage-cr.adoc + :_mod-docs-content-type: SNIPPET [NOTE] ==== -As of logging version 5.4.3 the OpenShift Elasticsearch Operator is deprecated and is planned to be removed in a future release. Red Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed. As an alternative to using the OpenShift Elasticsearch Operator to manage the default log storage, you can use the Loki Operator. +The {es-op} is deprecated and is planned to be removed in a future release. Red{nbsp}Hat provides bug fixes and support for this feature during the current release lifecycle, but this feature no longer receives enhancements. As an alternative to using the {es-op} to manage the default log storage, you can use the {loki-op}. ==== diff --git a/snippets/logging-fluentd-dep-snip.adoc b/snippets/logging-fluentd-dep-snip.adoc index f605ddf482ef..5697c1338a91 100644 --- a/snippets/logging-fluentd-dep-snip.adoc +++ b/snippets/logging-fluentd-dep-snip.adoc @@ -4,10 +4,11 @@ // // Text snippet included in the following modules: // -// +// * configuring-logging-collector.adoc + :_mod-docs-content-type: SNIPPET [NOTE] ==== -As of logging version 5.6 Fluentd is deprecated and is planned to be removed in a future release. Red Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed. As an alternative to Fluentd, you can use Vector instead. 
+Fluentd is deprecated and is planned to be removed in a future release. Red{nbsp}Hat provides bug fixes and support for this feature during the current release lifecycle, but this feature no longer receives enhancements. As an alternative to Fluentd, you can use Vector instead. ====