diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index cb1778e0d304..faeb12cfeb16 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -3037,156 +3037,8 @@ Topics: Dir: logging Distros: openshift-enterprise,openshift-origin Topics: -# - Name: Release notes -# Dir: logging_release_notes -# Topics: -# - Name: Logging 5.9 -# File: logging-5-9-release-notes -# - Name: Logging 5.8 -# File: logging-5-8-release-notes -# - Name: Logging 5.7 -# File: logging-5-7-release-notes - - Name: Logging 6.2 - Dir: logging-6.2 - Topics: - - Name: Support - File: log62-cluster-logging-support - - Name: Release notes - File: log6x-release-notes-6.2 - - Name: About logging 6.2 - File: log6x-about-6.2 - - Name: Configuring log forwarding - File: log6x-clf-6.2 - - Name: Configuring the logging collector - File: 6x-cluster-logging-collector-6.2 - - Name: Configuring LokiStack storage - File: log6x-loki-6.2 - - Name: Configuring LokiStack for OTLP - File: log6x-configuring-lokistack-otlp-6.2 - - Name: Visualization for logging - File: log6x-visual-6.2 - - Name: Logging 6.1 - Dir: logging-6.1 - Topics: - - Name: Support - File: log61-cluster-logging-support - - Name: Release notes - File: log6x-release-notes-6.1 - - Name: About logging 6.1 - File: log6x-about-6.1 - - Name: Configuring log forwarding - File: log6x-clf-6.1 - - Name: Configuring the logging collector - File: 6x-cluster-logging-collector-6.1 - - Name: Configuring LokiStack storage - File: log6x-loki-6.1 - - Name: Configuring LokiStack for OTLP - File: log6x-configuring-lokistack-otlp-6.1 - - Name: OpenTelemetry data model - File: log6x-opentelemetry-data-model-6.1 - - Name: Visualization for logging - File: log6x-visual-6.1 -# - Name: Support -# File: cluster-logging-support -# - Name: Troubleshooting logging -# Dir: troubleshooting -# Topics: -# - Name: Viewing Logging status -# File: cluster-logging-cluster-status -# - Name: Troubleshooting log forwarding -# File: log-forwarding-troubleshooting -# - Name: Troubleshooting logging alerts -# File: troubleshooting-logging-alerts -# File: cluster-logging-log-store-status -# - Name: About Logging -# File: cluster-logging -# - Name: Installing Logging -# File: cluster-logging-deploying -# - Name: Updating Logging -# File: cluster-logging-upgrading -# Distros: openshift-enterprise,openshift-origin -# - Name: Visualizing logs -# Topics: -# - Name: About log visualization -# File: log-visualization -# - Name: Log visualization with the web console -# File: log-visualization-ocp-console -# - Name: Viewing cluster dashboards -# File: cluster-logging-dashboards -# - Name: Log visualization with Kibana -# File: logging-kibana -# - Name: Configuring your Logging deployment -# Dir: config -# Distros: openshift-enterprise,openshift-origin -# Topics: -# - Name: Configuring CPU and memory limits for Logging components -# File: cluster-logging-memory -# - Name: Configuring systemd-journald for Logging -# File: cluster-logging-systemd -# - Name: Log collection and forwarding -# Dir: log_collection_forwarding -# Topics: -# - Name: About log collection and forwarding -# File: log-forwarding -# - Name: Log output types -# File: logging-output-types -# - Name: Enabling JSON log forwarding -# File: cluster-logging-enabling-json-logging -# - Name: Configuring log forwarding -# File: configuring-log-forwarding -# - Name: Configuring the logging collector -# File: cluster-logging-collector -# - Name: Collecting and storing Kubernetes events -# File: cluster-logging-eventrouter -# - Name: 
Log storage -# Dir: log_storage -# Topics: -# - Name: About log storage -# File: about-log-storage -# File: installing-log-storage -# - Name: Configuring the LokiStack log store -# File: cluster-logging-loki -# - Name: Configuring the Elasticsearch log store -# File: logging-config-es-store -# - Name: Logging alerts -# Dir: logging_alerts -# Topics: -# - Name: Default logging alerts -# File: default-logging-alerts -# - Name: Custom logging alerts -# File: custom-logging-alerts -# - Name: Performance and reliability tuning -# Dir: performance_reliability -# Topics: -# - Name: Flow control mechanisms -# File: logging-flow-control-mechanisms -# - Name: Filtering logs by content -# File: logging-content-filtering -# - Name: Filtering logs by metadata -# File: logging-input-spec-filtering -# - Name: Scheduling resources -# Dir: scheduling_resources -# Topics: -# - Name: Using node selectors to move logging resources -# File: logging-node-selectors -# - Name: Using tolerations to control logging pod placement -# File: logging-taints-tolerations -# - Name: Uninstalling Logging -# File: cluster-logging-uninstall -# - Name: Exported fields -# File: cluster-logging-exported-fields -# Distros: openshift-enterprise,openshift-origin -# - Name: API reference -# Dir: api_reference -# Topics: - # - Name: 5.8 Logging API reference - # File: logging-5-8-reference - # - Name: 5.7 Logging API reference - # File: logging-5-7-reference -# - Name: 5.6 Logging API reference -# File: logging-5-6-reference -# - Name: Glossary -# File: logging-common-terms + - Name: About Logging + File: about-logging - Name: Distributed Tracing Dir: distr_tracing Distros: openshift-enterprise diff --git a/modules/about-log-collection.adoc b/modules/about-log-collection.adoc deleted file mode 100644 index f716c5fe7a3e..000000000000 --- a/modules/about-log-collection.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: CONCEPT -[id="about-log-collection_{context}"] -= Log collection - -The log collector is a daemon set that deploys pods to each {product-title} node to collect container and node logs. - -By default, the log collector uses the following sources: - -* System and infrastructure logs generated by journald log messages from the operating system, the container runtime, and {product-title}. -* `/var/log/containers/*.log` for all container logs. - -If you configure the log collector to collect audit logs, it collects them from `/var/log/audit/audit.log`. - -The log collector collects the logs from these sources and forwards them internally or externally depending on your {logging} configuration. - -[id="about-log-collectors-types_{context}"] -== Log collector types - -link:https://vector.dev/docs/about/what-is-vector/[Vector] is a log collector offered as an alternative to Fluentd for the {logging}. - -You can configure which logging collector type your cluster uses by modifying the `ClusterLogging` custom resource (CR) `collection` spec: - -.Example ClusterLogging CR that configures Vector as the collector -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - collection: - logs: - type: vector - vector: {} -# ... 
----- - -[id="about-log-collectors-limitations_{context}"] -== Log collection limitations - -The container runtimes provide minimal information to identify the source of log messages: project, pod name, and container ID. This information is not sufficient to uniquely identify the source of the logs. If a pod with a given name and project is deleted before the log collector begins processing its logs, information from the API server, such as labels and annotations, might not be available. There might not be a way to distinguish the log messages from a similarly named pod and project or trace the logs to their source. This limitation means that log collection and normalization are considered _best effort_. - -[IMPORTANT] -==== -The available container runtimes provide minimal information to identify the source of log messages and do not guarantee unique individual log messages or that these messages can be traced to their source. -==== diff --git a/modules/cluster-logging-about-es-logstore.adoc b/modules/cluster-logging-about-es-logstore.adoc deleted file mode 100644 index c73abb216ac5..000000000000 --- a/modules/cluster-logging-about-es-logstore.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging.adoc - -:_mod-docs-content-type: CONCEPT -[id="cluster-logging-about-es-logstore_{context}"] -= About the Elasticsearch log store - -The {logging} Elasticsearch instance is optimized and tested for short term storage, approximately seven days. If you want to retain your logs over a longer term, it is recommended you move the data to a third-party storage system. - -Elasticsearch organizes the log data from Fluentd into datastores, or _indices_, then subdivides each index into multiple pieces called _shards_, which it spreads across a set of Elasticsearch nodes in an Elasticsearch cluster. You can configure Elasticsearch to make copies of the shards, called _replicas_, which Elasticsearch also spreads across the Elasticsearch nodes. The `ClusterLogging` custom resource (CR) allows you to specify how the shards are replicated to provide data redundancy and resilience to failure. You can also specify how long the different types of logs are retained using a retention policy in the `ClusterLogging` CR. - -[NOTE] -==== -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - -The Red Hat OpenShift Logging Operator and companion OpenShift Elasticsearch Operator ensure that each Elasticsearch node is deployed using a unique deployment that includes its own storage volume. -You can use a `ClusterLogging` custom resource (CR) to increase the number of Elasticsearch nodes, as needed. -See the link:https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html[Elasticsearch documentation] for considerations involved in configuring storage. - -[NOTE] -==== -A highly-available Elasticsearch environment requires at least three Elasticsearch nodes, each on a different host. -==== - -Role-based access control (RBAC) applied on the Elasticsearch indices enables the controlled access of the logs to the developers. Administrators can access all logs and developers can access only the logs in their projects. 
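
The preceding module describes shard replication and log retention as settings in the `ClusterLogging` custom resource without showing where they live. The following sketch shows the `logStore` stanza of that CR for reference, assuming the `logging.openshift.io/v1` API used by the other modules in this changeset; the retention ages, node count, storage class, and size are illustrative values, not recommendations:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance
  namespace: openshift-logging
spec:
  logStore:
    type: elasticsearch
    retentionPolicy:                        # how long each log type is retained (example values)
      application:
        maxAge: 1d
      infra:
        maxAge: 7d
      audit:
        maxAge: 7d
    elasticsearch:
      nodeCount: 3                          # three nodes, each on a different host, for high availability
      redundancyPolicy: SingleRedundancy    # keep one replica of each primary shard
      storage:
        storageClassName: gp2               # example storage class; use one that exists in your cluster
        size: 200G
# ...
----

With `SingleRedundancy`, Elasticsearch keeps one replica of each primary shard on another data node, so the cluster tolerates the loss of a single node; `ZeroRedundancy`, `MultipleRedundancy`, and `FullRedundancy` trade storage consumption against resilience in either direction.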
diff --git a/modules/cluster-logging-clo-status-comp.adoc b/modules/cluster-logging-clo-status-comp.adoc deleted file mode 100644 index 972d5d8f641f..000000000000 --- a/modules/cluster-logging-clo-status-comp.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/cluster-logging-cluster-status.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-clo-status-comp_{context}"] -= Viewing the status of {logging} components - -You can view the status for a number of {logging} components. - -.Prerequisites - -* The {clo} and {es-op} are installed. - -.Procedure - -. Change to the `openshift-logging` project. -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. View the status of {logging} environment: -+ -[source,terminal] ----- -$ oc describe deployment cluster-logging-operator ----- -+ -.Example output -[source,terminal] ----- -Name: cluster-logging-operator - -.... - -Conditions: - Type Status Reason - ---- ------ ------ - Available True MinimumReplicasAvailable - Progressing True NewReplicaSetAvailable - -.... - -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ScalingReplicaSet 62m deployment-controller Scaled up replica set cluster-logging-operator-574b8987df to 1---- ----- - -. View the status of the {logging} replica set: - -.. Get the name of a replica set: -+ -.Example output -[source,terminal] ----- -$ oc get replicaset ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AGE -cluster-logging-operator-574b8987df 1 1 1 159m -elasticsearch-cdm-uhr537yu-1-6869694fb 1 1 1 157m -elasticsearch-cdm-uhr537yu-2-857b6d676f 1 1 1 156m -elasticsearch-cdm-uhr537yu-3-5b6fdd8cfd 1 1 1 155m -kibana-5bd5544f87 1 1 1 157m ----- - -.. Get the status of the replica set: -+ -[source,terminal] ----- -$ oc describe replicaset cluster-logging-operator-574b8987df ----- -+ -.Example output -[source,terminal] ----- -Name: cluster-logging-operator-574b8987df - -.... - -Replicas: 1 current / 1 desired -Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed - -.... - -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulCreate 66m replicaset-controller Created pod: cluster-logging-operator-574b8987df-qjhqv---- ----- diff --git a/modules/cluster-logging-clo-status.adoc b/modules/cluster-logging-clo-status.adoc deleted file mode 100644 index 92bd048f593b..000000000000 --- a/modules/cluster-logging-clo-status.adoc +++ /dev/null @@ -1,226 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/cluster-logging-cluster-status.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-clo-status_{context}"] -= Viewing the status of the {clo} - -You can view the status of the {clo}. - -.Prerequisites - -* The {clo} and {es-op} are installed. - -.Procedure - -. Change to the `openshift-logging` project by running the following command: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. Get the `ClusterLogging` instance status by running the following command: -+ -[source,terminal] ----- -$ oc get clusterlogging instance -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -# ... 
-status: <1> - collection: - logs: - fluentdStatus: - daemonSet: fluentd <2> - nodes: - collector-2rhqp: ip-10-0-169-13.ec2.internal - collector-6fgjh: ip-10-0-165-244.ec2.internal - collector-6l2ff: ip-10-0-128-218.ec2.internal - collector-54nx5: ip-10-0-139-30.ec2.internal - collector-flpnn: ip-10-0-147-228.ec2.internal - collector-n2frh: ip-10-0-157-45.ec2.internal - pods: - failed: [] - notReady: [] - ready: - - collector-2rhqp - - collector-54nx5 - - collector-6fgjh - - collector-6l2ff - - collector-flpnn - - collector-n2frh - logstore: <3> - elasticsearchStatus: - - ShardAllocationEnabled: all - cluster: - activePrimaryShards: 5 - activeShards: 5 - initializingShards: 0 - numDataNodes: 1 - numNodes: 1 - pendingTasks: 0 - relocatingShards: 0 - status: green - unassignedShards: 0 - clusterName: elasticsearch - nodeConditions: - elasticsearch-cdm-mkkdys93-1: - nodeCount: 1 - pods: - client: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c - data: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c - master: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c - visualization: <4> - kibanaStatus: - - deployment: kibana - pods: - failed: [] - notReady: [] - ready: - - kibana-7fb4fd4cc9-f2nls - replicaSets: - - kibana-7fb4fd4cc9 - replicas: 1 ----- -<1> In the output, the cluster status fields appear in the `status` stanza. -<2> Information on the Fluentd pods. -<3> Information on the Elasticsearch pods, including Elasticsearch cluster health, `green`, `yellow`, or `red`. -<4> Information on the Kibana pods. - - -[id="cluster-logging-clo-status-message_{context}"] -== Example condition messages - -The following are examples of some condition messages from the `Status.Nodes` section of the `ClusterLogging` instance. - -A status message similar to the following indicates a node has exceeded the configured low watermark and no shard will be allocated to this node: - -.Example output -[source,yaml] ----- - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T15:57:22Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not - be allocated on this node. - reason: Disk Watermark Low - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-clientdatamaster-0-1 - upgradeStatus: {} ----- - -A status message similar to the following indicates a node has exceeded the configured high watermark and shards will be relocated to other nodes: - -.Example output -[source,yaml] ----- - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T16:04:45Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be relocated - from this node. - reason: Disk Watermark High - status: "True" - type: NodeStorage - deploymentName: cluster-logging-operator - upgradeStatus: {} ----- - -A status message similar to the following indicates the Elasticsearch node selector in the CR does not match any nodes in the cluster: - -.Example output -[source,terminal] ----- - Elasticsearch Status: - Shard Allocation Enabled: shard allocation unknown - Cluster: - Active Primary Shards: 0 - Active Shards: 0 - Initializing Shards: 0 - Num Data Nodes: 0 - Num Nodes: 0 - Pending Tasks: 0 - Relocating Shards: 0 - Status: cluster health unknown - Unassigned Shards: 0 - Cluster Name: elasticsearch - Node Conditions: - elasticsearch-cdm-mkkdys93-1: - Last Transition Time: 2019-06-26T03:37:32Z - Message: 0/5 nodes are available: 5 node(s) didn't match node selector. 
- Reason: Unschedulable - Status: True - Type: Unschedulable - elasticsearch-cdm-mkkdys93-2: - Node Count: 2 - Pods: - Client: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: - Data: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: - Master: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: ----- - -A status message similar to the following indicates that the requested PVC could not bind to PV: - -.Example output -[source,terminal] ----- - Node Conditions: - elasticsearch-cdm-mkkdys93-1: - Last Transition Time: 2019-06-26T03:37:32Z - Message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times) - Reason: Unschedulable - Status: True - Type: Unschedulable ----- - -A status message similar to the following indicates that the Fluentd pods cannot be scheduled because the node selector did not match any nodes: - -.Example output -[source,yaml] ----- -Status: - Collection: - Logs: - Fluentd Status: - Daemon Set: fluentd - Nodes: - Pods: - Failed: - Not Ready: - Ready: ----- diff --git a/modules/cluster-logging-collector-limits.adoc b/modules/cluster-logging-collector-limits.adoc deleted file mode 100644 index efd726c1acbc..000000000000 --- a/modules/cluster-logging-collector-limits.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-limits_{context}"] -= Configure log collector CPU and memory limits - -The log collector allows for adjustments to both the CPU and memory limits. - -.Procedure - -* Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - collection: - type: fluentd - resources: - limits: <1> - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi -# ... ----- -<1> Specify the CPU and memory limits and requests as needed. The values shown are the default values. - -//// -[source,yaml] ----- -$ oc edit ClusterLogging instance - -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - collection: - logs: - rsyslog: - resources: - limits: <1> - memory: 358Mi - requests: - cpu: 100m - memory: 358Mi ----- -<1> Specify the CPU and memory limits and requests as needed. The values shown are the default values. -//// diff --git a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-cloudwatch.adoc deleted file mode 100644 index 7804264fa847..000000000000 --- a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc +++ /dev/null @@ -1,301 +0,0 @@ -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-cloudwatch_{context}"] -= Forwarding logs to Amazon CloudWatch - -You can forward logs to Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). You can forward logs to CloudWatch in addition to, or instead of, the default log store. 
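
The procedure that follows assumes the AWS credentials you place in the secret are allowed to write to CloudWatch Logs. The exact policy depends on your AWS account setup; the following is a minimal sketch of the kind of IAM permissions the collector typically needs (the action names come from the CloudWatch Logs API, and the broad `Resource` scope shown here should be narrowed for production use):

[source,json]
----
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:DescribeLogGroups",
        "logs:DescribeLogStreams",
        "logs:PutLogEvents",
        "logs:PutRetentionPolicy"
      ],
      "Resource": "arn:aws:logs:*:*:*"
    }
  ]
}
----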
- -To configure log forwarding to CloudWatch, you must create a `ClusterLogForwarder` custom resource (CR) with an output for CloudWatch, and a pipeline that uses the output. - -.Procedure - -. Create a `Secret` YAML file that uses the `aws_access_key_id` and `aws_secret_access_key` fields to specify your base64-encoded AWS credentials. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: cw-secret - namespace: openshift-logging -data: - aws_access_key_id: QUtJQUlPU0ZPRE5ON0VYQU1QTEUK - aws_secret_access_key: d0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQo= ----- - -. Create the secret. For example: -+ -[source,terminal] ----- -$ oc apply -f cw-secret.yaml ----- - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object. In the file, specify the name of the secret. For example: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: cw <4> - type: cloudwatch <5> - cloudwatch: - groupBy: logType <6> - groupPrefix: <7> - region: us-east-2 <8> - secret: - name: cw-secret <9> - pipelines: - - name: infra-logs <10> - inputRefs: <11> - - infrastructure - - audit - - application - outputRefs: - - cw <12> ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the `cloudwatch` type. -<6> Optional: Specify how to group the logs: -+ -* `logType` creates log groups for each log type. -* `namespaceName` creates a log group for each application name space. It also creates separate log groups for infrastructure and audit logs. -* `namespaceUUID` creates a new log groups for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. -<7> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. -<8> Specify the AWS region. -<9> Specify the name of the secret that contains your AWS credentials. -<10> Optional: Specify a name for the pipeline. -<11> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<12> Specify the name of the output to use when forwarding logs with this pipeline. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -.Example: Using ClusterLogForwarder with Amazon CloudWatch - -Here, you see an example `ClusterLogForwarder` custom resource (CR) and the log data that it outputs to Amazon CloudWatch. - -Suppose that you are running -ifndef::openshift-rosa[] -an {product-title} cluster -endif::[] -ifdef::openshift-rosa[] -a ROSA cluster -endif::[] -named `mycluster`. The following command returns the cluster's `infrastructureName`, which you will use to compose `aws` commands later on: - -[source,terminal] ----- -$ oc get Infrastructure/cluster -ojson | jq .status.infrastructureName -"mycluster-7977k" ----- - -To generate log data for this example, you run a `busybox` pod in a namespace called `app`. 
The `busybox` pod writes a message to stdout every three seconds: - -[source,terminal] ----- -$ oc run busybox --image=busybox -- sh -c 'while true; do echo "My life is my message"; sleep 3; done' ----- - -[source,terminal] ----- -$ oc logs -f busybox ----- - -.Example output -[source,terminal] ----- -My life is my message -My life is my message -My life is my message -... ----- - -You can look up the UUID of the `app` namespace where the `busybox` pod runs: - -[source,terminal] ----- -$ oc get ns/app -ojson | jq .metadata.uid -"794e1e1a-b9f5-4958-a190-e76a9b53d7bf" ----- - -In your `ClusterLogForwarder` custom resource (CR), you configure the `infrastructure`, `audit`, and `application` log types as inputs to the `all-logs` pipeline. You also connect this pipeline to `cw` output, which forwards the logs to a CloudWatch instance in the `us-east-2` region: - -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: cw - type: cloudwatch - cloudwatch: - groupBy: logType - region: us-east-2 - secret: - name: cw-secret - pipelines: - - name: all-logs - inputRefs: - - infrastructure - - audit - - application - outputRefs: - - cw ----- - -Each region in CloudWatch contains three levels of objects: - -* log group -** log stream -*** log event - - -With `groupBy: logType` in the `ClusterLogForwarding` CR, the three log types in the `inputRefs` produce three log groups in Amazon Cloudwatch: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.application" -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -Each of the log groups contains log streams: - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.application | jq .logStreams[].logStreamName -"kubernetes.var.log.containers.busybox_app_busybox-da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76.log" ----- - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.audit | jq .logStreams[].logStreamName -"ip-10-0-131-228.us-east-2.compute.internal.k8s-audit.log" -"ip-10-0-131-228.us-east-2.compute.internal.linux-audit.log" -"ip-10-0-131-228.us-east-2.compute.internal.openshift-audit.log" -... ----- - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.infrastructure | jq .logStreams[].logStreamName -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-69f9fd9b58-zqzw5_openshift-oauth-apiserver_oauth-apiserver-453c5c4ee026fe20a6139ba6b1cdd1bed25989c905bf5ac5ca211b7cbb5c3d7b.log" -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-797774f7c5-lftrx_openshift-apiserver_openshift-apiserver-ce51532df7d4e4d5f21c4f4be05f6575b93196336be0027067fd7d93d70f66a4.log" -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-797774f7c5-lftrx_openshift-apiserver_openshift-apiserver-check-endpoints-82a9096b5931b5c3b1d6dc4b66113252da4a6472c9fff48623baee761911a9ef.log" -... ----- - -Each log stream contains log events. 
To see a log event from the `busybox` Pod, you specify its log stream from the `application` log group: - -[source,terminal] ----- -$ aws logs get-log-events --log-group-name mycluster-7977k.application --log-stream-name kubernetes.var.log.containers.busybox_app_busybox-da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76.log -{ - "events": [ - { - "timestamp": 1629422704178, - "message": "{\"docker\":{\"container_id\":\"da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76\"},\"kubernetes\":{\"container_name\":\"busybox\",\"namespace_name\":\"app\",\"pod_name\":\"busybox\",\"container_image\":\"docker.io/library/busybox:latest\",\"container_image_id\":\"docker.io/library/busybox@sha256:0f354ec1728d9ff32edcd7d1b8bbdfc798277ad36120dc3dc683be44524c8b60\",\"pod_id\":\"870be234-90a3-4258-b73f-4f4d6e2777c7\",\"host\":\"ip-10-0-216-3.us-east-2.compute.internal\",\"labels\":{\"run\":\"busybox\"},\"master_url\":\"https://kubernetes.default.svc\",\"namespace_id\":\"794e1e1a-b9f5-4958-a190-e76a9b53d7bf\",\"namespace_labels\":{\"kubernetes_io/metadata_name\":\"app\"}},\"message\":\"My life is my message\",\"level\":\"unknown\",\"hostname\":\"ip-10-0-216-3.us-east-2.compute.internal\",\"pipeline_metadata\":{\"collector\":{\"ipaddr4\":\"10.0.216.3\",\"inputname\":\"fluent-plugin-systemd\",\"name\":\"fluentd\",\"received_at\":\"2021-08-20T01:25:08.085760+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-08-20T01:25:04.178986+00:00\",\"viaq_index_name\":\"app-write\",\"viaq_msg_id\":\"NWRjZmUyMWQtZjgzNC00MjI4LTk3MjMtNTk3NmY3ZjU4NDk1\",\"log_type\":\"application\",\"time\":\"2021-08-20T01:25:04+00:00\"}", - "ingestionTime": 1629422744016 - }, -... ----- - -.Example: Customizing the prefix in log group names - -In the log group names, you can replace the default `infrastructureName` prefix, `mycluster-7977k`, with an arbitrary string like `demo-group-prefix`. To make this change, you update the `groupPrefix` field in the `ClusterLogForwarding` CR: - -[source,yaml] ----- -cloudwatch: - groupBy: logType - groupPrefix: demo-group-prefix - region: us-east-2 ----- - -The value of `groupPrefix` replaces the default `infrastructureName` prefix: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"demo-group-prefix.application" -"demo-group-prefix.audit" -"demo-group-prefix.infrastructure" ----- - -.Example: Naming log groups after application namespace names - -For each application namespace in your cluster, you can create a log group in CloudWatch whose name is based on the name of the application namespace. - -If you delete an application namespace object and create a new one that has the same name, CloudWatch continues using the same log group as before. - -If you consider successive application namespace objects that have the same name as equivalent to each other, use the approach described in this example. Otherwise, if you need to distinguish the resulting log groups from each other, see the following "Naming log groups for application namespace UUIDs" section instead. - -To create application log groups whose names are based on the names of the application namespaces, you set the value of the `groupBy` field to `namespaceName` in the `ClusterLogForwarder` CR: - -[source,terminal] ----- -cloudwatch: - groupBy: namespaceName - region: us-east-2 ----- - -Setting `groupBy` to `namespaceName` affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. 
- -In Amazon Cloudwatch, the namespace name appears at the end of each log group name. Because there is a single application namespace, "app", the following output shows a new `mycluster-7977k.app` log group instead of `mycluster-7977k.application`: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.app" -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -If the cluster in this example had contained multiple application namespaces, the output would show multiple log groups, one for each namespace. - -The `groupBy` field affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. - -.Example: Naming log groups after application namespace UUIDs - -For each application namespace in your cluster, you can create a log group in CloudWatch whose name is based on the UUID of the application namespace. - -If you delete an application namespace object and create a new one, CloudWatch creates a new log group. - -If you consider successive application namespace objects with the same name as different from each other, use the approach described in this example. Otherwise, see the preceding "Example: Naming log groups for application namespace names" section instead. - -To name log groups after application namespace UUIDs, you set the value of the `groupBy` field to `namespaceUUID` in the `ClusterLogForwarder` CR: - -[source,terminal] ----- -cloudwatch: - groupBy: namespaceUUID - region: us-east-2 ----- - -In Amazon Cloudwatch, the namespace UUID appears at the end of each log group name. Because there is a single application namespace, "app", the following output shows a new `mycluster-7977k.794e1e1a-b9f5-4958-a190-e76a9b53d7bf` log group instead of `mycluster-7977k.application`: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.794e1e1a-b9f5-4958-a190-e76a9b53d7bf" // uid of the "app" namespace -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -The `groupBy` field affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. diff --git a/modules/cluster-logging-collector-log-forward-es.adoc b/modules/cluster-logging-collector-log-forward-es.adoc deleted file mode 100644 index c335d5277c39..000000000000 --- a/modules/cluster-logging-collector-log-forward-es.adoc +++ /dev/null @@ -1,134 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-es_{context}"] -= Forwarding logs to an external Elasticsearch instance - -You can forward logs to an external Elasticsearch instance in addition to, or instead of, the internal log store. You are responsible for configuring the external log aggregator to receive log data from {product-title}. - -To configure log forwarding to an external Elasticsearch instance, you must create a `ClusterLogForwarder` custom resource (CR) with an output to that instance, and a pipeline that uses the output. The external Elasticsearch output can use the HTTP (insecure) or HTTPS (secure HTTP) connection. - -To forward logs to both an external and the internal Elasticsearch instance, create outputs and pipelines to the external instance and a pipeline that uses the `default` output to forward logs to the internal instance. 
- -[NOTE] -==== -If you only want to forward logs to an internal Elasticsearch instance, you do not need to create a `ClusterLogForwarder` CR. -==== - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: # <1> - namespace: # <2> -spec: - serviceAccountName: # <3> - outputs: - - name: elasticsearch-example # <4> - type: elasticsearch # <5> - elasticsearch: - version: 8 # <6> - url: http://elasticsearch.example.com:9200 # <7> - secret: - name: es-secret # <8> - pipelines: - - name: application-logs # <9> - inputRefs: # <10> - - application - - audit - outputRefs: - - elasticsearch-example # <11> - - default # <12> - labels: - myLabel: "myValue" # <13> -# ... ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the `elasticsearch` type. -<6> Specify the Elasticsearch version. This can be `6`, `7`, or `8`. -<7> Specify the URL and port of the external Elasticsearch instance as a valid absolute URL. You can use the `http` (insecure) or `https` (secure HTTP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP Address. -<8> For an `https` prefix, specify the name of the secret required by the endpoint for TLS communication. The secret must contain a `ca-bundle.crt` key that points to the certificate it represents. Otherwise, for `http` and `https` prefixes, you can specify a secret that contains a username and password. In legacy implementations, the secret must exist in the `openshift-logging` project. For more information, see the following "Example: Setting a secret that contains a username and password." -<9> Optional: Specify a name for the pipeline. -<10> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<11> Specify the name of the output to use when forwarding logs with this pipeline. -<12> Optional: Specify the `default` output to send the logs to the internal Elasticsearch instance. -<13> Optional: String. One or more labels to add to the logs. - -. Apply the `ClusterLogForwarder` CR: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -.Example: Setting a secret that contains a username and password - -You can use a secret that contains a username and password to authenticate a secure connection to an external Elasticsearch instance. - -For example, if you cannot use mutual TLS (mTLS) keys because a third party operates the Elasticsearch instance, you can use HTTP or HTTPS and set a secret that contains the username and password. - -. Create a `Secret` YAML file similar to the following example. Use base64-encoded values for the `username` and `password` fields. The secret type is opaque by default. 
-+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: openshift-test-secret -data: - username: - password: -# ... ----- - -. Create the secret: -+ -[source,terminal] ----- -$ oc create secret -n openshift-logging openshift-test-secret.yaml ----- - -. Specify the name of the secret in the `ClusterLogForwarder` CR: -+ -[source,yaml] ----- -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: elasticsearch - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 - secret: - name: openshift-test-secret -# ... ----- -+ -[NOTE] -==== -In the value of the `url` field, the prefix can be `http` or `https`. -==== - -. Apply the CR object: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-fluentd.adoc b/modules/cluster-logging-collector-log-forward-fluentd.adoc deleted file mode 100644 index 7e00eb8ea741..000000000000 --- a/modules/cluster-logging-collector-log-forward-fluentd.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-fluentd_{context}"] -= Forwarding logs using the Fluentd forward protocol - -You can use the Fluentd *forward* protocol to send a copy of your logs to an external log aggregator that is configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator to receive the logs from {product-title}. - -To configure log forwarding using the *forward* protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the Fluentd servers, and pipelines that use those outputs. The Fluentd output can use a TCP (insecure) or TLS (secure TCP) connection. - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: fluentd-server-secure <3> - type: fluentdForward <4> - url: 'tls://fluentdserver.security.example.com:24224' <5> - secret: <6> - name: fluentd-secret - - name: fluentd-server-insecure - type: fluentdForward - url: 'tcp://fluentdserver.home.example.com:24224' - pipelines: - - name: forward-to-fluentd-secure <7> - inputRefs: <8> - - application - - audit - outputRefs: - - fluentd-server-secure <9> - - default <10> - labels: - clusterId: "C1234" <11> - - name: forward-to-fluentd-insecure <12> - inputRefs: - - infrastructure - outputRefs: - - fluentd-server-insecure - labels: - clusterId: "C1234" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `fluentdForward` type. -<5> Specify the URL and port of the external Fluentd instance as a valid absolute URL. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If you are using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. 
The secret must exist in the `openshift-logging` project and must contain a `ca-bundle.crt` key that points to the certificate it represents. -<7> Optional: Specify a name for the pipeline. -<8> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<9> Specify the name of the output to use when forwarding logs with this pipeline. -<10> Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -<11> Optional: String. One or more labels to add to the logs. -<12> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[id="cluster-logging-collector-log-forward-nano-precision"] -== Enabling nanosecond precision for Logstash to ingest data from fluentd -For Logstash to ingest log data from fluentd, you must enable nanosecond precision in the Logstash configuration file. - -.Procedure -* In the Logstash configuration file, set `nanosecond_precision` to `true`. - -.Example Logstash configuration file -[source,terminal] -.... -input { tcp { codec => fluent { nanosecond_precision => true } port => 24114 } } -filter { } -output { stdout { codec => rubydebug } } -.... diff --git a/modules/cluster-logging-collector-log-forward-gcp.adoc b/modules/cluster-logging-collector-log-forward-gcp.adoc deleted file mode 100644 index a5f9191723f3..000000000000 --- a/modules/cluster-logging-collector-log-forward-gcp.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-gcp_{context}"] -= Forwarding logs to {gcp-first} - -You can forward logs to link:https://cloud.google.com/logging/docs/basic-concepts[{gcp-full} Logging] in addition to, or instead of, the internal default {product-title} log store. - -[NOTE] -==== -Using this feature with Fluentd is not supported. -==== - -.Prerequisites - -* {clo} 5.5.1 and later - -.Procedure - -. Create a secret using your link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys[Google service account key]. -+ -[source,terminal,subs="+quotes"] ----- -$ oc -n openshift-logging create secret generic gcp-secret --from-file google-application-credentials.json=__ ----- -. Create a `ClusterLogForwarder` Custom Resource YAML using the template below: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: gcp-1 - type: googleCloudLogging - secret: - name: gcp-secret - googleCloudLogging: - projectId : "openshift-gce-devel" <4> - logId : "app-gcp" <5> - pipelines: - - name: test-app - inputRefs: <6> - - application - outputRefs: - - gcp-1 ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. 
The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Set a `projectId`, `folderId`, `organizationId`, or `billingAccountId` field and its corresponding value, depending on where you want to store your logs in the link:https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy[{gcp-short} resource hierarchy]. -<5> Set the value to add to the `logName` field of the link:https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry[Log Entry]. -<6> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. - -[role="_additional-resources"] -.Additional resources -* link:https://cloud.google.com/billing/docs/concepts[{gcp-full} Billing Documentation] -* link:https://cloud.google.com/logging/docs/view/logging-query-language[{gcp-full} Logging Query Language Documentation] diff --git a/modules/cluster-logging-collector-log-forward-kafka.adoc b/modules/cluster-logging-collector-log-forward-kafka.adoc deleted file mode 100644 index c00e527724b0..000000000000 --- a/modules/cluster-logging-collector-log-forward-kafka.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE - -[id="cluster-logging-collector-log-forward-kafka_{context}"] -= Forwarding logs to a Kafka broker - -You can forward logs to an external Kafka broker in addition to, or instead of, the default log store. - -To configure log forwarding to an external Kafka instance, you must create a `ClusterLogForwarder` custom resource (CR) with an output to that instance, and a pipeline that uses the output. You can include a specific Kafka topic in the output or use the default. The Kafka output can use a TCP (insecure) or TLS (secure TCP) connection. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: app-logs <4> - type: kafka <5> - url: tls://kafka.example.devlab.com:9093/app-topic <6> - secret: - name: kafka-secret <7> - - name: infra-logs - type: kafka - url: tcp://kafka.devlab2.example.com:9093/infra-topic <8> - - name: audit-logs - type: kafka - url: tls://kafka.qelab.example.com:9093/audit-topic - secret: - name: kafka-secret-qe - pipelines: - - name: app-topic <9> - inputRefs: <10> - - application - outputRefs: <11> - - app-logs - labels: - logType: "application" <12> - - name: infra-topic <13> - inputRefs: - - infrastructure - outputRefs: - - infra-logs - labels: - logType: "infra" - - name: audit-topic - inputRefs: - - audit - outputRefs: - - audit-logs - labels: - logType: "audit" ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the `kafka` type. 
-<6> Specify the URL and port of the Kafka broker as a valid absolute URL, optionally with a specific topic. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<7> If you are using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must contain a `ca-bundle.crt` key that points to the certificate it represents. In legacy implementations, the secret must exist in the `openshift-logging` project. -<8> Optional: To send an insecure output, use a `tcp` prefix in front of the URL. Also omit the `secret` key and its `name` from this output. -<9> Optional: Specify a name for the pipeline. -<10> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<11> Specify the name of the output to use when forwarding logs with this pipeline. -<12> Optional: String. One or more labels to add to the logs. -<13> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -. Optional: To forward a single output to multiple Kafka brokers, specify an array of Kafka brokers as shown in the following example: -+ -[source,yaml] ----- -# ... -spec: - outputs: - - name: app-logs - type: kafka - secret: - name: kafka-secret-dev - kafka: <1> - brokers: <2> - - tls://kafka-broker1.example.com:9093/ - - tls://kafka-broker2.example.com:9093/ - topic: app-topic <3> -# ... ----- -<1> Specify a `kafka` key that has a `brokers` and `topic` key. -<2> Use the `brokers` key to specify an array of one or more brokers. -<3> Use the `topic` key to specify the target topic that receives the logs. - -. Apply the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc b/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc deleted file mode 100644 index 40c7dc284a3b..000000000000 --- a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-logs-from-application-pods_{context}"] -= Forwarding application logs from specific pods - -As a cluster administrator, you can use Kubernetes pod labels to gather log data from specific pods and forward it to a log collector. - -Suppose that you have an application composed of pods running alongside other pods in various namespaces. If those pods have labels that identify the application, you can gather and output their log data to a specific log collector. - -To specify the pod labels, you use one or more `matchLabels` key-value pairs. If you specify multiple key-value pairs, the pods must match all of them to be selected. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object. 
In the file, specify the pod labels using simple equality-based selectors under `inputs[].name.application.selector.matchLabels`, as shown in the following example. -+ -.Example `ClusterLogForwarder` CR YAML file -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - pipelines: - - inputRefs: [ myAppLogData ] <3> - outputRefs: [ default ] <4> - inputs: <5> - - name: myAppLogData - application: - selector: - matchLabels: <6> - environment: production - app: nginx - namespaces: <7> - - app1 - - app2 - outputs: <8> - - - ... ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> Specify one or more comma-separated values from `inputs[].name`. -<4> Specify one or more comma-separated values from `outputs[]`. -<5> Define a unique `inputs[].name` for each application that has a unique set of pod labels. -<6> Specify the key-value pairs of pod labels whose log data you want to gather. You must specify both a key and value, not just a key. To be selected, the pods must match all the key-value pairs. -<7> Optional: Specify one or more namespaces. -<8> Specify one or more outputs to forward your log data to. - -. Optional: To restrict the gathering of log data to specific namespaces, use `inputs[].name.application.namespaces`, as shown in the preceding example. - -. Optional: You can send log data from additional applications that have different pod labels to the same pipeline. -.. For each unique combination of pod labels, create an additional `inputs[].name` section similar to the one shown. -.. Update the `selectors` to match the pod labels of this application. -.. Add the new `inputs[].name` value to `inputRefs`. For example: -+ ----- -- inputRefs: [ myAppLogData, myOtherAppLogData ] ----- - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[role="_additional-resources"] -.Additional resources - -* For more information on `matchLabels` in Kubernetes, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements[Resources that support set-based requirements]. diff --git a/modules/cluster-logging-collector-log-forward-loki.adoc b/modules/cluster-logging-collector-log-forward-loki.adoc deleted file mode 100644 index e0323dbd3de2..000000000000 --- a/modules/cluster-logging-collector-log-forward-loki.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-loki_{context}"] -= Forwarding logs to an external Loki logging system - -You can forward logs to an external Loki logging system in addition to, or instead of, the default log store. - -To configure log forwarding to Loki, you must create a `ClusterLogForwarder` custom resource (CR) with an output to Loki, and a pipeline that uses the output. The output to Loki can use the HTTP (insecure) or HTTPS (secure HTTP) connection. - -.Prerequisites - -* You must have a Loki logging system running at the URL you specify with the `url` field in the CR. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: loki-insecure <4> - type: "loki" <5> - url: http://loki.insecure.com:3100 <6> - loki: - tenantKey: kubernetes.namespace_name - labelKeys: - - kubernetes.labels.foo - - name: loki-secure <7> - type: "loki" - url: https://loki.secure.com:3100 - secret: - name: loki-secret <8> - loki: - tenantKey: kubernetes.namespace_name <9> - labelKeys: - - kubernetes.labels.foo <10> - pipelines: - - name: application-logs <11> - inputRefs: <12> - - application - - audit - outputRefs: <13> - - loki-secure ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the type as `"loki"`. -<6> Specify the URL and port of the Loki system as a valid absolute URL. You can use the `http` (insecure) or `https` (secure HTTP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP Address. Loki's default port for HTTP(S) communication is 3100. -<7> For a secure connection, you can specify an `https` or `http` URL that you authenticate by specifying a `secret`. -<8> For an `https` prefix, specify the name of the secret required by the endpoint for TLS communication. The secret must contain a `ca-bundle.crt` key that points to the certificates it represents. Otherwise, for `http` and `https` prefixes, you can specify a secret that contains a username and password. In legacy implementations, the secret must exist in the `openshift-logging` project. For more information, see the following "Example: Setting a secret that contains a username and password." -<9> Optional: Specify a metadata key field to generate values for the `TenantID` field in Loki. For example, setting `tenantKey: kubernetes.namespace_name` uses the names of the Kubernetes namespaces as values for tenant IDs in Loki. To see which other log record fields you can specify, see the "Log Record Fields" link in the following "Additional resources" section. -<10> Optional: Specify a list of metadata field keys to replace the default Loki labels. Loki label names must match the regular expression `[a-zA-Z_:][a-zA-Z0-9_:]*`. Illegal characters in metadata keys are replaced with `_` to form the label name. For example, the `kubernetes.labels.foo` metadata key becomes Loki label `kubernetes_labels_foo`. If you do not set `labelKeys`, the default value is: `[log_type, kubernetes.namespace_name, kubernetes.pod_name, kubernetes_host]`. Keep the set of labels small because Loki limits the size and number of labels allowed. See link:https://grafana.com/docs/loki/latest/configuration/#limits_config[Configuring Loki, limits_config]. You can still query based on any log record field using query filters. -<11> Optional: Specify a name for the pipeline. -<12> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. 
-<13> Specify the name of the output to use when forwarding logs with this pipeline. -+ -[NOTE] -==== -Because Loki requires log streams to be correctly ordered by timestamp, `labelKeys` always includes the `kubernetes_host` label set, even if you do not specify it. This inclusion ensures that each stream originates from a single host, which prevents timestamps from becoming disordered due to clock differences on different hosts. -==== - -. Apply the `ClusterLogForwarder` CR object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-project.adoc b/modules/cluster-logging-collector-log-forward-project.adoc deleted file mode 100644 index 88192094c2d4..000000000000 --- a/modules/cluster-logging-collector-log-forward-project.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-project_{context}"] -= Forwarding application logs from specific projects - -You can forward a copy of the application logs from specific projects to an external log aggregator, in addition to, or instead of, using the internal log store. You must also configure the external log aggregator to receive log data from {product-title}. - -To configure forwarding application logs from a project, you must create a `ClusterLogForwarder` custom resource (CR) with at least one input from a project, optional outputs for other log aggregators, and pipelines that use those inputs and outputs. - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: fluentd-server-secure <3> - type: fluentdForward <4> - url: 'tls://fluentdserver.security.example.com:24224' <5> - secret: <6> - name: fluentd-secret - - name: fluentd-server-insecure - type: fluentdForward - url: 'tcp://fluentdserver.home.example.com:24224' - inputs: <7> - - name: my-app-logs - application: - namespaces: - - my-project <8> - pipelines: - - name: forward-to-fluentd-insecure <9> - inputRefs: <10> - - my-app-logs - outputRefs: <11> - - fluentd-server-insecure - labels: - project: "my-project" <12> - - name: forward-to-fluentd-secure <13> - inputRefs: - - application <14> - - audit - - infrastructure - outputRefs: - - fluentd-server-secure - - default - labels: - clusterId: "C1234" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> The name of the output. -<4> The output type: `elasticsearch`, `fluentdForward`, `syslog`, or `kafka`. -<5> The URL and port of the external log aggregator as a valid absolute URL. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. 
The secret must exist in the `openshift-logging` project and have *tls.crt*, *tls.key*, and *ca-bundle.crt* keys that each point to the certificates they represent. -<7> The configuration for an input to filter application logs from the specified projects. -<8> If no namespace is specified, logs are collected from all namespaces. -<9> The pipeline configuration directs logs from a named input to a named output. In this example, a pipeline named `forward-to-fluentd-insecure` forwards logs from an input named `my-app-logs` to an output named `fluentd-server-insecure`. -<10> A list of inputs. -<11> The name of the output to use. -<12> Optional: String. One or more labels to add to the logs. -<13> Configuration for a pipeline to send logs to other log aggregators. -+ -* Optional: Specify a name for the pipeline. -* Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -* Specify the name of the output to use when forwarding logs with this pipeline. -* Optional: Specify the `default` output to forward logs to the default log store. -* Optional: String. One or more labels to add to the logs. -<14> Note that application logs from all namespaces are collected when using this configuration. - -. Apply the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc deleted file mode 100644 index b15719f75dd6..000000000000 --- a/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-external.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-secret-cloudwatch_{context}"] -= Creating a secret for AWS CloudWatch with an existing AWS role -If you have an existing role for AWS, you can create a secret for AWS with STS using the `oc create secret --from-literal` command. - -.Procedure - -* In the CLI, enter the following to generate a secret for AWS: -+ -[source,terminal] ----- -$ oc create secret generic cw-sts-secret -n openshift-logging --from-literal=role_arn=arn:aws:iam::123456789012:role/my-role_with-permissions ----- -+ -.Example Secret -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: openshift-logging - name: my-secret-name -stringData: - role_arn: arn:aws:iam::123456789012:role/my-role_with-permissions ----- diff --git a/modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc deleted file mode 100644 index bbd9ddc0ce74..000000000000 --- a/modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-sts-cloudwatch_{context}"] -= Forwarding logs to Amazon CloudWatch from STS enabled clusters - -For clusters with AWS Security Token Service (STS) enabled, you can create an AWS service account manually or create a credentials request by using the Cloud Credential Operator (CCO) utility `ccoctl`. - -.Prerequisites - -* {logging-title-uc}: 5.5 and later - -.Procedure - -. 
Create a `CredentialsRequest` custom resource YAML by using the template below: -+ -.CloudWatch credentials request template -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: -credrequest - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - action: - - logs:PutLogEvents - - logs:CreateLogGroup - - logs:PutRetentionPolicy - - logs:CreateLogStream - - logs:DescribeLogGroups - - logs:DescribeLogStreams - effect: Allow - resource: arn:aws:logs:*:*:* - secretRef: - name: - namespace: openshift-logging - serviceAccountNames: - - logcollector ----- -+ -. Use the `ccoctl` command to create a role for AWS using your `CredentialsRequest` CR. With the `CredentialsRequest` object, this `ccoctl` command creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy that grants permissions to perform operations on CloudWatch resources. This command also creates a YAML configuration file in `//manifests/openshift-logging--credentials.yaml`. This secret file contains the `role_arn` key/value used during authentication with the AWS IAM identity provider. -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ ---name= \ ---region= \ ---credentials-requests-dir=/credrequests \ ---identity-provider-arn=arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com <1> ----- -<1> is the name used to tag your cloud resources and should match the name used during your STS cluster install -+ -. Apply the secret created: -[source,terminal] -+ ----- -$ oc apply -f output/manifests/openshift-logging--credentials.yaml ----- -+ -. Create or edit a `ClusterLogForwarder` custom resource: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: clf-collector <3> - outputs: - - name: cw <4> - type: cloudwatch <5> - cloudwatch: - groupBy: logType <6> - groupPrefix: <7> - region: us-east-2 <8> - secret: - name: <9> - pipelines: - - name: to-cloudwatch <10> - inputRefs: <11> - - infrastructure - - audit - - application - outputRefs: - - cw <12> ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> Specify the `clf-collector` service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the `cloudwatch` type. -<6> Optional: Specify how to group the logs: -+ -* `logType` creates log groups for each log type. -* `namespaceName` creates a log group for each application name space. Infrastructure and audit logs are unaffected, remaining grouped by `logType`. -* `namespaceUUID` creates a new log groups for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. -<7> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. -<8> Specify the AWS region. -<9> Specify the name of the secret that contains your AWS credentials. -<10> Optional: Specify a name for the pipeline. 
-<11> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<12> Specify the name of the output to use when forwarding logs with this pipeline. diff --git a/modules/cluster-logging-collector-log-forward-syslog.adoc b/modules/cluster-logging-collector-log-forward-syslog.adoc deleted file mode 100644 index 2cdcd5412bb8..000000000000 --- a/modules/cluster-logging-collector-log-forward-syslog.adoc +++ /dev/null @@ -1,188 +0,0 @@ -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-syslog_{context}"] -= Forwarding logs using the syslog protocol - -You can use the *syslog* link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocol to send a copy of your logs to an external log aggregator that is configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator, such as a syslog server, to receive the logs from {product-title}. - -To configure log forwarding using the *syslog* protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the syslog servers, and pipelines that use those outputs. The syslog output can use a UDP, TCP, or TLS connection. - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: rsyslog-east <4> - type: syslog <5> - syslog: <6> - facility: local0 - rfc: RFC3164 - payloadKey: message - severity: informational - url: 'tls://rsyslogserver.east.example.com:514' <7> - secret: <8> - name: syslog-secret - - name: rsyslog-west - type: syslog - syslog: - appName: myapp - facility: user - msgID: mymsg - procID: myproc - rfc: RFC5424 - severity: debug - url: 'tcp://rsyslogserver.west.example.com:514' - pipelines: - - name: syslog-east <9> - inputRefs: <10> - - audit - - application - outputRefs: <11> - - rsyslog-east - - default <12> - labels: - secure: "true" <13> - syslog: "east" - - name: syslog-west <14> - inputRefs: - - infrastructure - outputRefs: - - rsyslog-west - - default - labels: - syslog: "west" ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the `syslog` type. -<6> Optional: Specify the syslog parameters, listed below. -<7> Specify the URL and port of the external syslog instance. You can use the `udp` (insecure), `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<8> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must contain a `ca-bundle.crt` key that points to the certificate it represents. 
In legacy implementations, the secret must exist in the `openshift-logging` project. -<9> Optional: Specify a name for the pipeline. -<10> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<11> Specify the name of the output to use when forwarding logs with this pipeline. -<12> Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -<13> Optional: String. One or more labels to add to the logs. Quote values like "true" so they are recognized as string values, not as a boolean. -<14> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[id=cluster-logging-collector-log-forward-examples-syslog-log-source] -== Adding log source information to message output - -You can add `namespace_name`, `pod_name`, and `container_name` elements to the `message` field of the record by adding the `AddLogSource` field to your `ClusterLogForwarder` custom resource (CR). - -[source,yaml] ----- - spec: - outputs: - - name: syslogout - syslog: - addLogSource: true - facility: user - payloadKey: message - rfc: RFC3164 - severity: debug - tag: mytag - type: syslog - url: tls://syslog-receiver.openshift-logging.svc:24224 - pipelines: - - inputRefs: - - application - name: test-app - outputRefs: - - syslogout ----- - -[NOTE] -==== -This configuration is compatible with both RFC3164 and RFC5424. -==== - -.Example syslog message output without `AddLogSource` -[source, text] ----- -<15>1 2020-11-15T17:06:14+00:00 fluentd-9hkb4 mytag - - - {"msgcontent"=>"Message Contents", "timestamp"=>"2020-11-15 17:06:09", "tag_key"=>"rec_tag", "index"=>56} ----- - -.Example syslog message output with `AddLogSource` - -[source, text] ----- -<15>1 2020-11-16T10:49:37+00:00 crc-j55b9-master-0 mytag - - - namespace_name=clo-test-6327,pod_name=log-generator-ff9746c49-qxm7l,container_name=log-generator,message={"msgcontent":"My life is my message", "timestamp":"2020-11-16 10:49:36", "tag_key":"rec_tag", "index":76} ----- - -[id=cluster-logging-collector-log-forward-examples-syslog-parms] -== Syslog parameters - -You can configure the following for the `syslog` outputs. For more information, see the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] RFC. - -* facility: The link:https://tools.ietf.org/html/rfc5424#section-6.2.1[syslog facility]. The value can be a decimal integer or a case-insensitive keyword: -** `0` or `kern` for kernel messages -** `1` or `user` for user-level messages, the default. 
-** `2` or `mail` for the mail system -** `3` or `daemon` for system daemons -** `4` or `auth` for security/authentication messages -** `5` or `syslog` for messages generated internally by syslogd -** `6` or `lpr` for the line printer subsystem -** `7` or `news` for the network news subsystem -** `8` or `uucp` for the UUCP subsystem -** `9` or `cron` for the clock daemon -** `10` or `authpriv` for security authentication messages -** `11` or `ftp` for the FTP daemon -** `12` or `ntp` for the NTP subsystem -** `13` or `security` for the syslog audit log -** `14` or `console` for the syslog alert log -** `15` or `solaris-cron` for the scheduling daemon -** `16`–`23` or `local0` – `local7` for locally used facilities -* Optional: `payloadKey`: The record field to use as payload for the syslog message. -+ -[NOTE] -==== -Configuring the `payloadKey` parameter prevents other parameters from being forwarded to the syslog. -==== -+ -* rfc: The RFC to be used for sending logs using syslog. The default is RFC5424. -* severity: The link:https://tools.ietf.org/html/rfc5424#section-6.2.1[syslog severity] to set on outgoing syslog records. The value can be a decimal integer or a case-insensitive keyword: -** `0` or `Emergency` for messages indicating the system is unusable -** `1` or `Alert` for messages indicating action must be taken immediately -** `2` or `Critical` for messages indicating critical conditions -** `3` or `Error` for messages indicating error conditions -** `4` or `Warning` for messages indicating warning conditions -** `5` or `Notice` for messages indicating normal but significant conditions -** `6` or `Informational` for messages indicating informational messages -** `7` or `Debug` for messages indicating debug-level messages, the default -* tag: Tag specifies a record field to use as a tag on the syslog message. -* trimPrefix: Remove the specified prefix from the tag. - -[id=cluster-logging-collector-log-forward-examples-syslog-5424] -== Additional RFC5424 syslog parameters - -The following parameters apply to RFC5424: - -* appName: The APP-NAME is a free-text string that identifies the application that sent the log. Must be specified for `RFC5424`. -* msgID: The MSGID is a free-text string that identifies the type of message. Must be specified for `RFC5424`. -* procID: The PROCID is a free-text string. A change in the value indicates a discontinuity in syslog reporting. Must be specified for `RFC5424`. diff --git a/modules/cluster-logging-collector-log-forwarding-about.adoc b/modules/cluster-logging-collector-log-forwarding-about.adoc deleted file mode 100644 index 81c11ee7d605..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-about.adoc +++ /dev/null @@ -1,167 +0,0 @@ -:_mod-docs-content-type: CONCEPT -[id="cluster-logging-collector-log-forwarding-about_{context}"] -= About forwarding logs to third-party systems - -To send logs to specific endpoints inside and outside your {product-title} cluster, you specify a combination of _outputs_ and _pipelines_ in a `ClusterLogForwarder` custom resource (CR). You can also use _inputs_ to forward the application logs associated with a specific project to an endpoint. Authentication is provided by a Kubernetes _Secret_ object. - -_pipeline_:: Defines simple routing from one log type to one or more outputs, or which logs you want to send. The log types are one of the following: -+ --- -* `application`. Container logs generated by user applications running in the cluster, except infrastructure container applications. 
- -* `infrastructure`. Container logs from pods that run in the `openshift*`, `kube*`, or `default` projects and journal logs sourced from node file system. - -* `audit`. Audit logs generated by the node audit system, `auditd`, Kubernetes API server, OpenShift API server, and OVN network. --- -+ -You can add labels to outbound log messages by using `key:value` pairs in the pipeline. For example, you might add a label to messages that are forwarded to other data centers or label the logs by type. Labels that are added to objects are also forwarded with the log message. - -_input_:: Forwards the application logs associated with a specific project to a pipeline. -+ --- -In the pipeline, you define which log types to forward using an `inputRef` parameter and where to forward the logs to using an `outputRef` parameter. --- -+ - -_Secret_:: A `key:value map` that contains confidential data such as user credentials. - -Note the following: - -* If you do not define a pipeline for a log type, the logs of the undefined types are dropped. For example, if you specify a pipeline for the `application` and `audit` types, but do not specify a pipeline for the `infrastructure` type, `infrastructure` logs are dropped. - -* You can use multiple types of outputs in the `ClusterLogForwarder` custom resource (CR) to send logs to servers that support different protocols. - -The following example forwards the audit logs to a secure external Elasticsearch instance, the infrastructure logs to an insecure external Elasticsearch instance, the application logs to a Kafka broker, and the application logs from the `my-project` project to the internal Elasticsearch instance. - -.Sample log forwarding outputs and pipelines -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: elasticsearch-secure <4> - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 - secret: - name: elasticsearch - - name: elasticsearch-insecure <5> - type: "elasticsearch" - url: http://elasticsearch.insecure.com:9200 - - name: kafka-app <6> - type: "kafka" - url: tls://kafka.secure.com:9093/app-topic - inputs: <7> - - name: my-app-logs - application: - namespaces: - - my-project - pipelines: - - name: audit-logs <8> - inputRefs: - - audit - outputRefs: - - elasticsearch-secure - - default - labels: - secure: "true" <9> - datacenter: "east" - - name: infrastructure-logs <10> - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - labels: - datacenter: "west" - - name: my-app <11> - inputRefs: - - my-app-logs - outputRefs: - - default - - inputRefs: <12> - - application - outputRefs: - - kafka-app - labels: - datacenter: "south" ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Configuration for a secure Elasticsearch output using a secret with a secure URL. -** A name to describe the output. -** The type of output: `elasticsearch`. -** The secure URL and port of the Elasticsearch instance as a valid absolute URL, including the prefix.
-** The secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project. -<5> Configuration for an insecure Elasticsearch output: -** A name to describe the output. -** The type of output: `elasticsearch`. -** The insecure URL and port of the Elasticsearch instance as a valid absolute URL, including the prefix. -<6> Configuration for a Kafka output using client-authenticated TLS communication over a secure URL: -** A name to describe the output. -** The type of output: `kafka`. -** Specify the URL and port of the Kafka broker as a valid absolute URL, including the prefix. -<7> Configuration for an input to filter application logs from the `my-project` namespace. -<8> Configuration for a pipeline to send audit logs to the secure external Elasticsearch instance: -** A name to describe the pipeline. -** The `inputRefs` is the log type, in this example `audit`. -** The `outputRefs` is the name of the output to use, in this example `elasticsearch-secure` to forward to the secure Elasticsearch instance and `default` to forward to the internal Elasticsearch instance. -** Optional: Labels to add to the logs. -<9> Optional: String. One or more labels to add to the logs. Quote values like "true" so they are recognized as string values, not as a boolean. -<10> Configuration for a pipeline to send infrastructure logs to the insecure external Elasticsearch instance. -<11> Configuration for a pipeline to send logs from the `my-project` project to the internal Elasticsearch instance. -** A name to describe the pipeline. -** The `inputRefs` is a specific input: `my-app-logs`. -** The `outputRefs` is `default`. -** Optional: String. One or more labels to add to the logs. -<12> Configuration for a pipeline to send logs to the Kafka broker, with no pipeline name: -** The `inputRefs` is the log type, in this example `application`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -[discrete] -[id="cluster-logging-external-fluentd_{context}"] -== Fluentd log handling when the external log aggregator is unavailable - -If your external logging aggregator becomes unavailable and cannot receive logs, Fluentd continues to collect logs and stores them in a buffer. When the log aggregator becomes available, log forwarding resumes, including the buffered logs. If the buffer fills completely, Fluentd stops collecting logs. {product-title} rotates the logs and deletes them. You cannot adjust the buffer size or add a persistent volume claim (PVC) to the Fluentd daemon set or pods. - -[discrete] -== Supported Authorization Keys -Common key types are provided here. Some output types support additional specialized keys, documented with the output-specific configuration field. All secret keys are optional. Enable the security features you want by setting the relevant keys. You are responsible for creating and maintaining any additional configurations that external destinations might require, such as keys and secrets, service accounts, port openings, or global proxy configuration. OpenShift Logging does not attempt to verify mismatched combinations of authorization settings. - -Transport Layer Security (TLS):: Using a TLS URL (`+https://...+` or `+ssl://...+`) without a secret enables basic TLS server-side authentication. Additional TLS features are enabled by including a secret and setting the following optional fields: - -* `passphrase`: (string) Passphrase to decode an encoded TLS private key.
Requires `tls.key`. -* `ca-bundle.crt`: (string) File name of a customer CA for server authentication. - -Username and Password:: -* `username`: (string) Authentication user name. Requires `password`. -* `password`: (string) Authentication password. Requires `username`. - -Simple Authentication Security Layer (SASL):: -* `sasl.enable` (boolean) Explicitly enable or disable SASL. -If missing, SASL is automatically enabled when any of the other `sasl.` keys are set. -* `sasl.mechanisms`: (array) List of allowed SASL mechanism names. -If missing or empty, the system defaults are used. -* `sasl.allow-insecure`: (boolean) Allow mechanisms that send clear-text passwords. Defaults to false. - -== Creating a Secret - -You can create a secret in the directory that contains your certificate and key files by using the following command: - -[source,terminal] ----- -$ oc create secret generic -n \ - --from-file=ca-bundle.crt= \ - --from-literal=username= \ - --from-literal=password= ----- - -[NOTE] -==== -Generic or opaque secrets are recommended for best results. -==== diff --git a/modules/cluster-logging-collector-pod-location.adoc b/modules/cluster-logging-collector-pod-location.adoc deleted file mode 100644 index 5c7f5756f1d3..000000000000 --- a/modules/cluster-logging-collector-pod-location.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-pod-location_{context}"] -= Viewing logging collector pods - -You can view the logging collector pods and the corresponding nodes that they are running on. - -.Procedure - -* Run the following command in a project to view the logging collector pods and their details: -+ -[source,terminal] ----- -$ oc get pods --selector component=collector -o wide -n ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -collector-8d69v 1/1 Running 0 134m 10.130.2.30 master1.example.com -collector-bd225 1/1 Running 0 134m 10.131.1.11 master2.example.com -collector-cvrzs 1/1 Running 0 134m 10.130.0.21 master3.example.com -collector-gpqg2 1/1 Running 0 134m 10.128.2.27 worker1.example.com -collector-l9j7j 1/1 Running 0 134m 10.129.2.31 worker2.example.com ----- diff --git a/modules/cluster-logging-collector-tolerations.adoc b/modules/cluster-logging-collector-tolerations.adoc deleted file mode 100644 index 32ee32b5d4ff..000000000000 --- a/modules/cluster-logging-collector-tolerations.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/scheduling_resources/logging-taints-tolerations.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-tolerations_{context}"] -= Using tolerations to control log collector pod placement - -By default, log collector pods have the following `tolerations` configuration: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: collector-example - namespace: openshift-logging -spec: -# ... 
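# These default tolerations allow the collector pods to be scheduled on control plane (master) nodes
# and on nodes reporting disk, memory, or PID pressure or marked unschedulable, and they keep the pods
# running on nodes that become not-ready or unreachable.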
- collection: - type: vector - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - effect: NoSchedule - key: node.kubernetes.io/disk-pressure - operator: Exists - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - - effect: NoSchedule - key: node.kubernetes.io/memory-pressure - operator: Exists - - effect: NoSchedule - key: node.kubernetes.io/pid-pressure - operator: Exists - - effect: NoSchedule - key: node.kubernetes.io/unschedulable - operator: Exists -# ... ----- - -.Prerequisites - -* You have installed the {clo} and {oc-first}. - -.Procedure - -. Add a taint to a node where you want logging collector pods to schedule logging collector pods by running the following command: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -.Example command -[source,terminal] ----- -$ oc adm taint nodes node1 collector=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `collector`, value `node`, and taint effect `NoExecute`. You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and removes existing pods that do not match. - -. Edit the `collection` stanza of the `ClusterLogging` custom resource (CR) to configure a toleration for the logging collector pods: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: -# ... -spec: -# ... - collection: - type: vector - tolerations: - - key: collector <1> - operator: Exists <2> - effect: NoExecute <3> - tolerationSeconds: 6000 <4> - resources: - limits: - memory: 2Gi - requests: - cpu: 100m - memory: 1Gi -# ... ----- -<1> Specify the key that you added to the node. -<2> Specify the `Exists` operator to require the `key`/`value`/`effect` parameters to match. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration can be scheduled onto `node1`. diff --git a/modules/cluster-logging-collector-tuning.adoc b/modules/cluster-logging-collector-tuning.adoc deleted file mode 100644 index 7762eaee3f72..000000000000 --- a/modules/cluster-logging-collector-tuning.adoc +++ /dev/null @@ -1,176 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-tuning_{context}"] -= Advanced configuration for the Fluentd log forwarder - -include::snippets/logging-fluentd-dep-snip.adoc[] - -{logging-uc} includes multiple Fluentd parameters that you can use for tuning the performance of the Fluentd log forwarder. With these parameters, you can change the following Fluentd behaviors: - -* Chunk and chunk buffer sizes -* Chunk flushing behavior -* Chunk forwarding retry behavior - -Fluentd collects log data in a single blob called a _chunk_. When Fluentd creates a chunk, the chunk is considered to be in the _stage_, where the chunk gets filled with data. When the chunk is full, Fluentd moves the chunk to the _queue_, where chunks are held before being flushed, or written out to their destination. Fluentd can fail to flush a chunk for a number of reasons, such as network issues or capacity issues at the destination. 
If a chunk cannot be flushed, Fluentd retries flushing as configured. - -By default in {product-title}, Fluentd uses the _exponential backoff_ method to retry flushing, where Fluentd doubles the time it waits between attempts to retry flushing again, which helps reduce connection requests to the destination. You can disable exponential backoff and use the _periodic_ retry method instead, which retries flushing the chunks at a specified interval. - -These parameters can help you determine the trade-offs between latency and throughput. - -* To optimize Fluentd for throughput, you could use these parameters to reduce network packet count by configuring larger buffers and queues, delaying flushes, and setting longer times between retries. Be aware that larger buffers require more space on the node file system. - -* To optimize for low latency, you could use the parameters to send data as soon as possible, avoid the build-up of batches, have shorter queues and buffers, and use more frequent flush and retries. - -You can configure the chunking and flushing behavior using the following parameters in the `ClusterLogging` custom resource (CR). The parameters are then automatically added to the Fluentd config map for use by Fluentd. - -[NOTE] -==== -These parameters are: - -* Not relevant to most users. The default settings should give good general performance. -* Only for advanced users with detailed knowledge of Fluentd configuration and performance. -* Only for performance tuning. They have no effect on functional aspects of logging. -==== - -.Advanced Fluentd Configuration Parameters -[options="header"] -|=== - -|Parameter |Description |Default - -|`chunkLimitSize` -|The maximum size of each chunk. Fluentd stops writing data to a chunk when it reaches this size. Then, Fluentd sends the chunk to the queue and opens a new chunk. -|`8m` - -|`totalLimitSize` -|The maximum size of the buffer, which is the total size of the stage and the queue. If the buffer size exceeds this value, Fluentd stops adding data to chunks and fails with an error. All data not in chunks is lost. -|Approximately 15% of the node disk distributed across all outputs. - -|`flushInterval` -|The interval between chunk flushes. You can use `s` (seconds), `m` (minutes), `h` (hours), or `d` (days). -|`1s` - -|`flushMode` -a| The method to perform flushes: - -* `lazy`: Flush chunks based on the `timekey` parameter. You cannot modify the `timekey` parameter. -* `interval`: Flush chunks based on the `flushInterval` parameter. -* `immediate`: Flush chunks immediately after data is added to a chunk. -|`interval` - -|`flushThreadCount` -|The number of threads that perform chunk flushing. Increasing the number of threads improves the flush throughput, which hides network latency. -|`2` - -|`overflowAction` -a|The chunking behavior when the queue is full: - -* `throw_exception`: Raise an exception to show in the log. -* `block`: Stop data chunking until the full buffer issue is resolved. -* `drop_oldest_chunk`: Drop the oldest chunk to accept new incoming chunks. Older chunks have less value than newer chunks. -|`block` - -|`retryMaxInterval` -|The maximum time in seconds for the `exponential_backoff` retry method. -|`300s` - -|`retryType` -a|The retry method when flushing fails: - -* `exponential_backoff`: Increase the time between flush retries. Fluentd doubles the time it waits until the next retry until the `retry_max_interval` parameter is reached. -* `periodic`: Retries flushes periodically, based on the `retryWait` parameter. 
-|`exponential_backoff` - -|`retryTimeOut` -|The maximum time interval to attempt retries before the record is discarded. -|`60m` - -|`retryWait` -|The time in seconds before the next chunk flush. -|`1s` - -|=== - -For more information on the Fluentd chunk lifecycle, see link:https://docs.fluentd.org/buffer[Buffer Plugins] in the Fluentd documentation. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] -+ ----- -$ oc edit ClusterLogging instance ----- - -. Add or modify any of the following parameters: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - collection: - fluentd: - buffer: - chunkLimitSize: 8m <1> - flushInterval: 5s <2> - flushMode: interval <3> - flushThreadCount: 3 <4> - overflowAction: throw_exception <5> - retryMaxInterval: "300s" <6> - retryType: periodic <7> - retryWait: 1s <8> - totalLimitSize: 32m <9> -# ... ----- -<1> Specify the maximum size of each chunk before it is queued for flushing. -<2> Specify the interval between chunk flushes. -<3> Specify the method to perform chunk flushes: `lazy`, `interval`, or `immediate`. -<4> Specify the number of threads to use for chunk flushes. -<5> Specify the chunking behavior when the queue is full: `throw_exception`, `block`, or `drop_oldest_chunk`. -<6> Specify the maximum interval in seconds for the `exponential_backoff` chunk flushing method. -<7> Specify the retry type when chunk flushing fails: `exponential_backoff` or `periodic`. -<8> Specify the time in seconds before the next chunk flush. -<9> Specify the maximum size of the chunk buffer. - -. Verify that the Fluentd pods are redeployed: -+ -[source,terminal] ----- -$ oc get pods -l component=collector -n openshift-logging ----- - -. Check that the new values are in the `fluentd` config map: -+ -[source,terminal] ----- -$ oc extract configmap/collector-config --confirm ----- -+ -.Example fluentd.conf -[source,terminal] ----- - - @type file - path '/var/lib/fluentd/default' - flush_mode interval - flush_interval 5s - flush_thread_count 3 - retry_type periodic - retry_wait 1s - retry_max_interval 300s - retry_timeout 60m - queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32'}" - total_limit_size "#{ENV['TOTAL_LIMIT_SIZE_PER_BUFFER'] || '8589934592'}" - chunk_limit_size 8m - overflow_action throw_exception - disable_chunk_backup true - ----- diff --git a/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc b/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc deleted file mode 100644 index 38b4e93e7fa2..000000000000 --- a/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc +++ /dev/null @@ -1,117 +0,0 @@ -[id="cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_{context}"] -= Configuring JSON log data for Elasticsearch - -If your JSON logs follow more than one schema, storing them in a single index might cause type conflicts and cardinality problems. To avoid that, you must configure the `ClusterLogForwarder` custom resource (CR) to group each schema into a single output definition. This way, each schema is forwarded to a separate index. - -[IMPORTANT] -==== -If you forward JSON logs to the default Elasticsearch instance managed by OpenShift Logging, it generates new indices based on your configuration. 
To avoid performance issues associated with having too many indices, consider keeping the number of possible schemas low by standardizing to common schemas. -==== - -.Structure types - -You can use the following structure types in the `ClusterLogForwarder` CR to construct index names for the Elasticsearch log store: - -* `structuredTypeKey` is the name of a message field. The value of that field is used to construct the index name. -** `kubernetes.labels.` is the Kubernetes pod label whose value is used to construct the index name. -** `openshift.labels.` is the `pipeline.label.` element in the `ClusterLogForwarder` CR whose value is used to construct the index name. -** `kubernetes.container_name` uses the container name to construct the index name. -* `structuredTypeName`: If the `structuredTypeKey` field is not set or its key is not present, the `structuredTypeName` value is used as the structured type. When you use both the `structuredTypeKey` field and the `structuredTypeName` field together, the `structuredTypeName` value provides a fallback index name if the key in the `structuredTypeKey` field is missing from the JSON log data. - -[NOTE] -==== -Although you can set the value of `structuredTypeKey` to any field shown in the "Log Record Fields" topic, the most useful fields are shown in the preceding list of structure types. -==== - -.A structuredTypeKey: kubernetes.labels. example - -Suppose the following: - -* Your cluster is running application pods that produce JSON logs in two different formats, "apache" and "google". -* The user labels these application pods with `logFormat=apache` and `logFormat=google`. -* You use the following snippet in your `ClusterLogForwarder` CR YAML file. - -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: -# ... - outputDefaults: - elasticsearch: - structuredTypeKey: kubernetes.labels.logFormat <1> - structuredTypeName: nologformat - pipelines: - - inputRefs: - - application - outputRefs: - - default - parse: json <2> ----- -<1> Uses the value of the key-value pair that is formed by the Kubernetes `logFormat` label. -<2> Enables parsing JSON logs. - -In that case, the following structured log record goes to the `app-apache-write` index: - -[source] ----- -{ - "structured":{"name":"fred","home":"bedrock"}, - "kubernetes":{"labels":{"logFormat": "apache", ...}} -} ----- - -And the following structured log record goes to the `app-google-write` index: - -[source] ----- -{ - "structured":{"name":"wilma","home":"bedrock"}, - "kubernetes":{"labels":{"logFormat": "google", ...}} -} ----- - -.A structuredTypeKey: openshift.labels. example - -Suppose that you use the following snippet in your `ClusterLogForwarder` CR YAML file. - -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: openshift.labels.myLabel <1> - structuredTypeName: nologformat -pipelines: - - name: application-logs - inputRefs: - - application - - audit - outputRefs: - - elasticsearch-secure - - default - parse: json - labels: - myLabel: myValue <2> ----- -<1> Uses the value of the key-value pair that is formed by the OpenShift `myLabel` label. -<2> The `myLabel` element gives its string value, `myValue`, to the structured log record. 
- -In that case, the following structured log record goes to the `app-myValue-write` index: - -[source] ----- -{ - "structured":{"name":"fred","home":"bedrock"}, - "openshift":{"labels":{"myLabel": "myValue", ...}} -} ----- - -.Additional considerations - -* The Elasticsearch _index_ for structured records is formed by prepending "app-" to the structured type and appending "-write". -* Unstructured records are not sent to the structured index. They are indexed as usual in the application, infrastructure, or audit indices. -* If there is no non-empty structured type, forward an _unstructured_ record with no `structured` field. - -It is important not to overload Elasticsearch with too many indices. Only use distinct structured types for distinct log _formats_, *not* for each application or namespace. For example, most Apache applications use the same JSON log format and structured type, such as `LogApache`. diff --git a/modules/cluster-logging-cpu-memory.adoc b/modules/cluster-logging-cpu-memory.adoc deleted file mode 100644 index 0fc270daab8b..000000000000 --- a/modules/cluster-logging-cpu-memory.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-memory-limits_{context}"] -= Configuring CPU and memory limits - -The {logging} components allow for adjustments to both the CPU and memory limits. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... - -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - resources: <1> - limits: - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" - kibana: - resources: <2> - limits: - memory: 1Gi - requests: - cpu: 500m - memory: 1Gi - proxy: - resources: <2> - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - replicas: 2 - collection: - logs: - type: "fluentd" - fluentd: - resources: <3> - limits: - memory: 736Mi - requests: - cpu: 200m - memory: 736Mi ----- -<1> Specify the CPU and memory limits and requests for the log store as needed. For Elasticsearch, you must adjust both the request value and the limit value. -<2> Specify the CPU and memory limits and requests for the log visualizer as needed. -<3> Specify the CPU and memory limits and requests for the log collector as needed. diff --git a/modules/cluster-logging-dashboards-access.adoc b/modules/cluster-logging-dashboards-access.adoc deleted file mode 100644 index 52c31afb421e..000000000000 --- a/modules/cluster-logging-dashboards-access.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_visualization/cluster-logging-dashboards.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-dashboards-access_{context}"] -= Accessing the Elasticsearch and OpenShift Logging dashboards - -You can view the *Logging/Elasticsearch Nodes* and *OpenShift Logging* dashboards in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console. 
-endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url}. -endif::[] - -.Procedure - -To launch the dashboards: - -ifndef::openshift-rosa,openshift-dedicated[] -. In the {product-title} web console, click *Observe* -> *Dashboards*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -. In the {product-title} {hybrid-console}, click *Observe* -> *Dashboards*. -endif::[] - -. On the *Dashboards* page, select *Logging/Elasticsearch Nodes* or *OpenShift Logging* from the *Dashboard* menu. -+ -For the *Logging/Elasticsearch Nodes* dashboard, you can select the Elasticsearch node you want to view and set the data resolution. -+ -The appropriate dashboard is displayed, showing multiple charts of data. - -. Optional: Select a different time range to display or refresh rate for the data from the *Time Range* and *Refresh Interval* menus. diff --git a/modules/cluster-logging-dashboards-es.adoc b/modules/cluster-logging-dashboards-es.adoc deleted file mode 100644 index a5ef10c44a85..000000000000 --- a/modules/cluster-logging-dashboards-es.adoc +++ /dev/null @@ -1,196 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_visualization/cluster-logging-dashboards.adoc - -[id="cluster-logging-dashboards-es_{context}"] -= Charts on the Logging/Elasticsearch nodes dashboard - -The *Logging/Elasticsearch Nodes* dashboard contains charts that show details about your Elasticsearch instance, many at node-level, for further diagnostics. - -Elasticsearch status:: - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the status of your Elasticsearch instance. - -.Elasticsearch status fields -[options="header"] -|=== -|Metric|Description - -|Cluster status -a|The cluster health status during the selected time period, using the Elasticsearch green, yellow, and red statuses: - -* 0 - Indicates that the Elasticsearch instance is in green status, which means that all shards are allocated. -* 1 - Indicates that the Elasticsearch instance is in yellow status, which means that replica shards for at least one shard are not allocated. -* 2 - Indicates that the Elasticsearch instance is in red status, which means that at least one primary shard and its replicas are not allocated. - -|Cluster nodes -|The total number of Elasticsearch nodes in the cluster. - -|Cluster data nodes -|The number of Elasticsearch data nodes in the cluster. - -|Cluster pending tasks -|The number of cluster state changes that are not finished and are waiting in a cluster queue, for example, index creation, index deletion, or shard allocation. A growing trend indicates that the cluster is not able to keep up with changes. - -|=== - -Elasticsearch cluster index shard status:: - -Each Elasticsearch index is a logical group of one or more shards, which are basic units of persisted data. There are two types of index shards: primary shards, and replica shards. When a document is indexed into an index, it is stored in one of its primary shards and copied into every replica of that shard. The number of primary shards is specified when the index is created, and the number cannot change during index lifetime. You can change the number of replica shards at any time. - -The index shard can be in several states depending on its lifecycle phase or events occurring in the cluster. When the shard is able to perform search and indexing requests, the shard is active. If the shard cannot perform these requests, the shard is non–active. 
A shard might be non-active if the shard is initializing, reallocating, unassigned, and so forth. - -Index shards consist of a number of smaller internal blocks, called index segments, which are physical representations of the data. An index segment is a relatively small, immutable Lucene index that is created when Lucene commits newly-indexed data. Lucene, a search library used by Elasticsearch, merges index segments into larger segments in the background to keep the total number of segments low. If the process of merging segments is slower than the speed at which new segments are created, it could indicate a problem. - -When Lucene performs data operations, such as a search operation, Lucene performs the operation against the index segments in the relevant index. For that purpose, each segment contains specific data structures that are loaded in the memory and mapped. Index mapping can have a significant impact on the memory used by segment data structures. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the Elasticsearch index shards. - -.Elasticsearch cluster shard status charts -[options="header"] - -|=== -|Metric|Description - -|Cluster active shards -|The number of active primary shards and the total number of shards, including replicas, in the cluster. If the number of shards grows higher, the cluster performance can start degrading. - -|Cluster initializing shards -|The number of non-active shards in the cluster. A non-active shard is one that is initializing, being reallocated to a different node, or is unassigned. A cluster typically has non–active shards for short periods. A growing number of non–active shards over longer periods could indicate a problem. - -|Cluster relocating shards -|The number of shards that Elasticsearch is relocating to a new node. Elasticsearch relocates nodes for multiple reasons, such as high memory use on a node or after a new node is added to the cluster. - -|Cluster unassigned shards -|The number of unassigned shards. Elasticsearch shards might be unassigned for reasons such as a new index being added or the failure of a node. - -|=== - -Elasticsearch node metrics:: - -Each Elasticsearch node has a finite amount of resources that can be used to process tasks. When all the resources are being used and Elasticsearch attempts to perform a new task, Elasticsearch puts the tasks into a queue until some resources become available. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about resource usage for a selected node and the number of tasks waiting in the Elasticsearch queue. - -.Elasticsearch node metric charts -[options="header"] -|=== -|Metric|Description - -|ThreadPool tasks -|The number of waiting tasks in individual queues, shown by task type. A long–term accumulation of tasks in any queue could indicate node resource shortages or some other problem. - -|CPU usage -|The amount of CPU being used by the selected Elasticsearch node as a percentage of the total CPU allocated to the host container. - -|Memory usage -|The amount of memory being used by the selected Elasticsearch node. - -|Disk usage -|The total disk space being used for index data and metadata on the selected Elasticsearch node. - -|Documents indexing rate -|The rate that documents are indexed on the selected Elasticsearch node. - -|Indexing latency -|The time taken to index the documents on the selected Elasticsearch node. Indexing latency can be affected by many factors, such as JVM Heap memory and overall load. 
A growing latency indicates a resource capacity shortage in the instance. - -|Search rate -|The number of search requests run on the selected Elasticsearch node. - -|Search latency -|The time taken to complete search requests on the selected Elasticsearch node. Search latency can be affected by many factors. A growing latency indicates a resource capacity shortage in the instance. - -|Documents count (with replicas) -|The number of Elasticsearch documents stored on the selected Elasticsearch node, including documents stored in both the primary shards and replica shards that are allocated on the node. - -|Documents deleting rate -|The number of Elasticsearch documents being deleted from any of the index shards that are allocated to the selected Elasticsearch node. - -|Documents merging rate -|The number of Elasticsearch documents being merged in any of the index shards that are allocated to the selected Elasticsearch node. - -|=== - -Elasticsearch node fielddata:: - -link:https://www.elastic.co/guide/en/elasticsearch/reference/6.8/fielddata.html[_Fielddata_] is an Elasticsearch data structure that holds lists of terms in an index and is kept in the JVM Heap. Because fielddata building is an expensive operation, Elasticsearch caches the fielddata structures. Elasticsearch can evict a fielddata cache when the underlying index segment is deleted or merged, or if there is not enough JVM Heap memory for all the fielddata caches. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about Elasticsearch fielddata. - -.Elasticsearch node fielddata charts -[options="header"] -|=== -|Metric|Description - -|Fielddata memory size -|The amount of JVM Heap used for the fielddata cache on the selected Elasticsearch node. - -|Fielddata evictions -|The number of fielddata structures that were deleted from the selected Elasticsearch node. - -|=== - -Elasticsearch node query cache:: - -If the data stored in the index does not change, search query results are cached in a node-level query cache for reuse by Elasticsearch. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the Elasticsearch node query cache. - -.Elasticsearch node query charts -[options="header"] -|=== -|Metric|Description - -|Query cache size -|The total amount of memory used for the query cache for all the shards allocated to the selected Elasticsearch node. - -|Query cache evictions -|The number of query cache evictions on the selected Elasticsearch node. - -|Query cache hits -|The number of query cache hits on the selected Elasticsearch node. - -|Query cache misses -|The number of query cache misses on the selected Elasticsearch node. - -|=== - -Elasticsearch index throttling:: - -When indexing documents, Elasticsearch stores the documents in index segments, which are physical representations of the data. At the same time, Elasticsearch periodically merges smaller segments into a larger segment as a way to optimize resource use. If the indexing is faster than the ability to merge segments, the merge process does not complete quickly enough, which can lead to issues with searches and performance. To prevent this situation, Elasticsearch throttles indexing, typically by reducing the number of threads allocated to indexing down to a single thread. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about Elasticsearch index throttling.
- -.Index throttling charts -[options="header"] -|=== -|Metric|Description - -|Indexing throttling -|The amount of time that Elasticsearch has been throttling the indexing operations on the selected Elasticsearch node. - -|Merging throttling -|The amount of time that Elasticsearch has been throttling the segment merge operations on the selected Elasticsearch node. - -|=== - -Node JVM Heap statistics:: - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about JVM Heap operations. - -.JVM Heap statistic charts -[options="header"] -|=== -|Metric|Description - -|Heap used -|The amount of the total allocated JVM Heap space that is used on the selected Elasticsearch node. - -|GC count -|The number of garbage collection operations that have been run on the selected Elasticsearch node, by old and young garbage collection. - -|GC time -|The amount of time that the JVM spent running garbage collection operations on the selected Elasticsearch node, by old and young garbage collection. - -|=== diff --git a/modules/cluster-logging-dashboards-logging.adoc b/modules/cluster-logging-dashboards-logging.adoc deleted file mode 100644 index f4f54650870d..000000000000 --- a/modules/cluster-logging-dashboards-logging.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_visualization/cluster-logging-dashboards.adoc - -:_mod-docs-content-type: CONCEPT -[id="cluster-logging-dashboards-logging_{context}"] -= About the OpenShift Logging dashboard - -The *OpenShift Logging* dashboard contains charts that show details about your Elasticsearch instance at a cluster-level that you can use to diagnose and anticipate problems. - -.OpenShift Logging charts -[options="header"] -|=== -|Metric|Description - -|Elastic Cluster Status -a|The current Elasticsearch status: - -* ONLINE - Indicates that the Elasticsearch instance is online. -* OFFLINE - Indicates that the Elasticsearch instance is offline. - -|Elastic Nodes -|The total number of Elasticsearch nodes in the Elasticsearch instance. - -|Elastic Shards -|The total number of Elasticsearch shards in the Elasticsearch instance. - -|Elastic Documents -|The total number of Elasticsearch documents in the Elasticsearch instance. - -|Total Index Size on Disk -|The total disk space that is being used for the Elasticsearch indices. - -|Elastic Pending Tasks -|The total number of Elasticsearch changes that have not been completed, such as index creation, index mapping, shard allocation, or shard failure. - -|Elastic JVM GC time -|The amount of time that the JVM spent executing Elasticsearch garbage collection operations in the cluster. - -|Elastic JVM GC Rate -|The total number of times that JVM executed garbage activities per second. - -|Elastic Query/Fetch Latency Sum -a|* Query latency: The average time each Elasticsearch search query takes to execute. -* Fetch latency: The average time each Elasticsearch search query spends fetching data. - -Fetch latency typically takes less time than query latency. If fetch latency is consistently increasing, it might indicate slow disks, data enrichment, or large requests with too many results. - -|Elastic Query Rate -|The total queries executed against the Elasticsearch instance per second for each Elasticsearch node. - -|CPU -|The amount of CPU used by Elasticsearch, Fluentd, and Kibana, shown for each component. - -|Elastic JVM Heap Used -|The amount of JVM memory used. 
In a healthy cluster, the graph shows regular drops as memory is freed by JVM garbage collection. - -|Elasticsearch Disk Usage -|The total disk space used by the Elasticsearch instance for each Elasticsearch node. - -|File Descriptors In Use -|The total number of file descriptors used by Elasticsearch, Fluentd, and Kibana. - -|FluentD emit count -|The total number of Fluentd messages per second for the Fluentd default output, and the retry count for the default output. - -|FluentD Buffer Usage -|The percent of the Fluentd buffer that is being used for chunks. A full buffer might indicate that Fluentd is not able to process the number of logs received. - -|Elastic rx bytes -|The total number of bytes that Elasticsearch has received from FluentD, the Elasticsearch nodes, and other sources. - -|Elastic Index Failure Rate -|The total number of times per second that an Elasticsearch index fails. A high rate might indicate an issue with indexing. - -|FluentD Output Error Rate -|The total number of times per second that FluentD is not able to output logs. - -|=== diff --git a/modules/cluster-logging-deploy-es-cli.adoc b/modules/cluster-logging-deploy-es-cli.adoc deleted file mode 100644 index 323ee7fc2ec4..000000000000 --- a/modules/cluster-logging-deploy-es-cli.adoc +++ /dev/null @@ -1,145 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-deploy-es-cli_{context}"] -= Installing the {es-op} by using the CLI - -You can use the {oc-first} to install the {es-op}. - -.Prerequisites - -* Ensure that you have the necessary persistent storage for Elasticsearch. Note that each Elasticsearch node requires its own storage volume. -+ -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== -+ -Elasticsearch is a memory-intensive application. By default, {product-title} installs three Elasticsearch nodes with memory requests and limits of 16 GB. This initial set of three {product-title} nodes might not have enough memory to run Elasticsearch within your cluster. If you experience memory issues that are related to Elasticsearch, add more Elasticsearch nodes to your cluster rather than increasing the memory on existing nodes. - -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in "Obtaining the installation program" in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the `OperatorHub` custom resource (CR) as shown in *Configuring {product-title} to use Red Hat Operators*. -endif::[] - -* You have administrator permissions. -* You have installed the {oc-first}. - -.Procedure - -. Create a `Namespace` object as a YAML file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-operators-redhat <1> - annotations: - openshift.io/node-selector: "" - labels: - openshift.io/cluster-monitoring: "true" <2> ----- -<1> You must specify the `openshift-operators-redhat` namespace. To prevent possible conflicts with metrics, configure the Prometheus Cluster Monitoring stack to scrape metrics from the `openshift-operators-redhat` namespace and not the `openshift-operators` namespace. 
The `openshift-operators` namespace might contain community Operators, which are untrusted and could publish a metric with the same name as -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] -metric, which would cause conflicts. -<2> String. You must specify this label as shown to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -. Apply the `Namespace` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Create an `OperatorGroup` object as a YAML file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: openshift-operators-redhat - namespace: openshift-operators-redhat <1> -spec: {} ----- -<1> You must specify the `openshift-operators-redhat` namespace. - -. Apply the `OperatorGroup` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Create a `Subscription` object to subscribe the namespace to the {es-op}: -+ -.Example Subscription -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: elasticsearch-operator - namespace: openshift-operators-redhat <1> -spec: - channel: stable-x.y <2> - installPlanApproval: Automatic <3> - source: redhat-operators <4> - sourceNamespace: openshift-marketplace - name: elasticsearch-operator ----- -<1> You must specify the `openshift-operators-redhat` namespace. -<2> Specify `stable`, or `stable-x.y` as the channel. See the following note. -<3> `Automatic` allows the Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. `Manual` requires a user with appropriate credentials to approve the Operator update. -<4> Specify `redhat-operators`. If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, -specify the name of the `CatalogSource` object created when you configured the Operator Lifecycle Manager (OLM). -+ -[NOTE] -==== -Specifying `stable` installs the current version of the latest stable release. Using `stable` with `installPlanApproval: "Automatic"` automatically upgrades your Operators to the latest stable major and minor release. - -Specifying `stable-x.y` installs the current minor version of a specific major release. Using `stable-x.y` with `installPlanApproval: "Automatic"` automatically upgrades your Operators to the latest stable minor release within the major release. -==== - -. Apply the subscription by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- -+ -The {es-op} is installed to the `openshift-operators-redhat` namespace and copied to each project in the cluster. - -.Verification - -. Run the following command: -+ -[source,terminal] ----- -$ oc get csv -n --all-namespaces ----- - -. 
Observe the output and confirm that pods for the {es-op} exist in each namespace -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME DISPLAY VERSION REPLACES PHASE -default elasticsearch-operator.v5.8.1 OpenShift Elasticsearch Operator 5.8.1 elasticsearch-operator.v5.8.0 Succeeded -kube-node-lease elasticsearch-operator.v5.8.1 OpenShift Elasticsearch Operator 5.8.1 elasticsearch-operator.v5.8.0 Succeeded -kube-public elasticsearch-operator.v5.8.1 OpenShift Elasticsearch Operator 5.8.1 elasticsearch-operator.v5.8.0 Succeeded -kube-system elasticsearch-operator.v5.8.1 OpenShift Elasticsearch Operator 5.8.1 elasticsearch-operator.v5.8.0 Succeeded -non-destructive-test elasticsearch-operator.v5.8.1 OpenShift Elasticsearch Operator 5.8.1 elasticsearch-operator.v5.8.0 Succeeded -openshift-apiserver-operator elasticsearch-operator.v5.8.1 OpenShift Elasticsearch Operator 5.8.1 elasticsearch-operator.v5.8.0 Succeeded -openshift-apiserver elasticsearch-operator.v5.8.1 OpenShift Elasticsearch Operator 5.8.1 elasticsearch-operator.v5.8.0 Succeeded -... ----- diff --git a/modules/cluster-logging-elasticsearch-audit.adoc b/modules/cluster-logging-elasticsearch-audit.adoc deleted file mode 100644 index 9c01bd20314a..000000000000 --- a/modules/cluster-logging-elasticsearch-audit.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-elasticsearch-audit_{context}"] -= Forwarding audit logs to the log store - -include::snippets/audit-logs-default.adoc[] - -.Procedure - -To use the Log Forward API to forward audit logs to the internal Elasticsearch instance: - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -* Create a CR to send all log types to the internal Elasticsearch instance. You can use the following example without making any changes: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - pipelines: <1> - - name: all-to-default - inputRefs: - - infrastructure - - application - - audit - outputRefs: - - default ----- -<1> A pipeline defines the type of logs to forward using the specified output. The default output forwards logs to the internal Elasticsearch instance. -+ -[NOTE] -==== -You must specify all three types of logs in the pipeline: application, infrastructure, and audit. If you do not specify a log type, those logs are not stored and will be lost. -==== -+ -* If you have an existing `ClusterLogForwarder` CR, add a pipeline to the default output for the audit logs. You do not need to define the default output. 
For example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: elasticsearch-insecure - type: "elasticsearch" - url: http://elasticsearch-insecure.messaging.svc.cluster.local - insecure: true - - name: elasticsearch-secure - type: "elasticsearch" - url: https://elasticsearch-secure.messaging.svc.cluster.local - secret: - name: es-audit - - name: secureforward-offcluster - type: "fluentdForward" - url: https://secureforward.offcluster.com:24224 - secret: - name: secureforward - pipelines: - - name: container-logs - inputRefs: - - application - outputRefs: - - secureforward-offcluster - - name: infra-logs - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - - name: audit-logs - inputRefs: - - audit - outputRefs: - - elasticsearch-secure - - default <1> ----- -<1> This pipeline sends the audit logs to the internal Elasticsearch instance in addition to an external instance. diff --git a/modules/cluster-logging-elasticsearch-exposing.adoc b/modules/cluster-logging-elasticsearch-exposing.adoc deleted file mode 100644 index b3b8da576c9e..000000000000 --- a/modules/cluster-logging-elasticsearch-exposing.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-elasticsearch-exposing_{context}"] -= Exposing the log store service as a route - -By default, the log store that is deployed with {logging} is not accessible from outside the logging cluster. You can enable a route with re-encryption termination for external access to the log store service for those tools that access its data. - -Externally, you can access the log store by creating a reencrypt route, your {product-title} token and the installed log store CA certificate. Then, access a node that hosts the log store service with a cURL request that contains: - -* The `Authorization: Bearer ${token}` -* The Elasticsearch reencrypt route and an link:https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html[Elasticsearch API request]. - -Internally, you can access the log store service using the log store cluster IP, -which you can get by using either of the following commands: - -[source,terminal] ----- -$ oc get service elasticsearch -o jsonpath={.spec.clusterIP} -n openshift-logging ----- - -.Example output -[source,terminal] ----- -172.30.183.229 ----- - -[source,terminal] ----- -$ oc get service elasticsearch -n openshift-logging ----- - -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -elasticsearch ClusterIP 172.30.183.229 9200/TCP 22h ----- - -You can check the cluster IP address with a command similar to the following: - -[source,terminal] ----- -$ oc exec elasticsearch-cdm-oplnhinv-1-5746475887-fj2f8 -n openshift-logging -- curl -tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://172.30.183.229:9200/_cat/health" ----- - -.Example output -[source,terminal] ----- - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 29 100 29 0 0 108 0 --:--:-- --:--:-- --:--:-- 108 ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -* You must have access to the project to be able to access to the logs. - -.Procedure - -To expose the log store externally: - -. 
Change to the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. Extract the CA certificate from the log store and write to the *_admin-ca_* file: -+ -[source,terminal] ----- -$ oc extract secret/elasticsearch --to=. --keys=admin-ca ----- -+ -.Example output -[source,terminal] ----- -admin-ca ----- - -. Create the route for the log store service as a YAML file: -+ -.. Create a YAML file with the following: -+ -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: elasticsearch - namespace: openshift-logging -spec: - host: - to: - kind: Service - name: elasticsearch - tls: - termination: reencrypt - destinationCACertificate: | <1> ----- -<1> Add the log store CA certifcate or use the command in the next step. You do not have to set the `spec.tls.key`, `spec.tls.certificate`, and `spec.tls.caCertificate` parameters required by some reencrypt routes. - -.. Run the following command to add the log store CA certificate to the route YAML you created in the previous step: -+ -[source,terminal] ----- -$ cat ./admin-ca | sed -e "s/^/ /" >> .yaml ----- - -.. Create the route: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Example output -[source,terminal] ----- -route.route.openshift.io/elasticsearch created ----- -+ -//For an example reencrypt route object, see Re-encryption Termination. -//+ -//This line ^^ will be linked when the topic is available. - -. Check that the Elasticsearch service is exposed: - -.. Get the token of this service account to be used in the request: -+ -[source,terminal] ----- -$ token=$(oc whoami -t) ----- - -.. Set the *elasticsearch* route you created as an environment variable. -+ -[source,terminal] ----- -$ routeES=`oc get route elasticsearch -o jsonpath={.spec.host}` ----- - -.. To verify the route was successfully created, run the following command that accesses Elasticsearch through the exposed route: -+ -[source,terminal] ----- -curl -tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://${routeES}" ----- -+ -The response appears similar to the following: -+ -.Example output -[source,json] ----- -{ - "name" : "elasticsearch-cdm-i40ktba0-1", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "0eY-tJzcR3KOdpgeMJo-MQ", - "version" : { - "number" : "6.8.1", - "build_flavor" : "oss", - "build_type" : "zip", - "build_hash" : "Unknown", - "build_date" : "Unknown", - "build_snapshot" : true, - "lucene_version" : "7.7.0", - "minimum_wire_compatibility_version" : "5.6.0", - "minimum_index_compatibility_version" : "5.0.0" -}, - "" : "" -} ----- diff --git a/modules/cluster-logging-elasticsearch-ha.adoc b/modules/cluster-logging-elasticsearch-ha.adoc deleted file mode 100644 index 448812244603..000000000000 --- a/modules/cluster-logging-elasticsearch-ha.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-elasticsearch-ha_{context}"] -= Configuring replication policy for the log store - -You can define how Elasticsearch shards are replicated across data nodes in the cluster. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. 
Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - logStore: - type: "elasticsearch" - elasticsearch: - redundancyPolicy: "SingleRedundancy" <1> ----- -<1> Specify a redundancy policy for the shards. The change is applied when you save the CR. -+ -* *FullRedundancy*. Elasticsearch fully replicates the primary shards for each index to every data node. This provides the highest safety, but at the cost of the highest amount of disk required and the poorest performance. -* *MultipleRedundancy*. Elasticsearch fully replicates the primary shards for each index to half of the data nodes. This provides a good tradeoff between safety and performance. -* *SingleRedundancy*. Elasticsearch makes one copy of the primary shards for each index. Logs are always available and recoverable as long as at least two data nodes exist. This policy provides better performance than MultipleRedundancy when you use five or more nodes. You cannot apply this policy to deployments with a single Elasticsearch node. -* *ZeroRedundancy*. Elasticsearch does not make copies of the primary shards. Logs might be unavailable or lost if a node goes down or fails. Use this mode when you are more concerned with performance than safety, or when you have implemented your own disk/PVC backup and restore strategy. - -[NOTE] -==== -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - diff --git a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc b/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc deleted file mode 100644 index b1982b462ca5..000000000000 --- a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-elasticsearch-persistent-storage-empty_{context}"] -= Configuring the log store for emptyDir storage - -You can use emptyDir with your log store, which creates an ephemeral deployment in which all of a pod's data is lost upon restart. - -[NOTE] -==== -When using emptyDir, if log storage is restarted or redeployed, you will lose data. -==== - -.Prerequisites -//Find & replace the below according to SME feedback. -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. 
Edit the `ClusterLogging` CR to specify emptyDir: -+ -[source,yaml] ----- - spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: {} ----- diff --git a/modules/cluster-logging-elasticsearch-retention.adoc b/modules/cluster-logging-elasticsearch-retention.adoc deleted file mode 100644 index e99b91be31c8..000000000000 --- a/modules/cluster-logging-elasticsearch-retention.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-elasticsearch-retention_{context}"] -= Configuring log retention time - -You can configure a _retention policy_ that specifies how long the default Elasticsearch log store keeps indices for each of the three log sources: infrastructure logs, application logs, and audit logs. - -To configure the retention policy, you set a `maxAge` parameter for each log source in the `ClusterLogging` custom resource (CR). The CR applies these values to the Elasticsearch rollover schedule, which determines when Elasticsearch deletes the rolled-over indices. - -Elasticsearch rolls over an index, moving the current index and creating a new index, when an index matches any of the following conditions: - -* The index is older than the `rollover.maxAge` value in the `Elasticsearch` CR. -* The index size is greater than 40 GB × the number of primary shards. -* The index doc count is greater than 40960 KB × the number of primary shards. - -Elasticsearch deletes the rolled-over indices based on the retention policy you configure. If you do not create a retention policy for any log sources, logs are deleted after seven days by default. - -.Prerequisites -* The {clo} and the {es-op} must be installed. - -.Procedure - -To configure the log retention time: - -. Edit the `ClusterLogging` CR to add or modify the `retentionPolicy` parameter: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -... -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - retentionPolicy: <1> - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 -... ----- -<1> Specify the time that Elasticsearch should retain each log source. Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `1d` for one day. Logs older than the `maxAge` are deleted. By default, logs are retained for seven days. - -. You can verify the settings in the `Elasticsearch` custom resource (CR). -+ -For example, the Red Hat OpenShift Logging Operator updated the following `Elasticsearch` CR to configure a retention policy that includes settings to roll over active indices for the infrastructure logs every eight hours and the rolled-over indices are deleted seven days after rollover. {product-title} checks every 15 minutes to determine if the indices need to be rolled over. -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "Elasticsearch" -metadata: - name: "elasticsearch" -spec: -... - indexManagement: - policies: <1> - - name: infra-policy - phases: - delete: - minAge: 7d <2> - hot: - actions: - rollover: - maxAge: 8h <3> - pollInterval: 15m <4> -... ----- -<1> For each log source, the retention policy indicates when to delete and roll over logs for that source. -<2> When {product-title} deletes the rolled-over indices. This setting is the `maxAge` you set in the `ClusterLogging` CR. 
-<3> The index age for {product-title} to consider when rolling over the indices. This value is determined from the `maxAge` you set in the `ClusterLogging` CR. -<4> When {product-title} checks if the indices should be rolled over. This setting is the default and cannot be changed. -+ -[NOTE] -==== -Modifying the `Elasticsearch` CR is not supported. All changes to the retention policies must be made in the `ClusterLogging` CR. -==== -+ -The OpenShift Elasticsearch Operator deploys a cron job to roll over indices for each mapping using the defined policy, scheduled using the `pollInterval`. -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -.Example output -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 4s -elasticsearch-im-audit */15 * * * * False 0 4s -elasticsearch-im-infra */15 * * * * False 0 4s ----- diff --git a/modules/cluster-logging-elasticsearch-rules.adoc b/modules/cluster-logging-elasticsearch-rules.adoc deleted file mode 100644 index ead76991f3e4..000000000000 --- a/modules/cluster-logging-elasticsearch-rules.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_alerts/default-logging-alerts.adoc - -:_mod-docs-content-type: REFERENCE -[id="cluster-logging-elasticsearch-rules_{context}"] -= Elasticsearch alerting rules - -You can view these alerting rules in the {product-title} web console. - -.Alerting rules -[cols="3,6,1",options="header"] -|=== -|Alert -|Description -|Severity - -|`ElasticsearchClusterNotHealthy` -|The cluster health status has been RED for at least 2 minutes. The cluster does not accept writes, shards may be missing, or the master - node has not been elected yet. -|Critical - -|`ElasticsearchClusterNotHealthy` -|The cluster health status has been YELLOW for at least 20 minutes. Some shard replicas are not allocated. -|Warning - -|`ElasticsearchDiskSpaceRunningLow` -|The cluster is expected to be out of disk space within the next 6 hours. -|Critical - -|`ElasticsearchHighFileDescriptorUsage` -|The cluster is predicted to be out of file descriptors within the next hour. -|Warning - -|`ElasticsearchJVMHeapUseHigh` -|The JVM Heap usage on the specified node is high. -|Alert - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the low watermark due to low free disk space. Shards can not be allocated to this node anymore. You should consider adding more disk space to the node. -|Info - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the high watermark due to low free disk space. Some shards will be re-allocated to different -nodes if possible. Make sure more disk space is added to the node or drop old indices allocated to this node. -|Warning - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the flood watermark due to low free disk space. Every index that has a shard allocated on this node is enforced a read-only block. The index block must be manually released when the disk use falls below the high watermark. -|Critical - -|`ElasticsearchJVMHeapUseHigh` -|The JVM Heap usage on the specified node is too high. -|Alert - -|`ElasticsearchWriteRequestsRejectionJumps` -|Elasticsearch is experiencing an increase in write rejections on the specified node. This node might not be keeping up with the indexing speed. -|Warning - -|`AggregatedLoggingSystemCPUHigh` -|The CPU used by the system on the specified node is too high. 
-|Alert - -|`ElasticsearchProcessCPUHigh` -|The CPU used by Elasticsearch on the specified node is too high. -|Alert -|=== diff --git a/modules/cluster-logging-elasticsearch-scaledown.adoc b/modules/cluster-logging-elasticsearch-scaledown.adoc deleted file mode 100644 index 8d6326827bd1..000000000000 --- a/modules/cluster-logging-elasticsearch-scaledown.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-log-store.adoc - -[id="cluster-logging-elasticsearch-scaledown_{context}"] -= Scaling down Elasticsearch pods - -Reducing the number of Elasticsearch pods in your cluster can result in data loss or Elasticsearch performance degradation. - -If you scale down, you should scale down by one pod at a time and allow the cluster to re-balance the shards and replicas. After the Elasticsearch health status returns to `green`, you can scale down by another pod. - -[NOTE] -==== -If your Elasticsearch cluster is set to `ZeroRedundancy`, you should not scale down your Elasticsearch pods. -==== diff --git a/modules/cluster-logging-elasticsearch-storage.adoc b/modules/cluster-logging-elasticsearch-storage.adoc deleted file mode 100644 index 03d8949fade0..000000000000 --- a/modules/cluster-logging-elasticsearch-storage.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-elasticsearch-storage_{context}"] -= Configuring persistent storage for the log store - -Elasticsearch requires persistent storage. The faster the storage, the faster the Elasticsearch performance. - -[WARNING] -==== -Using NFS storage as a volume or a persistent volume (or via NAS such as -Gluster) is not supported for Elasticsearch storage, as Lucene relies on file -system behavior that NFS does not supply. Data corruption and other problems can -occur. -==== - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` CR to specify that each data node in the cluster is bound to a Persistent Volume Claim. -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" -# ... -spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200G" ----- - -This example specifies each data node in the cluster is bound to a Persistent Volume Claim that requests "200G" of AWS General Purpose SSD (gp2) storage. - -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== diff --git a/modules/cluster-logging-eventrouter-deploy.adoc b/modules/cluster-logging-eventrouter-deploy.adoc deleted file mode 100644 index 0857649c0a2f..000000000000 --- a/modules/cluster-logging-eventrouter-deploy.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-eventrouter-deploy_{context}"] -= Deploying and configuring the Event Router - -Use the following steps to deploy the Event Router into your cluster. 
You should always deploy the Event Router to the `openshift-logging` project to ensure it collects events from across the cluster. - -[NOTE] -==== -The Event Router image is not a part of the {clo} and must be downloaded separately. -==== - -The following `Template` object creates the service account, cluster role, and cluster role binding required for the Event Router. The template also configures and deploys the Event Router pod. You can either use this template without making changes or edit the template to change the deployment object CPU and memory requests. - -.Prerequisites - -* You need proper permissions to create service accounts and update cluster role bindings. For example, you can run the following template with a user that has the *cluster-admin* role. - -* The {clo} must be installed. - -.Procedure - -. Create a template for the Event Router: -+ -[source,yaml] ----- -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: eventrouter-template - annotations: - description: "A pod forwarding kubernetes events to OpenShift Logging stack." - tags: "events,EFK,logging,cluster-logging" -objects: - - kind: ServiceAccount <1> - apiVersion: v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - - kind: ClusterRole <2> - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: event-reader - rules: - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "watch", "list"] - - kind: ClusterRoleBinding <3> - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: event-reader-binding - subjects: - - kind: ServiceAccount - name: eventrouter - namespace: ${NAMESPACE} - roleRef: - kind: ClusterRole - name: event-reader - - kind: ConfigMap <4> - apiVersion: v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - data: - config.json: |- - { - "sink": "stdout" - } - - kind: Deployment <5> - apiVersion: apps/v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - labels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - spec: - selector: - matchLabels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - replicas: 1 - template: - metadata: - labels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - name: eventrouter - spec: - serviceAccount: eventrouter - containers: - - name: kube-eventrouter - image: ${IMAGE} - imagePullPolicy: IfNotPresent - resources: - requests: - cpu: ${CPU} - memory: ${MEMORY} - volumeMounts: - - name: config-volume - mountPath: /etc/eventrouter - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumes: - - name: config-volume - configMap: - name: eventrouter -parameters: - - name: IMAGE <6> - displayName: Image - value: "registry.redhat.io/openshift-logging/eventrouter-rhel9:v0.4" - - name: CPU <7> - displayName: CPU - value: "100m" - - name: MEMORY <8> - displayName: Memory - value: "128Mi" - - name: NAMESPACE - displayName: Namespace - value: "openshift-logging" <9> ----- -<1> Creates a Service Account in the `openshift-logging` project for the Event Router. -<2> Creates a ClusterRole to monitor for events in the cluster. -<3> Creates a ClusterRoleBinding to bind the ClusterRole to the service account. -<4> Creates a config map in the `openshift-logging` project to generate the required `config.json` file. 
-<5> Creates a deployment in the `openshift-logging` project to generate and configure the Event Router pod. -<6> Specifies the image, identified by a tag such as `v0.4`. -<7> Specifies the minimum amount of CPU to allocate to the Event Router pod. Defaults to `100m`. -<8> Specifies the minimum amount of memory to allocate to the Event Router pod. Defaults to `128Mi`. -<9> Specifies the `openshift-logging` project to install objects in. - -. Use the following command to process and apply the template: -+ -[source,terminal] ----- -$ oc process -f | oc apply -n openshift-logging -f - ----- -+ -For example: -+ -[source,terminal] ----- -$ oc process -f eventrouter.yaml | oc apply -n openshift-logging -f - ----- -+ -.Example output -[source,terminal] ----- -serviceaccount/eventrouter created -clusterrole.rbac.authorization.k8s.io/event-reader created -clusterrolebinding.rbac.authorization.k8s.io/event-reader-binding created -configmap/eventrouter created -deployment.apps/eventrouter created ----- - -. Validate that the Event Router installed in the `openshift-logging` project: -+ -.. View the new Event Router pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=eventrouter -o name -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -pod/cluster-logging-eventrouter-d649f97c8-qvv8r ----- - -.. View the events collected by the Event Router: -+ -[source,terminal] ----- -$ oc logs -n openshift-logging ----- -+ -For example: -+ -[source,terminal] ----- -$ oc logs cluster-logging-eventrouter-d649f97c8-qvv8r -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -{"verb":"ADDED","event":{"metadata":{"name":"openshift-service-catalog-controller-manager-remover.1632d931e88fcd8f","namespace":"openshift-service-catalog-removed","selfLink":"/api/v1/namespaces/openshift-service-catalog-removed/events/openshift-service-catalog-controller-manager-remover.1632d931e88fcd8f","uid":"787d7b26-3d2f-4017-b0b0-420db4ae62c0","resourceVersion":"21399","creationTimestamp":"2020-09-08T15:40:26Z"},"involvedObject":{"kind":"Job","namespace":"openshift-service-catalog-removed","name":"openshift-service-catalog-controller-manager-remover","uid":"fac9f479-4ad5-4a57-8adc-cb25d3d9cf8f","apiVersion":"batch/v1","resourceVersion":"21280"},"reason":"Completed","message":"Job completed","source":{"component":"job-controller"},"firstTimestamp":"2020-09-08T15:40:26Z","lastTimestamp":"2020-09-08T15:40:26Z","count":1,"type":"Normal"}} ----- -+ -You can also use Kibana to view events by creating an index pattern using the Elasticsearch `infra` index. diff --git a/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc b/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc deleted file mode 100644 index 5eb3143f876b..000000000000 --- a/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-forwarding-json-logs-to-the-default-elasticsearch_{context}"] -= Forwarding JSON logs to the Elasticsearch log store - -For an Elasticsearch log store, if your JSON log entries _follow different schemas_, configure the `ClusterLogForwarder` custom resource (CR) to group each JSON schema into a single output definition. This way, Elasticsearch uses a separate index for each schema. 
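For example, two applications might emit JSON entries that use the same field name with incompatible types. The following entries are illustrative only:

[source,json]
----
{"status": 200, "message": "login succeeded"}
{"status": "OK", "message": "login succeeded"}
----

If both entries were written to the same index, the string value in the second entry would conflict with the numeric mapping that Elasticsearch created for the `status` field from the first entry, and the document could be rejected. Grouping each schema into its own output definition, as shown in the following procedure, avoids this kind of mapping conflict.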
- -[IMPORTANT] -==== -Because forwarding different schemas to the same index can cause type conflicts and cardinality problems, you must perform this configuration before you forward data to the Elasticsearch store. - -To avoid performance issues associated with having too many indices, consider keeping the number of possible schemas low by standardizing to common schemas. -==== - -.Procedure - -. Add the following snippet to your `ClusterLogForwarder` CR YAML file. -+ -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: - structuredTypeName: -pipelines: -- inputRefs: - - application - outputRefs: default - parse: json ----- - -. Use `structuredTypeKey` field to specify one of the log record fields. - -. Use `structuredTypeName` field to specify a name. -+ -[IMPORTANT] -==== -To parse JSON logs, you must set both the `structuredTypeKey` and `structuredTypeName` fields. -==== - -. For `inputRefs`, specify which log types to forward by using that pipeline, such as `application,` `infrastructure`, or `audit`. - -. Add the `parse: json` element to pipelines. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -The Red Hat OpenShift Logging Operator redeploys the collector pods. However, if they do not redeploy, delete the collector pods to force them to redeploy. -+ -[source,terminal] ----- -$ oc delete pod --selector logging-infra=collector ----- diff --git a/modules/cluster-logging-forwarding-separate-indices.adoc b/modules/cluster-logging-forwarding-separate-indices.adoc deleted file mode 100644 index 240671f7cdf9..000000000000 --- a/modules/cluster-logging-forwarding-separate-indices.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-forwarding-separate-indices_{context}"] -= Forwarding JSON logs from containers in the same pod to separate indices - -You can forward structured logs from different containers within the same pod to different indices. To use this feature, you must configure the pipeline with multi-container support and annotate the pods. Logs are written to indices with a prefix of `app-`. It is recommended that Elasticsearch be configured with aliases to accommodate this. - -[IMPORTANT] -==== -JSON formatting of logs varies by application. Because creating too many indices impacts performance, limit your use of this feature to creating indices for logs that have incompatible JSON formats. Use queries to separate logs from different namespaces, or applications with compatible JSON formats. -==== - -.Prerequisites - -* {logging-title-uc}: 5.5 - -.Procedure -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputDefaults: - elasticsearch: - structuredTypeKey: kubernetes.labels.logFormat <1> - structuredTypeName: nologformat - enableStructuredContainerLogs: true <2> - pipelines: - - inputRefs: - - application - name: application-logs - outputRefs: - - default - parse: json ----- -<1> Uses the value of the key-value pair that is formed by the Kubernetes `logFormat` label. -<2> Enables multi-container outputs. - -. 
Create or edit a YAML file that defines the `Pod` CR object: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - containerType.logging.openshift.io/heavy: heavy <1> - containerType.logging.openshift.io/low: low -spec: - containers: - - name: heavy <2> - image: heavyimage - - name: low - image: lowimage ----- -<1> Format: `containerType.logging.openshift.io/: ` -<2> Annotation names must match container names - -[WARNING] -==== -This configuration might significantly increase the number of shards on the cluster. -==== - -.Additional resources -* link:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/[Kubernetes Annotations] diff --git a/modules/cluster-logging-json-log-forwarding.adoc b/modules/cluster-logging-json-log-forwarding.adoc deleted file mode 100644 index b078b6fc1734..000000000000 --- a/modules/cluster-logging-json-log-forwarding.adoc +++ /dev/null @@ -1,37 +0,0 @@ -[id="cluster-logging-json-log-forwarding_{context}"] -= Parsing JSON logs - -You can use a `ClusterLogForwarder` object to parse JSON logs into a structured object and forward them to a supported output. - -To illustrate how this works, suppose that you have the following structured JSON log entry: - -.Example structured JSON log entry -[source,yaml] ----- -{"level":"info","name":"fred","home":"bedrock"} ----- - -To enable parsing JSON log, you add `parse: json` to a pipeline in the `ClusterLogForwarder` CR, as shown in the following example: - -.Example snippet showing `parse: json` -[source,yaml] ----- -pipelines: -- inputRefs: [ application ] - outputRefs: myFluentd - parse: json ----- - -When you enable parsing JSON logs by using `parse: json`, the CR copies the JSON-structured log entry in a `structured` field, as shown in the following example: - -.Example `structured` output containing the structured JSON log entry -[source,yaml] ----- -{"structured": { "level": "info", "name": "fred", "home": "bedrock" }, - "more fields..."} ----- - -[IMPORTANT] -==== -If the log entry does not contain valid structured JSON, the `structured` field is absent. -==== diff --git a/modules/cluster-logging-kibana-scaling.adoc b/modules/cluster-logging-kibana-scaling.adoc deleted file mode 100644 index a6aa97f0ae3d..000000000000 --- a/modules/cluster-logging-kibana-scaling.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-visualizer.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-kibana-scaling_{context}"] -= Scaling redundancy for the log visualizer nodes - -You can scale the pod that hosts the log visualizer for redundancy. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -$ oc edit ClusterLogging instance - -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging -.... - -spec: - visualization: - type: "kibana" - kibana: - replicas: 1 <1> ----- -<1> Specify the number of Kibana nodes. 
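After you save the change, you can confirm that the expected number of Kibana pods is running with a command similar to the following. The `component=kibana` selector is an assumption based on how the other logging components in this documentation are labeled; adjust it if your deployment uses different labels.

[source,terminal]
----
$ oc get pods --selector component=kibana -n openshift-logging
----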
- diff --git a/modules/cluster-logging-log-store-status-comp.adoc b/modules/cluster-logging-log-store-status-comp.adoc deleted file mode 100644 index 448f0565cb74..000000000000 --- a/modules/cluster-logging-log-store-status-comp.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch.adoc - -[id="cluster-logging-elasticsearch-status-comp_{context}"] -= Viewing the status of the log store components - -You can view the status for a number of the log store components. - -Elasticsearch indices:: -You can view the status of the Elasticsearch indices. - -. Get the name of an Elasticsearch pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -pod/elasticsearch-cdm-1godmszn-1-6f8495-vp4lw -pod/elasticsearch-cdm-1godmszn-2-5769cf-9ms2n -pod/elasticsearch-cdm-1godmszn-3-f66f7d-zqkz7 ----- - -. Get the status of the indices: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-4vjor49p-2-6d4d7db474-q2w7z -- indices ----- -+ -.Example output -[source,terminal] ----- -Defaulting container name to elasticsearch. -Use 'oc describe pod/elasticsearch-cdm-4vjor49p-2-6d4d7db474-q2w7z -n openshift-logging' to see all of the containers in this pod. - -green open infra-000002 S4QANnf1QP6NgCegfnrnbQ 3 1 119926 0 157 78 -green open audit-000001 8_EQx77iQCSTzFOXtxRqFw 3 1 0 0 0 0 -green open .security iDjscH7aSUGhIdq0LheLBQ 1 1 5 0 0 0 -green open .kibana_-377444158_kubeadmin yBywZ9GfSrKebz5gWBZbjw 3 1 1 0 0 0 -green open infra-000001 z6Dpe__ORgiopEpW6Yl44A 3 1 871000 0 874 436 -green open app-000001 hIrazQCeSISewG3c2VIvsQ 3 1 2453 0 3 1 -green open .kibana_1 JCitcBMSQxKOvIq6iQW6wg 1 1 0 0 0 0 -green open .kibana_-1595131456_user1 gIYFIEGRRe-ka0W3okS-mQ 3 1 1 0 0 0 ----- - - -Log store pods:: -You can view the status of the pods that host the log store. - -. Get the name of a pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -pod/elasticsearch-cdm-1godmszn-1-6f8495-vp4lw -pod/elasticsearch-cdm-1godmszn-2-5769cf-9ms2n -pod/elasticsearch-cdm-1godmszn-3-f66f7d-zqkz7 ----- - -. Get the status of a pod: -+ -[source,terminal] ----- -$ oc describe pod elasticsearch-cdm-1godmszn-1-6f8495-vp4lw ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... -Status: Running - -.... - -Containers: - elasticsearch: - Container ID: cri-o://b7d44e0a9ea486e27f47763f5bb4c39dfd2 - State: Running - Started: Mon, 08 Jun 2020 10:17:56 -0400 - Ready: True - Restart Count: 0 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - - proxy: - Container ID: cri-o://3f77032abaddbb1652c116278652908dc01860320b8a4e741d06894b2f8f9aa1 - State: Running - Started: Mon, 08 Jun 2020 10:18:38 -0400 - Ready: True - Restart Count: 0 - -.... - -Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True - -.... - -Events: ----- - -Log storage pod deployment configuration:: -You can view the status of the log store deployment configuration. - -. 
Get the name of a deployment configuration: -+ -[source,terminal] ----- -$ oc get deployment --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -deployment.extensions/elasticsearch-cdm-1gon-1 -deployment.extensions/elasticsearch-cdm-1gon-2 -deployment.extensions/elasticsearch-cdm-1gon-3 ----- - -. Get the deployment configuration status: -+ -[source,terminal] ----- -$ oc describe deployment elasticsearch-cdm-1gon-1 ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... - Containers: - elasticsearch: - Image: registry.redhat.io/openshift-logging/elasticsearch6-rhel8 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - -Conditions: - Type Status Reason - ---- ------ ------ - Progressing Unknown DeploymentPaused - Available True MinimumReplicasAvailable - -.... - -Events: ----- - -Log store replica set:: -You can view the status of the log store replica set. - -. Get the name of a replica set: -+ -[source,terminal] ----- -$ oc get replicaSet --selector component=elasticsearch -o name - -replicaset.extensions/elasticsearch-cdm-1gon-1-6f8495 -replicaset.extensions/elasticsearch-cdm-1gon-2-5769cf -replicaset.extensions/elasticsearch-cdm-1gon-3-f66f7d ----- - -. Get the status of the replica set: -+ -[source,terminal] ----- -$ oc describe replicaSet elasticsearch-cdm-1gon-1-6f8495 ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... - Containers: - elasticsearch: - Image: registry.redhat.io/openshift-logging/elasticsearch6-rhel8@sha256:4265742c7cdd85359140e2d7d703e4311b6497eec7676957f455d6908e7b1c25 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - -Events: ----- diff --git a/modules/cluster-logging-log-store-status-viewing.adoc b/modules/cluster-logging-log-store-status-viewing.adoc deleted file mode 100644 index 6e26cadd0314..000000000000 --- a/modules/cluster-logging-log-store-status-viewing.adoc +++ /dev/null @@ -1,245 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-log-store.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-log-store-comp-viewing_{context}"] -= Viewing the status of the Elasticsearch log store - -You can view the status of the Elasticsearch log store. - -.Prerequisites - -* The {clo} and {es-op} are installed. - -.Procedure - -. Change to the `openshift-logging` project by running the following command: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. To view the status: - -.. Get the name of the Elasticsearch log store instance by running the following command: -+ -[source,terminal] ----- -$ oc get Elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -elasticsearch 5h9m ----- - -.. 
Get the Elasticsearch log store status by running the following command: -+ -[source,terminal] ----- -$ oc get Elasticsearch -o yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get Elasticsearch elasticsearch -n openshift-logging -o yaml ----- -+ -The output includes information similar to the following: -+ -.Example output -[source,yaml] ----- -status: <1> - cluster: <2> - activePrimaryShards: 30 - activeShards: 60 - initializingShards: 0 - numDataNodes: 3 - numNodes: 3 - pendingTasks: 0 - relocatingShards: 0 - status: green - unassignedShards: 0 - clusterHealth: "" - conditions: [] <3> - nodes: <4> - - deploymentName: elasticsearch-cdm-zjf34ved-1 - upgradeStatus: {} - - deploymentName: elasticsearch-cdm-zjf34ved-2 - upgradeStatus: {} - - deploymentName: elasticsearch-cdm-zjf34ved-3 - upgradeStatus: {} - pods: <5> - client: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - data: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - master: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - shardAllocationEnabled: all ----- -<1> In the output, the cluster status fields appear in the `status` stanza. -<2> The status of the Elasticsearch log store: -+ -* The number of active primary shards. -* The number of active shards. -* The number of shards that are initializing. -* The number of Elasticsearch log store data nodes. -* The total number of Elasticsearch log store nodes. -* The number of pending tasks. -* The Elasticsearch log store status: `green`, `red`, `yellow`. -* The number of unassigned shards. -<3> Any status conditions, if present. The Elasticsearch log store status indicates the reasons from the scheduler if a pod could not be placed. Any events related to the following conditions are shown: -* Container Waiting for both the Elasticsearch log store and proxy containers. -* Container Terminated for both the Elasticsearch log store and proxy containers. -* Pod unschedulable. -Also, a condition is shown for a number of issues; see *Example condition messages*. -<4> The Elasticsearch log store nodes in the cluster, with `upgradeStatus`. -<5> The Elasticsearch log store client, data, and master pods in the cluster, listed under `failed`, `notReady`, or `ready` state. - -[id="cluster-logging-elasticsearch-status-message_{context}"] -== Example condition messages - -The following are examples of some condition messages from the `Status` section of the Elasticsearch instance. - -The following status message indicates that a node has exceeded the configured low watermark, and no shard will be allocated to this node. - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T15:57:22Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not - be allocated on this node. - reason: Disk Watermark Low - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-cdm-0-1 - upgradeStatus: {} ----- - -The following status message indicates that a node has exceeded the configured high watermark, and shards will be relocated to other nodes. 
- -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T16:04:45Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be relocated - from this node. - reason: Disk Watermark High - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-cdm-0-1 - upgradeStatus: {} ----- - -The following status message indicates that the Elasticsearch log store node selector in the custom resource (CR) does not match any nodes in the cluster: - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-04-10T02:26:24Z - message: '0/8 nodes are available: 8 node(s) didn''t match node selector.' - reason: Unschedulable - status: "True" - type: Unschedulable ----- - -The following status message indicates that the Elasticsearch log store CR uses a non-existent persistent volume claim (PVC). - -[source,yaml] ----- -status: - nodes: - - conditions: - - last Transition Time: 2019-04-10T05:55:51Z - message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times) - reason: Unschedulable - status: True - type: Unschedulable ----- - -The following status message indicates that your Elasticsearch log store cluster does not have enough nodes to support the redundancy policy. - -[source,yaml] ----- -status: - clusterHealth: "" - conditions: - - lastTransitionTime: 2019-04-17T20:01:31Z - message: Wrong RedundancyPolicy selected. Choose different RedundancyPolicy or - add more nodes with data roles - reason: Invalid Settings - status: "True" - type: InvalidRedundancy ----- - -This status message indicates your cluster has too many control plane nodes: - -[source,yaml] ----- -status: - clusterHealth: green - conditions: - - lastTransitionTime: '2019-04-17T20:12:34Z' - message: >- - Invalid master nodes count. Please ensure there are no more than 3 total - nodes with master roles - reason: Invalid Settings - status: 'True' - type: InvalidMasters ----- - - -The following status message indicates that Elasticsearch storage does not support the change you tried to make. - -For example: -[source,yaml] ----- -status: - clusterHealth: green - conditions: - - lastTransitionTime: "2021-05-07T01:05:13Z" - message: Changing the storage structure for a custom resource is not supported - reason: StorageStructureChangeIgnored - status: 'True' - type: StorageStructureChangeIgnored ----- - -The `reason` and `type` fields specify the type of unsupported change: - -`StorageClassNameChangeIgnored`:: Unsupported change to the storage class name. -`StorageSizeChangeIgnored`:: Unsupported change the storage size. -`StorageStructureChangeIgnored`:: Unsupported change between ephemeral and persistent storage structures. -+ -[IMPORTANT] -==== -If you try to configure the `ClusterLogging` CR to switch from ephemeral to persistent storage, the {es-op} creates a persistent volume claim (PVC) but does not create a persistent volume (PV). To clear the `StorageStructureChangeIgnored` status, you must revert the change to the `ClusterLogging` CR and delete the PVC. 
-==== diff --git a/modules/cluster-logging-logstore-limits.adoc b/modules/cluster-logging-logstore-limits.adoc deleted file mode 100644 index 70d71aadbfde..000000000000 --- a/modules/cluster-logging-logstore-limits.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-elasticsearch.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-logstore-limits_{context}"] -= Configuring CPU and memory requests for the log store - -Each component specification allows for adjustments to both the CPU and memory requests. -You should not have to manually adjust these values as the OpenShift Elasticsearch -Operator sets values sufficient for your environment. - -[NOTE] -==== -In large-scale clusters, the default memory limit for the Elasticsearch proxy container might not be sufficient, causing the proxy container to be OOMKilled. If you experience this issue, increase the memory requests and limits for the Elasticsearch proxy. -==== - -Each Elasticsearch node can operate with a lower memory setting though this is *not* recommended for production deployments. -For production use, you should have no less than the default 16Gi allocated to each pod. Preferably you should allocate as much as possible, up to 64Gi per pod. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" -.... -spec: - logStore: - type: "elasticsearch" - elasticsearch:<1> - resources: - limits: <2> - memory: "32Gi" - requests: <3> - cpu: "1" - memory: "16Gi" - proxy: <4> - resources: - limits: - memory: 100Mi - requests: - memory: 100Mi ----- -<1> Specify the CPU and memory requests for Elasticsearch as needed. If you leave these values blank, -the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request. -<2> The maximum amount of resources a pod can use. -<3> The minimum resources required to schedule a pod. -<4> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that are sufficient for most deployments. The default values are `256Mi` for the memory request and `100m` for the CPU request. - -When adjusting the amount of Elasticsearch memory, the same value should be used for both `requests` and `limits`. - -For example: - -[source,yaml] ----- - resources: - limits: <1> - memory: "32Gi" - requests: <2> - cpu: "8" - memory: "32Gi" ----- -<1> The maximum amount of the resource. -<2> The minimum amount required. - -Kubernetes generally adheres the node configuration and does not allow Elasticsearch to use the specified limits. -Setting the same value for the `requests` and `limits` ensures that Elasticsearch can use the memory you want, assuming the node has the memory available. 
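After the OpenShift Elasticsearch Operator reconciles the change, you can confirm the values that were actually applied to the Elasticsearch containers. The following command is a minimal sketch that assumes the default `openshift-logging` namespace and the `elasticsearch` container name used elsewhere in this procedure:

[source,terminal]
----
$ oc -n openshift-logging get pods -l component=elasticsearch \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[?(@.name=="elasticsearch")].resources}{"\n"}{end}'
----

Each line of the output shows a pod name followed by the `requests` and `limits` in effect for its Elasticsearch container, which should match the values in the `ClusterLogging` CR.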
diff --git a/modules/cluster-logging-maintenance-support-list-6x.adoc b/modules/cluster-logging-maintenance-support-list-6x.adoc deleted file mode 100644 index 3e35f5affa22..000000000000 --- a/modules/cluster-logging-maintenance-support-list-6x.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log60-cluster-logging-support.adoc -// * observability/logging/logging-6.1/log61-cluster-logging-support.adoc -// * observability/logging/logging-6.2/log62-cluster-logging-support.adoc - -:_mod-docs-content-type: REFERENCE -[id="cluster-logging-maintenance-support-list_{context}"] -= Unsupported configurations - -You must set the Red{nbsp}Hat OpenShift Logging Operator to the `Unmanaged` state to modify the following components: - -* The collector configuration file - -* The collector daemonset - -Explicitly unsupported cases include: - -* *Configuring the logging collector using environment variables*. You cannot use environment variables to modify the log collector. - -* *Configuring how the log collector normalizes logs*. You cannot modify default log normalization. diff --git a/modules/cluster-logging-maintenance-support-list.adoc b/modules/cluster-logging-maintenance-support-list.adoc deleted file mode 100644 index 9754da52d593..000000000000 --- a/modules/cluster-logging-maintenance-support-list.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-support.adoc - -[id="cluster-logging-maintenance-support-list_{context}"] -= Unsupported configurations - -You must set the Red{nbsp}Hat OpenShift Logging Operator to the `Unmanaged` state to modify the following components: - -* The `fluent.conf` file - -* The Fluentd daemon set - -* The `vector.toml` file for Vector collector deployments - - -Explicitly unsupported cases include: - -* *Configuring the collected log location*. You cannot change the location of the log collector output file, which by default is `/var/log/fluentd/fluentd.log`. - -* *Throttling log collection*. You cannot throttle down the rate at which the logs are read in by the log collector. - -* *Configuring the logging collector using environment variables*. You cannot use environment variables to modify the log collector. - -* *Configuring how the log collector normalizes logs*. You cannot modify default log normalization. diff --git a/modules/cluster-logging-manual-rollout-rolling.adoc b/modules/cluster-logging-manual-rollout-rolling.adoc deleted file mode 100644 index 96dc5bcded41..000000000000 --- a/modules/cluster-logging-manual-rollout-rolling.adoc +++ /dev/null @@ -1,207 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_storage/logging-config-es-store.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-manual-rollout-rolling_{context}"] -= Performing an Elasticsearch rolling cluster restart - -Perform a rolling restart when you change the `elasticsearch` config map or any of the `elasticsearch-*` deployment configurations. - -Also, a rolling restart is recommended if the nodes on which an Elasticsearch pod runs requires a reboot. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -To perform a rolling cluster restart: - -. Change to the `openshift-logging` project: -+ ----- -$ oc project openshift-logging ----- - -. 
Get the names of the Elasticsearch pods: -+ ----- -$ oc get pods -l component=elasticsearch ----- - -. Scale down the collector pods so they stop sending new logs to Elasticsearch: -+ -[source,terminal] ----- -$ oc -n openshift-logging patch daemonset/collector -p '{"spec":{"template":{"spec":{"nodeSelector":{"logging-infra-collector": "false"}}}}}' ----- - -. Perform a shard synced flush using the {product-title} link:https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch#es_util[*es_util*] tool to ensure there are no pending operations waiting to be written to disk prior to shutting down: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query="_flush/synced" -XPOST ----- -+ -For example: -+ ----- -$ oc exec -c elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_flush/synced" -XPOST ----- -+ -.Example output -+ ----- -{"_shards":{"total":4,"successful":4,"failed":0},".security":{"total":2,"successful":2,"failed":0},".kibana_1":{"total":2,"successful":2,"failed":0}} ----- - -. Prevent shard balancing when purposely bringing down nodes using the {product-title} es_util tool: -+ ----- -$ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ----- -+ -For example: -+ ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ----- -+ -.Example output -[source,terminal] ----- -{"acknowledged":true,"persistent":{"cluster":{"routing":{"allocation":{"enable":"primaries"}}}},"transient": ----- - -. After the command is complete, for each deployment you have for an ES cluster: - -.. By default, the {product-title} Elasticsearch cluster blocks rollouts to their nodes. Use the following command to allow rollouts -and allow the pod to pick up the changes: -+ ----- -$ oc rollout resume deployment/ ----- -+ -For example: -+ ----- -$ oc rollout resume deployment/elasticsearch-cdm-0-1 ----- -+ -.Example output -+ ----- -deployment.extensions/elasticsearch-cdm-0-1 resumed ----- -+ -A new pod is deployed. After the pod has a ready container, you can -move on to the next deployment. -+ ----- -$ oc get pods -l component=elasticsearch- ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6k 2/2 Running 0 22h -elasticsearch-cdm-5ceex6ts-2-f799564cb-l9mj7 2/2 Running 0 22h -elasticsearch-cdm-5ceex6ts-3-585968dc68-k7kjr 2/2 Running 0 22h ----- - -.. After the deployments are complete, reset the pod to disallow rollouts: -+ ----- -$ oc rollout pause deployment/ ----- -+ -For example: -+ ----- -$ oc rollout pause deployment/elasticsearch-cdm-0-1 ----- -+ -.Example output -+ ----- -deployment.extensions/elasticsearch-cdm-0-1 paused ----- -+ -.. Check that the Elasticsearch cluster is in a `green` or `yellow` state: -+ ----- -$ oc exec -c elasticsearch -- es_util --query=_cluster/health?pretty=true ----- -+ -[NOTE] -==== -If you performed a rollout on the Elasticsearch pod you used in the previous commands, the pod no longer exists and you need a new pod name here. 
-==== -+ -For example: -+ ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query=_cluster/health?pretty=true ----- -+ ----- -{ - "cluster_name" : "elasticsearch", - "status" : "yellow", <1> - "timed_out" : false, - "number_of_nodes" : 3, - "number_of_data_nodes" : 3, - "active_primary_shards" : 8, - "active_shards" : 16, - "relocating_shards" : 0, - "initializing_shards" : 0, - "unassigned_shards" : 1, - "delayed_unassigned_shards" : 0, - "number_of_pending_tasks" : 0, - "number_of_in_flight_fetch" : 0, - "task_max_waiting_in_queue_millis" : 0, - "active_shards_percent_as_number" : 100.0 -} ----- -<1> Make sure this parameter value is `green` or `yellow` before proceeding. - -. If you changed the Elasticsearch configuration map, repeat these steps for each Elasticsearch pod. - -. After all the deployments for the cluster have been rolled out, re-enable shard balancing: -+ ----- -$ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ----- -+ -For example: -+ ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ----- -+ -.Example output -[source,terminal] ----- -{ - "acknowledged" : true, - "persistent" : { }, - "transient" : { - "cluster" : { - "routing" : { - "allocation" : { - "enable" : "all" - } - } - } - } -} ----- - -. Scale up the collector pods so they send new logs to Elasticsearch. -+ -[source,terminal] ----- -$ oc -n openshift-logging patch daemonset/collector -p '{"spec":{"template":{"spec":{"nodeSelector":{"logging-infra-collector": "true"}}}}}' ----- diff --git a/modules/cluster-logging-must-gather-about.adoc b/modules/cluster-logging-must-gather-about.adoc deleted file mode 100644 index d27d0c947daf..000000000000 --- a/modules/cluster-logging-must-gather-about.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-support.adoc - -:_mod-docs-content-type: CONCEPT -[id="about-must-gather_{context}"] -= About the must-gather tool - -The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues. - -For your {logging}, `must-gather` collects the following information: - -* Project-level resources, including pods, configuration maps, service accounts, roles, role bindings, and events at the project level -* Cluster-level resources, including nodes, roles, and role bindings at the cluster level -* OpenShift Logging resources in the `openshift-logging` and `openshift-operators-redhat` namespaces, including health status for the log collector, the log store, and the log visualizer - -When you run `oc adm must-gather`, a new pod is created on the cluster. The data is collected on that pod and saved in a new directory that starts with `must-gather.local`. This directory is created in the current working directory. 
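If you prefer not to create the `must-gather.local` directory in your current working directory, the `oc adm must-gather` command also accepts a destination directory. The following is a minimal sketch; the path is illustrative:

[source,terminal]
----
$ oc adm must-gather --dest-dir=/tmp/logging-must-gather
----

The collected data is then written under the specified directory instead of the current working directory.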
diff --git a/modules/cluster-logging-must-gather-collecting.adoc b/modules/cluster-logging-must-gather-collecting.adoc deleted file mode 100644 index 36ea531933b2..000000000000 --- a/modules/cluster-logging-must-gather-collecting.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-support.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-must-gather-collecting_{context}"] -= Collecting {logging} data - -You can use the `oc adm must-gather` CLI command to collect information about {logging}. - -.Procedure - -To collect {logging} information with `must-gather`: - -. Navigate to the directory where you want to store the `must-gather` information. - -. Run the `oc adm must-gather` command against the {logging} image: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm must-gather --image=quay.io/openshift/origin-cluster-logging-operator ----- -endif::openshift-origin[] -+ -The `must-gather` tool creates a new directory that starts with `must-gather.local` within the current directory. For example: -`must-gather.local.4157245944708210408`. - -. Create a compressed file from the `must-gather` directory that was just created. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar -cvaf must-gather.tar.gz must-gather.local.4157245944708210408 ----- - -. Attach the compressed file to your support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. diff --git a/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc b/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc deleted file mode 100644 index 8671766110e5..000000000000 --- a/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-removing-unused-components-if-no-elasticsearch_{context}"] -= Removing unused components if you do not use the default Elasticsearch log store - -As an administrator, in the rare case that you forward logs to a third-party log store and do not use the default Elasticsearch log store, you can remove several unused components from your logging cluster. - -In other words, if you do not use the default Elasticsearch log store, you can remove the internal Elasticsearch `logStore` and Kibana `visualization` components from the `ClusterLogging` custom resource (CR). Removing these components is optional but saves resources. - -.Prerequisites - -* Verify that your log forwarder does not send log data to the default internal Elasticsearch cluster. Inspect the `ClusterLogForwarder` CR YAML file that you used to configure log forwarding. Verify that it _does not_ have an `outputRefs` element that specifies `default`. For example: -+ -[source,yaml] ----- -outputRefs: -- default ----- - -[WARNING] -==== -Suppose the `ClusterLogForwarder` CR forwards log data to the internal Elasticsearch cluster, and you remove the `logStore` component from the `ClusterLogging` CR. 
In that case, the internal Elasticsearch cluster will not be present to store the log data. This absence can cause data loss. -==== - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- - -. If they are present, remove the `logStore` and `visualization` stanzas from the `ClusterLogging` CR. - -. Preserve the `collection` stanza of the `ClusterLogging` CR. The result should look similar to the following example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - collection: - type: "fluentd" - fluentd: {} ----- - -. Verify that the collector pods are redeployed: -+ -[source,terminal] ----- -$ oc get pods -l component=collector -n openshift-logging ----- diff --git a/modules/cluster-logging-systemd-scaling.adoc b/modules/cluster-logging-systemd-scaling.adoc deleted file mode 100644 index 226aba96b691..000000000000 --- a/modules/cluster-logging-systemd-scaling.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/config/cluster-logging-systemd - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-systemd-scaling_{context}"] -= Configuring systemd-journald for OpenShift Logging - -As you scale up your project, the default logging environment might need some -adjustments. - -For example, if you are missing logs, you might have to increase the rate limits for journald. -You can adjust the number of messages to retain for a specified period of time to ensure that -OpenShift Logging does not use excessive resources without dropping logs. - -You can also determine if you want the logs compressed, how long to retain logs, how or if the logs are stored, -and other settings. - -.Procedure - -. Create a Butane config file, `40-worker-custom-journald.bu`, that includes an `/etc/systemd/journald.conf` file with the required settings. -+ -[NOTE] -==== -include::snippets/butane-version.adoc[] -==== -+ -[source,yaml,subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: 40-worker-custom-journald - labels: - machineconfiguration.openshift.io/role: "worker" -storage: - files: - - path: /etc/systemd/journald.conf - mode: 0644 <1> - overwrite: true - contents: - inline: | - Compress=yes <2> - ForwardToConsole=no <3> - ForwardToSyslog=no - MaxRetentionSec=1month <4> - RateLimitBurst=10000 <5> - RateLimitIntervalSec=30s - Storage=persistent <6> - SyncIntervalSec=1s <7> - SystemMaxUse=8G <8> - SystemKeepFree=20% <9> - SystemMaxFileSize=10M <10> ----- -+ -<1> Set the permissions for the `journald.conf` file. It is recommended to set `0644` permissions. -<2> Specify whether you want logs compressed before they are written to the file system. -Specify `yes` to compress the message or `no` to not compress. The default is `yes`. -<3> Configure whether to forward log messages. Defaults to `no` for each. Specify: -* `ForwardToConsole` to forward logs to the system console. -* `ForwardToKMsg` to forward logs to the kernel log buffer. -* `ForwardToSyslog` to forward to a syslog daemon. -* `ForwardToWall` to forward messages as wall messages to all logged-in users. -<4> Specify the maximum time to store journal entries. Enter a number to specify seconds. Or -include a unit: "year", "month", "week", "day", "h" or "m". Enter `0` to disable. The default is `1month`. 
-<5> Configure rate limiting. If more logs are received than what is specified in `RateLimitBurst` during the time interval defined by `RateLimitIntervalSec`, all further messages within the interval are dropped until the interval is over. It is recommended to set `RateLimitIntervalSec=30s` and `RateLimitBurst=10000`, which are the defaults. -<6> Specify how logs are stored. The default is `persistent`: -* `volatile` to store logs in memory in `/run/log/journal/`. These logs are lost after rebooting. -* `persistent` to store logs to disk in `/var/log/journal/`. systemd creates the directory if it does not exist. -* `auto` to store logs in `/var/log/journal/` if the directory exists. If it does not exist, systemd temporarily stores logs in `/run/systemd/journal`. -* `none` to not store logs. systemd drops all logs. -<7> Specify the timeout before synchronizing journal files to disk for *ERR*, *WARNING*, *NOTICE*, *INFO*, and *DEBUG* logs. -systemd immediately syncs after receiving a *CRIT*, *ALERT*, or *EMERG* log. The default is `1s`. -<8> Specify the maximum size the journal can use. The default is `8G`. -<9> Specify how much disk space systemd must leave free. The default is `20%`. -<10> Specify the maximum size for individual journal files stored persistently in `/var/log/journal`. The default is `10M`. -+ -[NOTE] -==== -If you are removing the rate limit, you might see increased CPU utilization on the -system logging daemons as it processes any messages that would have previously -been throttled. -==== -+ -For more information on systemd settings, see link:https://www.freedesktop.org/software/systemd/man/journald.conf.html[https://www.freedesktop.org/software/systemd/man/journald.conf.html]. The default settings listed on that page might not apply to {product-title}. -+ -// Defaults from https://github.com/openshift/openshift-ansible/pull/3753/files#diff-40b7a7231e77d95ca6009dc9bcc0f470R33-R34 - -. Use Butane to generate a `MachineConfig` object file, `40-worker-custom-journald.yaml`, containing the configuration to be delivered to the nodes: -+ -[source,terminal] ----- -$ butane 40-worker-custom-journald.bu -o 40-worker-custom-journald.yaml ----- - -. Apply the machine config. For example: -+ -[source,terminal] ----- -$ oc apply -f 40-worker-custom-journald.yaml ----- -+ -The controller detects the new `MachineConfig` object and generates a new `rendered-worker-` version. - -. Monitor the status of the rollout of the new rendered configuration to each node: -+ -[source,terminal] ----- -$ oc describe machineconfigpool/worker ----- -+ -.Example output -[source,terminal] ----- -Name: worker -Namespace: -Labels: machineconfiguration.openshift.io/mco-built-in= -Annotations: -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfigPool - -... - -Conditions: - Message: - Reason: All nodes are updating to rendered-worker-913514517bcea7c93bd446f4830bc64e ----- diff --git a/modules/cluster-logging-visualizer-indices.adoc b/modules/cluster-logging-visualizer-indices.adoc deleted file mode 100644 index 1b6deaab2d61..000000000000 --- a/modules/cluster-logging-visualizer-indices.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/log_visualizer/logging-kibana.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-visualizer-indices_{context}"] -= Defining Kibana index patterns - -An index pattern defines the Elasticsearch indices that you want to visualize. 
To explore and visualize data in Kibana, you must create an index pattern. - -.Prerequisites - -* A user must have the `cluster-admin` role, the `cluster-reader` role, or both roles to view the *infra* and *audit* indices in Kibana. The default `kubeadmin` user has proper permissions to view these indices. -+ -If you can view the pods and logs in the `default`, `kube-` and `openshift-` projects, you should be able to access these indices. You can use the following command to check if the current user has appropriate permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods --subresource log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -* Elasticsearch documents must be indexed before you can create index patterns. This is done automatically, but it might take a few minutes in a new or updated cluster. - -.Procedure - -To define index patterns and create visualizations in Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. Create your Kibana index patterns by clicking *Management* -> *Index Patterns* -> *Create index pattern*: - -** Each user must manually create index patterns when logging into Kibana the first time to see logs for their projects. Users must create an index pattern named `app` and use the `@timestamp` time field to view their container logs. - -** Each admin user must create index patterns when logged into Kibana the first time for the `app`, `infra`, and `audit` indices using the `@timestamp` time field. - -. Create Kibana Visualizations from the new index patterns. diff --git a/modules/cluster-logging-visualizer-kibana.adoc b/modules/cluster-logging-visualizer-kibana.adoc deleted file mode 100644 index 0ae118f203cb..000000000000 --- a/modules/cluster-logging-visualizer-kibana.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/viewing/cluster-logging-visualizer.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-visualizer-kibana_{context}"] -= Viewing cluster logs in Kibana - -You view cluster logs in the Kibana web console. The methods for viewing and visualizing your data in Kibana that are beyond the scope of this documentation. For more information, refer to the link:https://www.elastic.co/guide/en/kibana/6.8/tutorial-sample-discover.html[Kibana documentation]. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -* Kibana index patterns must exist. - -* A user must have the `cluster-admin` role, the `cluster-reader` role, or both roles to view the *infra* and *audit* indices in Kibana. The default `kubeadmin` user has proper permissions to view these indices. -+ -If you can view the pods and logs in the `default`, `kube-` and `openshift-` projects, you should be able to access these indices. You can use the following command to check if the current user has appropriate permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods --subresource log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. 
To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -.Procedure - -To view logs in Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. Log in using the same credentials you use to log in to the {product-title} console. -+ -The Kibana interface launches. - -. In Kibana, click *Discover*. - -. Select the index pattern you created from the drop-down menu in the top-left corner: *app*, *audit*, or *infra*. -+ -The log data displays as time-stamped documents. - -. Expand one of the time-stamped documents. - -. Click the *JSON* tab to display the log entry for that document. -+ -.Sample infrastructure log entry in Kibana -[%collapsible] -==== -[source,terminal] ----- -{ - "_index": "infra-000001", - "_type": "_doc", - "_id": "YmJmYTBlNDkZTRmLTliMGQtMjE3NmFiOGUyOWM3", - "_version": 1, - "_score": null, - "_source": { - "docker": { - "container_id": "f85fa55bbef7bb783f041066be1e7c267a6b88c4603dfce213e32c1" - }, - "kubernetes": { - "container_name": "registry-server", - "namespace_name": "openshift-marketplace", - "pod_name": "redhat-marketplace-n64gc", - "container_image": "registry.redhat.io/redhat/redhat-marketplace-index:v4.7", - "container_image_id": "registry.redhat.io/redhat/redhat-marketplace-index@sha256:65fc0c45aabb95809e376feb065771ecda9e5e59cc8b3024c4545c168f", - "pod_id": "8f594ea2-c866-4b5c-a1c8-a50756704b2a", - "host": "ip-10-0-182-28.us-east-2.compute.internal", - "master_url": "https://kubernetes.default.svc", - "namespace_id": "3abab127-7669-4eb3-b9ef-44c04ad68d38", - "namespace_labels": { - "openshift_io/cluster-monitoring": "true" - }, - "flat_labels": [ - "catalogsource_operators_coreos_com/update=redhat-marketplace" - ] - }, - "message": "time=\"2020-09-23T20:47:03Z\" level=info msg=\"serving registry\" database=/database/index.db port=50051", - "level": "unknown", - "hostname": "ip-10-0-182-28.internal", - "pipeline_metadata": { - "collector": { - "ipaddr4": "10.0.182.28", - "inputname": "fluent-plugin-systemd", - "name": "fluentd", - "received_at": "2020-09-23T20:47:15.007583+00:00", - "version": "1.7.4 1.6.0" - } - }, - "@timestamp": "2020-09-23T20:47:03.422465+00:00", - "viaq_msg_id": "YmJmYTBlNDktMDMGQtMjE3NmFiOGUyOWM3", - "openshift": { - "labels": { - "logging": "infra" - } - } - }, - "fields": { - "@timestamp": [ - "2020-09-23T20:47:03.422Z" - ], - "pipeline_metadata.collector.received_at": [ - "2020-09-23T20:47:15.007Z" - ] - }, - "sort": [ - 1600894023422 - ] -} ----- -==== diff --git a/modules/configuring-log-storage-cr.adoc b/modules/configuring-log-storage-cr.adoc deleted file mode 100644 index 16709f3579e4..000000000000 --- a/modules/configuring-log-storage-cr.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying.adoc -// * logging/cluster-logging-loki.adoc - -:_mod-docs-content-type: PROCEDURE -[id="configuring-log-storage-cr_{context}"] -= Configuring log storage - -You can configure which log storage type your {logging} uses by modifying the `ClusterLogging` custom resource (CR). - -.Prerequisites - -* You have administrator permissions. -* You have installed the {oc-first}. -* You have installed the {clo} and an internal log store that is either the LokiStack or Elasticsearch. -* You have created a `ClusterLogging` CR. - -include::snippets/logging-elastic-dep-snip.adoc[] - -.Procedure - -. 
Modify the `ClusterLogging` CR `logStore` spec: -+ -.`ClusterLogging` CR example -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: -# ... -spec: -# ... - logStore: - type: <1> - elasticsearch: <2> - nodeCount: - resources: {} - storage: {} - redundancyPolicy: <3> - lokistack: <4> - name: {} -# ... ----- -<1> Specify the log store type. This can be either `lokistack` or `elasticsearch`. -<2> Optional configuration options for the Elasticsearch log store. -<3> Specify the redundancy type. This value can be `ZeroRedundancy`, `SingleRedundancy`, `MultipleRedundancy`, or `FullRedundancy`. -<4> Optional configuration options for LokiStack. -+ -.Example `ClusterLogging` CR to specify LokiStack as the log store -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: logging-loki -# ... ----- - -. Apply the `ClusterLogging` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/configuring-log-visualizer.adoc b/modules/configuring-log-visualizer.adoc deleted file mode 100644 index beb7d08ea63f..000000000000 --- a/modules/configuring-log-visualizer.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_visualization/log-visualization.adoc -// * observability/logging/cluster-logging-deploying.adoc - -:_mod-docs-content-type: PROCEDURE -[id="configuring-log-visualizer_{context}"] -= Configuring the log visualizer - -You can configure which log visualizer type your {logging} uses by modifying the `ClusterLogging` custom resource (CR). - -.Prerequisites - -* You have administrator permissions. -* You have installed the {oc-first}. -* You have installed the {clo}. -* You have created a `ClusterLogging` CR. - -[IMPORTANT] -==== -If you want to use the {product-title} web console for visualization, you must enable the {log-plug}. See the documentation about "Log visualization with the web console". -==== - -.Procedure - -. Modify the `ClusterLogging` CR `visualization` spec: -+ -.`ClusterLogging` CR example -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: -# ... -spec: -# ... - visualization: - type: <1> - kibana: <2> - resources: {} - nodeSelector: {} - proxy: {} - replicas: {} - tolerations: {} - ocpConsole: <3> - logsLimit: {} - timeout: {} -# ... ----- -<1> The type of visualizer you want to use for your {logging}. This can be either `kibana` or `ocp-console`. The Kibana console is only compatible with deployments that use Elasticsearch log storage, while the {product-title} console is only compatible with LokiStack deployments. -<2> Optional configurations for the Kibana console. -<3> Optional configurations for the {product-title} web console. - -. 
Apply the `ClusterLogging` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/configuring-logging-collector.adoc b/modules/configuring-logging-collector.adoc deleted file mode 100644 index 7328f04225a9..000000000000 --- a/modules/configuring-logging-collector.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-deploying.adoc -// * observability/logging/log_collection_forwarding/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="configuring-logging-collector_{context}"] -= Configuring the log collector - -You can configure which log collector type your {logging} uses by modifying the `ClusterLogging` custom resource (CR). - -include::snippets/logging-fluentd-dep-snip.adoc[] - -.Prerequisites - -* You have administrator permissions. -* You have installed the {oc-first}. -* You have installed the {clo}. -* You have created a `ClusterLogging` CR. - -.Procedure - -. Modify the `ClusterLogging` CR `collection` spec: -+ -.`ClusterLogging` CR example -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: -# ... -spec: -# ... - collection: - type: <1> - resources: {} - tolerations: {} -# ... ----- -<1> The log collector type you want to use for the {logging}. This can be `vector` or `fluentd`. - -. Apply the `ClusterLogging` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/configuring-logging-loki-ruler.adoc b/modules/configuring-logging-loki-ruler.adoc deleted file mode 100644 index 00bfae081851..000000000000 --- a/modules/configuring-logging-loki-ruler.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_alerts/custom-logging-alerts.adoc - -:_mod-docs-content-type: PROCEDURE -[id="configuring-logging-loki-ruler_{context}"] -= Configuring the ruler - -When the LokiStack ruler component is enabled, users can define a group of link:https://grafana.com/docs/loki/latest/query/[LogQL] expressions that trigger logging alerts or recorded metrics. - -Administrators can enable the ruler by modifying the `LokiStack` custom resource (CR). - -.Prerequisites - -* You have installed the {clo} and the {loki-op}. -* You have created a `LokiStack` CR. -* You have administrator permissions. - -.Procedure - -* Enable the ruler by ensuring that the `LokiStack` CR contains the following spec configuration: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: - namespace: -spec: -# ... - rules: - enabled: true <1> - selector: - matchLabels: - openshift.io/: "true" <2> - namespaceSelector: - matchLabels: - openshift.io/: "true" <3> ----- -<1> Enable Loki alerting and recording rules in your cluster. -<2> Add a custom label that can be added to namespaces where you want to enable the use of logging alerts and metrics. -<3> Add a custom label that can be added to namespaces where you want to enable the use of logging alerts and metrics. 
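With the ruler enabled, a rule is evaluated only when both the namespace and the rule object carry labels that match the selectors in the `LokiStack` CR. The following is a minimal sketch of what this might look like; the `openshift.io/example-rules` label, the `app-ns` namespace, and the alert definition are all illustrative:

[source,terminal]
----
$ oc label namespace app-ns openshift.io/example-rules="true"
----

[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: AlertingRule
metadata:
  name: app-ns-alerts
  namespace: app-ns
  labels:
    openshift.io/example-rules: "true" # must match the selector in the LokiStack CR
spec:
  tenantID: "application" # rules in application namespaces use the application tenant
  groups:
    - name: example-log-alerts
      interval: 1m
      rules:
        - alert: HighApplicationLogVolume
          expr: |
            sum(rate({kubernetes_namespace_name="app-ns"}[5m])) > 100
          for: 10m
          labels:
            severity: warning
          annotations:
            summary: Log volume in the app-ns namespace is unusually high.
----

Because the LokiStack ruler evaluates these rules, the `expr` field uses LogQL rather than PromQL.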
diff --git a/modules/creating-logfilesmetricexporter.adoc b/modules/creating-logfilesmetricexporter.adoc deleted file mode 100644 index 0ba34fe0ec13..000000000000 --- a/modules/creating-logfilesmetricexporter.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="creating-logfilesmetricexporter_{context}"] -= Creating a LogFileMetricExporter resource - -In {logging} version 5.8 and newer versions, the LogFileMetricExporter is no longer deployed with the collector by default. You must manually create a `LogFileMetricExporter` custom resource (CR) to generate metrics from the logs produced by running containers. - -If you do not create the `LogFileMetricExporter` CR, you may see a *No datapoints found* message in the {product-title} web console dashboard for *Produced Logs*. - -.Prerequisites - -* You have administrator permissions. -* You have installed the {clo}. -* You have installed the {oc-first}. - -.Procedure - -. Create a `LogFileMetricExporter` CR as a YAML file: -+ -.Example `LogFileMetricExporter` CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1alpha1 -kind: LogFileMetricExporter -metadata: - name: instance - namespace: openshift-logging -spec: - nodeSelector: {} # <1> - resources: # <2> - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 200m - memory: 128Mi - tolerations: [] # <3> -# ... ----- -<1> Optional: The `nodeSelector` stanza defines which nodes the pods are scheduled on. -<2> The `resources` stanza defines resource requirements for the `LogFileMetricExporter` CR. -<3> Optional: The `tolerations` stanza defines the tolerations that the pods accept. - -. Apply the `LogFileMetricExporter` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -.Verification - -A `logfilesmetricexporter` pod runs concurrently with a `collector` pod on each node. - -* Verify that the `logfilesmetricexporter` pods are running in the namespace where you have created the `LogFileMetricExporter` CR, by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/component=logfilesmetricexporter -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -logfilesmetricexporter-9qbjj 1/1 Running 0 2m46s -logfilesmetricexporter-cbc4v 1/1 Running 0 2m46s ----- diff --git a/modules/enabling-log-console-plugin.adoc b/modules/enabling-log-console-plugin.adoc deleted file mode 100644 index 8317c8769fd0..000000000000 --- a/modules/enabling-log-console-plugin.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_visualization/log-visualization-ocp-console.adoc - -:_mod-docs-content-type: PROCEDURE -[id="enabling-log-console-plugin_{context}"] -= Enabling the {log-plug} after you have installed the {clo} - -You can enable the {log-plug} as part of the {clo} installation, but you can also enable the plugin if you have already installed the {clo} with the plugin disabled. - -.Prerequisites - -* You have administrator permissions. -* You have installed the {clo} and selected *Disabled* for the *Console plugin*. -* You have access to the {product-title} web console. - -.Procedure - -. In the {product-title} web console *Administrator* perspective, navigate to *Ecosystem* -> *Installed Operators*. -. 
Click *Red Hat OpenShift Logging*. This takes you to the Operator *Details* page. -. In the *Details* page, click *Disabled* for the *Console plugin* option. -. In the *Console plugin enablement* dialog, select *Enable*. -. Click *Save*. -. Verify that the *Console plugin* option now shows *Enabled*. -. The web console displays a pop-up window when changes have been applied. The window prompts you to reload the web console. Refresh the browser when you see the pop-up window to apply the changes. diff --git a/modules/es-cluster-health-is-red.adoc b/modules/es-cluster-health-is-red.adoc deleted file mode 100644 index e01cbee90f53..000000000000 --- a/modules/es-cluster-health-is-red.adoc +++ /dev/null @@ -1,172 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc - -:_mod-docs-content-type: PROCEDURE -[id="es-cluster-health-is-red_{context}"] -= Elasticsearch cluster health status is red - -At least one primary shard and its replicas are not allocated to a node. Use the following procedure to troubleshoot this alert. - -include::snippets/es-pod-var-logging.adoc[] - -.Procedure - -. Check the Elasticsearch cluster health and verify that the cluster `status` is red by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME -- health ----- - -. List the nodes that have joined the cluster by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cat/nodes?v ----- - -. List the Elasticsearch pods and compare them with the nodes in the command output from the previous step, by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get pods -l component=elasticsearch ----- - -. If some of the Elasticsearch nodes have not joined the cluster, perform the following steps. - -.. Confirm that Elasticsearch has an elected master node by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cat/master?v ----- - -.. Review the pod logs of the elected master node for issues by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc logs -c elasticsearch -n openshift-logging ----- - -.. Review the logs of nodes that have not joined the cluster for issues by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc logs -c elasticsearch -n openshift-logging ----- - -. If all the nodes have joined the cluster, check if the cluster is in the process of recovering by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cat/recovery?active_only=true ----- -+ -If there is no command output, the recovery process might be delayed or stalled by pending tasks. - -. Check if there are pending tasks by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- health | grep number_of_pending_tasks ----- - -. If there are pending tasks, monitor their status. If their status changes and indicates that the cluster is recovering, continue waiting. The recovery time varies according to the size of the cluster and other factors. 
Otherwise, if the status of the pending tasks does not change, this indicates that the recovery has stalled. - -. If it seems like the recovery has stalled, check if the `cluster.routing.allocation.enable` value is set to `none`, by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cluster/settings?pretty ----- - -. If the `cluster.routing.allocation.enable` value is set to `none`, set it to `all`, by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cluster/settings?pretty \ - -X PUT -d '{"persistent": {"cluster.routing.allocation.enable":"all"}}' ----- - -. Check if any indices are still red by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cat/indices?v ----- - -. If any indices are still red, try to clear them by performing the following steps. - -.. Clear the cache by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=/_cache/clear?pretty ----- - -.. Increase the max allocation retries by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=/_settings?pretty \ - -X PUT -d '{"index.allocation.max_retries":10}' ----- - -.. Delete all the scroll items by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_search/scroll/_all -X DELETE ----- - -.. Increase the timeout by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=/_settings?pretty \ - -X PUT -d '{"index.unassigned.node_left.delayed_timeout":"10m"}' ----- - -. If the preceding steps do not clear the red indices, delete the indices individually. - -.. Identify the red index name by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cat/indices?v ----- - -.. Delete the red index by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query= -X DELETE ----- - -. If there are no red indices and the cluster status is red, check for a continuous heavy processing load on a data node. - -.. Check if the Elasticsearch JVM Heap usage is high by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_nodes/stats?pretty ----- -+ -In the command output, review the `node_name.jvm.mem.heap_used_percent` field to determine the JVM Heap usage. - -.. Check for high CPU utilization. For more information about CPU utilitzation, see the {product-title} "Reviewing monitoring dashboards" documentation. 
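If you only need the heap figure rather than the full node statistics, you can narrow the query. The following is a minimal sketch that assumes the same `$ES_POD_NAME` variable used in the preceding steps:

[source,terminal]
----
$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \
  -- es_util --query=_nodes/stats/jvm?pretty | grep heap_used_percent
----

Persistently high values, as a rule of thumb above about 75%, suggest that the data nodes need more memory or that the cluster is under sustained heavy load.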
diff --git a/modules/es-disk-space-low.adoc b/modules/es-disk-space-low.adoc deleted file mode 100644 index c7fcb94a9e7c..000000000000 --- a/modules/es-disk-space-low.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc - -:_mod-docs-content-type: PROCEDURE -[id="es-disk-space-low_{context}"] -= Elasticsearch disk space is running low - -Elasticsearch is predicted to run out of disk space within the next 6 hours based on current disk usage. Use the following procedure to troubleshoot this alert. - -.Procedure - -. Get the disk space of the Elasticsearch node: -+ -[source,terminal] ----- -$ for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; \ - do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod \ - -- df -h /elasticsearch/persistent; done ----- - -. In the command output, check the `Avail` column to determine the free disk space on that node. -+ -.Example output -[source,terminal] ----- -elasticsearch-cdm-kcrsda6l-1-586cc95d4f-h8zq8 -Filesystem Size Used Avail Use% Mounted on -/dev/nvme1n1 19G 522M 19G 3% /elasticsearch/persistent -elasticsearch-cdm-kcrsda6l-2-5b548fc7b-cwwk7 -Filesystem Size Used Avail Use% Mounted on -/dev/nvme2n1 19G 522M 19G 3% /elasticsearch/persistent -elasticsearch-cdm-kcrsda6l-3-5dfc884d99-59tjw -Filesystem Size Used Avail Use% Mounted on -/dev/nvme3n1 19G 528M 19G 3% /elasticsearch/persistent ----- - -. Increase the disk space on all nodes. If increasing the disk space is not possible, try adding a new data node to the cluster, or decrease the total cluster redundancy policy. - -. To check the current `redundancyPolicy`, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -If you are using a `ClusterLogging` resource on your cluster, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get cl \ - -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -+ -If the cluster `redundancyPolicy` value is higher than the `SingleRedundancy` value, set it to the `SingleRedundancy` value and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. -.. Check the status of all indices on Elasticsearch by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME -- indices ----- - -.. Identify an old index that can be deleted. -.. Delete the index by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query= -X DELETE ----- diff --git a/modules/es-node-disk-flood-watermark-reached.adoc b/modules/es-node-disk-flood-watermark-reached.adoc deleted file mode 100644 index 15475715ce84..000000000000 --- a/modules/es-node-disk-flood-watermark-reached.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc - -:_mod-docs-content-type: PROCEDURE -[id="es-node-disk-flood-watermark-reached_{context}"] -= Elasticsearch node disk flood watermark reached - -Elasticsearch enforces a read-only index block on every index that has both of these conditions: - -* One or more shards are allocated to the node. 
-* One or more disks exceed the https://www.elastic.co/guide/en/elasticsearch/reference/6.8/disk-allocator.html[flood stage]. - -Use the following procedure to troubleshoot this alert. - -include::snippets/es-pod-var-logging.adoc[] - -.Procedure - -. Get the disk space of the Elasticsearch node: -+ -[source,terminal] ----- -$ for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; \ - do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod \ - -- df -h /elasticsearch/persistent; done ----- - -. In the command output, check the `Avail` column to determine the free disk space on that node. -+ -.Example output -[source,terminal] ----- -elasticsearch-cdm-kcrsda6l-1-586cc95d4f-h8zq8 -Filesystem Size Used Avail Use% Mounted on -/dev/nvme1n1 19G 522M 19G 3% /elasticsearch/persistent -elasticsearch-cdm-kcrsda6l-2-5b548fc7b-cwwk7 -Filesystem Size Used Avail Use% Mounted on -/dev/nvme2n1 19G 522M 19G 3% /elasticsearch/persistent -elasticsearch-cdm-kcrsda6l-3-5dfc884d99-59tjw -Filesystem Size Used Avail Use% Mounted on -/dev/nvme3n1 19G 528M 19G 3% /elasticsearch/persistent ----- - -. Increase the disk space on all nodes. If increasing the disk space is not possible, try adding a new data node to the cluster, or decrease the total cluster redundancy policy. - -. To check the current `redundancyPolicy`, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get es elasticsearch \ - -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -If you are using a `ClusterLogging` resource on your cluster, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get cl \ - -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -+ -If the cluster `redundancyPolicy` value is higher than the `SingleRedundancy` value, set it to the `SingleRedundancy` value and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. -.. Check the status of all indices on Elasticsearch by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME -- indices ----- - -.. Identify an old index that can be deleted. -.. Delete the index by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query= -X DELETE ----- - -. Continue freeing up and monitoring the disk space. After the used disk space drops below 90%, unblock writing to this node by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_all/_settings?pretty \ - -X PUT -d '{"index.blocks.read_only_allow_delete": null}' ----- diff --git a/modules/es-node-disk-high-watermark-reached.adoc b/modules/es-node-disk-high-watermark-reached.adoc deleted file mode 100644 index 231469ee9331..000000000000 --- a/modules/es-node-disk-high-watermark-reached.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc - -:_mod-docs-content-type: PROCEDURE -[id="es-node-disk-high-watermark-reached_{context}"] -= Elasticsearch node disk high watermark reached - -Elasticsearch attempts to relocate shards away from a node that has reached the high watermark to a node with low disk usage that has not crossed any watermark threshold limits. 
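You can confirm the watermark thresholds that the cluster is currently enforcing before you begin freeing space. The following is a minimal sketch; it assumes the `$ES_POD_NAME` variable that is set up later in this procedure:

[source,terminal]
----
$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \
  -- es_util --query="_cluster/settings?include_defaults=true&pretty" | grep watermark
----

The output includes the `cluster.routing.allocation.disk.watermark.low`, `high`, and `flood_stage` settings, which default to 85%, 90%, and 95% unless they have been overridden.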
- -To allocate shards to a particular node, you must free up some space on that node. If increasing the disk space is not possible, try adding a new data node to the cluster, or decrease the total cluster redundancy policy. - -include::snippets/es-pod-var-logging.adoc[] - -.Procedure - -. Identify the node on which Elasticsearch is deployed by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get po -o wide ----- - -. Check the disk space on each node: -+ -[source,terminal] ----- -$ for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; \ - do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod \ - -- df -h /elasticsearch/persistent; done ----- - -. Check if the cluster is rebalancing: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cluster/health?pretty | grep relocating_shards ----- -+ -If the command output shows relocating shards, the high watermark has been exceeded. The default value of the high watermark is 90%. - -. Increase the disk space on all nodes. If increasing the disk space is not possible, try adding a new data node to the cluster, or decrease the total cluster redundancy policy. - -. To check the current `redundancyPolicy`, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get es elasticsearch \ - -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -If you are using a `ClusterLogging` resource on your cluster, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get cl \ - -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -+ -If the cluster `redundancyPolicy` value is higher than the `SingleRedundancy` value, set it to the `SingleRedundancy` value and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. -.. Check the status of all indices on Elasticsearch by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME -- indices ----- - -.. Identify an old index that can be deleted. -.. Delete the index by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query= -X DELETE ----- diff --git a/modules/es-node-disk-low-watermark-reached.adoc b/modules/es-node-disk-low-watermark-reached.adoc deleted file mode 100644 index 8e72e3e48950..000000000000 --- a/modules/es-node-disk-low-watermark-reached.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc - -:_mod-docs-content-type: PROCEDURE -[id="es-node-disk-low-watermark-reached_{context}"] -= Elasticsearch node disk low watermark reached - -Elasticsearch does not allocate shards to nodes that reach the low watermark. - -include::snippets/es-pod-var-logging.adoc[] - -.Procedure - -. Identify the node on which Elasticsearch is deployed by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get po -o wide ----- - -. Check if there are unassigned shards by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query=_cluster/health?pretty | grep unassigned_shards ----- - -. 
If there are unassigned shards, check the disk space on each node, by running the following command: -+ -[source,terminal] ----- -$ for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; \ - do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod \ - -- df -h /elasticsearch/persistent; done ----- - -. In the command output, check the `Use` column to determine the used disk percentage on that node. -+ -.Example output -[source,terminal] ----- -elasticsearch-cdm-kcrsda6l-1-586cc95d4f-h8zq8 -Filesystem Size Used Avail Use% Mounted on -/dev/nvme1n1 19G 522M 19G 3% /elasticsearch/persistent -elasticsearch-cdm-kcrsda6l-2-5b548fc7b-cwwk7 -Filesystem Size Used Avail Use% Mounted on -/dev/nvme2n1 19G 522M 19G 3% /elasticsearch/persistent -elasticsearch-cdm-kcrsda6l-3-5dfc884d99-59tjw -Filesystem Size Used Avail Use% Mounted on -/dev/nvme3n1 19G 528M 19G 3% /elasticsearch/persistent ----- -+ -If the used disk percentage is above 85%, the node has exceeded the low watermark, and shards can no longer be allocated to this node. - -. To check the current `redundancyPolicy`, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get es elasticsearch \ - -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -If you are using a `ClusterLogging` resource on your cluster, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging get cl \ - -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -+ -If the cluster `redundancyPolicy` value is higher than the `SingleRedundancy` value, set it to the `SingleRedundancy` value and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. -.. Check the status of all indices on Elasticsearch by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME -- indices ----- - -.. Identify an old index that can be deleted. -.. Delete the index by running the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch $ES_POD_NAME \ - -- es_util --query= -X DELETE ----- diff --git a/modules/log-collection-rbac-permissions.adoc b/modules/log-collection-rbac-permissions.adoc deleted file mode 100644 index 80e18026b482..000000000000 --- a/modules/log-collection-rbac-permissions.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log-collection-rbac-permissions_{context}"] -= Authorizing log collection RBAC permissions - -In logging 5.8 and later, the {clo} provides `collect-audit-logs`, `collect-application-logs`, and `collect-infrastructure-logs` cluster roles, which enable the collector to collect audit logs, application logs, and infrastructure logs respectively. - -You can authorize RBAC permissions for log collection by binding the required cluster roles to a service account. - -.Prerequisites - -* The {clo} is installed in the `openshift-logging` namespace. -* You have administrator permissions. - -.Procedure - -. Create a service account for the collector. If you want to write logs to storage that requires a token for authentication, you must include a token in the service account. - -. 
Bind the appropriate cluster roles to the service account: -+ -.Example binding command -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user system:serviceaccount:: ----- diff --git a/modules/log-collector-http-server.adoc b/modules/log-collector-http-server.adoc deleted file mode 100644 index 36db65a0f57f..000000000000 --- a/modules/log-collector-http-server.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/cluster-logging-collector.adoc - - -//This file is for Logging 5.x - -:_mod-docs-content-type: PROCEDURE -[id="log-collector-http-server_{context}"] -= Configuring the collector to receive audit logs as an HTTP server - -You can configure your log collector to listen for HTTP connections and receive audit logs as an HTTP server by specifying `http` as a receiver input in the `ClusterLogForwarder` custom resource (CR). This enables you to use a common log store for audit logs that are collected from both inside and outside of your {product-title} cluster. - -.Prerequisites - -* You have administrator permissions. -* You have installed the {oc-first}. -* You have installed the {clo}. -* You have created a `ClusterLogForwarder` CR. - -.Procedure - -. Modify the `ClusterLogForwarder` CR to add configuration for the `http` receiver input: -+ --- -.Example `ClusterLogForwarder` CR if you are using a multi log forwarder deployment -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - serviceAccountName: - inputs: - - name: http-receiver # <1> - receiver: - type: http # <2> - http: - format: kubeAPIAudit # <3> - port: 8443 # <4> - pipelines: # <5> - - name: http-pipeline - inputRefs: - - http-receiver -# ... ----- -<1> Specify a name for your input receiver. -<2> Specify the input receiver type as `http`. -<3> Currently, only the `kube-apiserver` webhook format is supported for `http` input receivers. -<4> Optional: Specify the port that the input receiver listens on. This must be a value between `1024` and `65535`. The default value is `8443` if this is not specified. -<5> Configure a pipeline for your input receiver. --- -+ --- -.Example `ClusterLogForwarder` CR if you are using a legacy deployment -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - inputs: - - name: http-receiver # <1> - receiver: - type: http # <2> - http: - format: kubeAPIAudit # <3> - port: 8443 # <4> - pipelines: # <5> - - inputRefs: - - http-receiver - name: http-pipeline -# ... ----- -<1> Specify a name for your input receiver. -<2> Specify the input receiver type as `http`. -<3> Currently, only the `kube-apiserver` webhook format is supported for `http` input receivers. -<4> Optional: Specify the port that the input receiver listens on. This must be a value between `1024` and `65535`. The default value is `8443` if this is not specified. -<5> Configure a pipeline for your input receiver. --- - -. 
Apply the changes to the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log-collector-resources-scheduling.adoc b/modules/log-collector-resources-scheduling.adoc deleted file mode 100644 index 6f6e00d67bfe..000000000000 --- a/modules/log-collector-resources-scheduling.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log-collector-resources-scheduling_{context}"] -= Configuring resources and scheduling for logging collectors - -Administrators can modify the resources or scheduling of the collector by creating a `ClusterLogging` custom resource (CR) that is in the same namespace and has the same name as the `ClusterLogForwarder` CR that it supports. - -The applicable stanzas for the `ClusterLogging` CR when using multiple log forwarders in a deployment are `managementState` and `collection`. All other stanzas are ignored. - -.Prerequisites - -* You have administrator permissions. -* You have installed the {clo} version 5.8 or newer. -* You have created a `ClusterLogForwarder` CR. - -.Procedure - -. Create a `ClusterLogging` CR that supports your existing `ClusterLogForwarder` CR: -+ -.Example `ClusterLogging` CR YAML -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: # <1> - namespace: # <2> -spec: - managementState: "Managed" - collection: - type: "vector" - tolerations: - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 1Gi - requests: - cpu: 100m - memory: 1Gi - nodeSelector: - collector: needed -# ... ----- -<1> The name must be the same name as the `ClusterLogForwarder` CR. -<2> The namespace must be the same namespace as the `ClusterLogForwarder` CR. - -. 
Apply the `ClusterLogging` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log-forwarding-collector-outputs.adoc b/modules/log-forwarding-collector-outputs.adoc deleted file mode 100644 index cebf34ecd626..000000000000 --- a/modules/log-forwarding-collector-outputs.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: REFERENCE -[id="log-forwarding-collector-outputs_{context}"] -= Collector outputs - -The following collector outputs are supported: - -.Supported outputs -[options="header"] -|========================================================== -| Feature | Fluentd | Vector -| Elasticsearch v6-v8 | ✓ | ✓ -| Fluent forward | ✓ | -| Syslog RFC3164 | ✓ | ✓ (Logging 5.7+) -| Syslog RFC5424 | ✓ | ✓ (Logging 5.7+) -| Kafka | ✓ | ✓ -| Amazon Cloudwatch | ✓ | ✓ -| Amazon Cloudwatch STS| ✓ | ✓ -| Loki | ✓ | ✓ -| HTTP | ✓ | ✓ (Logging 5.7+) -| {gcp-full} Logging | ✓ | ✓ -| Splunk | | ✓ (Logging 5.6+) -|========================================================== diff --git a/modules/log-forwarding-implementations.adoc b/modules/log-forwarding-implementations.adoc deleted file mode 100644 index 92b5934d0f39..000000000000 --- a/modules/log-forwarding-implementations.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: CONCEPT -[id="log-forwarding-implementations_{context}"] -= Log forwarding implementations - -There are two log forwarding implementations available: the legacy implementation, and the multi log forwarder feature. - -[IMPORTANT] -==== -Only the Vector collector is supported for use with the multi log forwarder feature. The Fluentd collector can only be used with legacy implementations. -==== - -[id="log-forwarding-implementations-legacy_{context}"] -== Legacy implementation - -In legacy implementations, you can only use one log forwarder in your cluster. The `ClusterLogForwarder` resource in this mode must be named `instance`, and must be created in the `openshift-logging` namespace. The `ClusterLogForwarder` resource also requires a corresponding `ClusterLogging` resource named `instance` in the `openshift-logging` namespace. - -[id="log-forwarding-implementations-multi-clf_{context}"] -== Multi log forwarder feature - -The multi log forwarder feature is available in logging 5.8 and later, and provides the following functionality: - -* Administrators can control which users are allowed to define log collection and which logs they are allowed to collect. -* Users who have the required permissions are able to specify additional log collection configurations. -* Administrators who are migrating from the deprecated Fluentd collector to the Vector collector can deploy a new log forwarder separately from their existing deployment. The existing and new log forwarders can operate simultaneously while workloads are being migrated. - -In multi log forwarder implementations, you are not required to create a corresponding `ClusterLogging` resource for your `ClusterLogForwarder` resource. 
You can create multiple `ClusterLogForwarder` resources using any name, in any namespace, with the following exceptions: - -* You cannot create a `ClusterLogForwarder` resource named `instance` in the `openshift-logging` namespace, because this is reserved for a log forwarder that supports the legacy workflow using the Fluentd collector. -* You cannot create a `ClusterLogForwarder` resource named `collector` in the `openshift-logging` namespace, because this is reserved for the collector. diff --git a/modules/log6x-6-1-0-rn.adoc b/modules/log6x-6-1-0-rn.adoc deleted file mode 100644 index 9e9fb090df14..000000000000 --- a/modules/log6x-6-1-0-rn.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -//log6x-release-notes-6.1 -= Logging 6.1.0 Release Notes -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-6-1-0_{context}"] - -This release includes link:https://access.redhat.com/errata/RHBA-2024:9038[{logging-uc} {for} Bug Fix Release 6.1.0]. - -[id="openshift-logging-release-notes-6-1-0-enhancements"] -== New Features and Enhancements - -=== Log Collection - -* This enhancement adds the source `iostream` to the attributes sent from collected container logs. The value is set to either `stdout` or `stderr` based on how the collector received it. (link:https://issues.redhat.com/browse/LOG-5292[LOG-5292]) - -* With this update, the default memory limit for the collector increases from 1024 Mi to 2048 Mi. Users should adjust resource limits based on their cluster’s specific needs and specifications. (link:https://issues.redhat.com/browse/LOG-6072[LOG-6072]) - -* With this update, users can now set the syslog output delivery mode of the `ClusterLogForwarder` CR to either `AtLeastOnce` or `AtMostOnce.` (link:https://issues.redhat.com/browse/LOG-6355[LOG-6355]) - -=== Log Storage - -* With this update, the new `1x.pico` LokiStack size supports clusters with fewer workloads and lower log volumes (up to 50GB/day). (link:https://issues.redhat.com/browse/LOG-5939[LOG-5939]) - -[id="logging-release-notes-6-1-0-technology-preview-features"] -== Technology Preview - -:FeatureName: The OpenTelemetry Protocol (OTLP) output log forwarder -include::snippets/technology-preview.adoc[] - -* With this update, OpenTelemetry logs can now be forwarded using the `OTel` (OpenTelemetry) data model to a Red Hat Managed LokiStack instance. To enable this feature, add the `observability.openshift.io/tech-preview-otlp-output: "enabled"` annotation to your `ClusterLogForwarder` configuration. For additional configuration information, see link:https://github.com/openshift/cluster-logging-operator/blob/master/docs/features/logforwarding/outputs/opentelemetry-lokistack-forwarding.adoc[OTLP Forwarding]. - -* With this update, a `dataModel` field has been added to the `lokiStack` output specification. Set the `dataModel` to `Otel` to configure log forwarding using the OpenTelemetry data format. The default is set to `Viaq`. For information about data mapping see link:https://opentelemetry.io/docs/specs/otlp/[OTLP Specification]. - -[id="logging-release-notes-6-1-0-bug-fixes_{context}"] -== Bug Fixes -None. 
- -[id="logging-release-notes-6-1-0-CVEs_{context}"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2024-6119[CVE-2024-6119] -* link:https://access.redhat.com/security/cve/CVE-2024-6232[CVE-2024-6232] diff --git a/modules/log6x-6-1-1-rn.adoc b/modules/log6x-6-1-1-rn.adoc deleted file mode 100644 index 02f4f28ad1a7..000000000000 --- a/modules/log6x-6-1-1-rn.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.1/log6x-release-notes-6.1.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-6-1-1_{context}"] -= Logging 6.1.1 Release Notes - -This release includes link:https://access.redhat.com/errata/RHBA-2024:10992[{logging-uc} {for} Bug Fix Release 6.1.1]. - -[id="logging-release-notes-6-1-1-enhancements_{context}"] -== New Features and Enhancements - -* With this update, the {loki-op} supports configuring the workload identity federation on the {gcp-first} by using the Cluster Credential Operator (CCO) in {product-title} 4.17 or later. (link:https://issues.redhat.com/browse/LOG-6420[LOG-6420]) - -[id="logging-release-notes-6-1-1-bug-fixes_{context}"] -== Bug Fixes - -* Before this update, the collector was discarding longer audit log messages with the following error message: *Internal log [Found line that exceeds max_line_bytes; discarding.]*. With this update, the discarding of longer audit messages is avoided by increasing the audit configuration thresholds: The maximum line size, `max_line_bytes`, is `3145728` bytes. The maximum number of bytes read during a read cycle, `max_read_bytes`, is `262144` bytes. (link:https://issues.redhat.com/browse/LOG-6379[LOG-6379]) - -* Before this update, an input receiver service was repeatedly created and deleted, causing issues with mounting the TLS secrets. With this update, the service is created once and only deleted if it is not defined in the `ClusterLogForwarder` custom resource. (link:https://issues.redhat.com/browse/LOG-6383[LOG-6383]) - -* Before this update, pipeline validation might have entered an infinite loop if a name was a substring of another name. With this update, stricter name equality checks prevent the infinite loop. (link:https://issues.redhat.com/browse/LOG-6405[LOG-6405]) - -* Before this update, the collector alerting rules included the summary and message fields. With this update, the collector alerting rules include the summary and description fields. (link:https://issues.redhat.com/browse/LOG-6407[LOG-6407]) - -* Before this update, setting up the custom audit inputs in the `ClusterLogForwarder` custom resource with configured `LokiStack` output caused errors due to the nil pointer dereference. With this update, the Operator performs the nil checks, preventing such errors. (link:https://issues.redhat.com/browse/LOG-6449[LOG-6449]) - -* Before this update, the `ValidLokistackOTLPOutputs` condition appeared in the status of the `ClusterLogForwarder` custom resource even when the output type is not `LokiStack`. With this update, the `ValidLokistackOTLPOutputs` condition is removed, and the validation messages for the existing output conditions are corrected. (link:https://issues.redhat.com/browse/LOG-6469[LOG-6469]) - -* Before this update, the collector did not correctly mount the `/var/log/oauth-server/` path, which prevented the collection of the audit logs. With this update, the volume mount is added, and the audit logs are collected as expected. 
(link:https://issues.redhat.com/browse/LOG-6484[LOG-6484]) - -* Before this update, the `must-gather` script of the {clo} might have failed to gather the LokiStack data. With this update, the `must-gather` script is fixed, and the LokiStack data is gathered reliably. (link:https://issues.redhat.com/browse/LOG-6498[LOG-6498]) - -* Before this update, the collector did not correctly mount the `oauth-apiserver` audit log file. As a result, such audit logs were not collected. With this update, the volume mount is correctly mounted, and the logs are collected as expected. (link:https://issues.redhat.com/browse/LOG-6533[LOG-6533]) - -[id="logging-release-notes-6-1-1-CVEs_{context}"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2019-12900[CVE-2019-12900] -* link:https://access.redhat.com/security/cve/CVE-2024-2511[CVE-2024-2511] -* link:https://access.redhat.com/security/cve/CVE-2024-3596[CVE-2024-3596] -* link:https://access.redhat.com/security/cve/CVE-2024-4603[CVE-2024-4603] -* link:https://access.redhat.com/security/cve/CVE-2024-4741[CVE-2024-4741] -* link:https://access.redhat.com/security/cve/CVE-2024-5535[CVE-2024-5535] -* link:https://access.redhat.com/security/cve/CVE-2024-10963[CVE-2024-10963] -* link:https://access.redhat.com/security/cve/CVE-2024-50602[CVE-2024-50602] diff --git a/modules/log6x-6-1-2-rn.adoc b/modules/log6x-6-1-2-rn.adoc deleted file mode 100644 index 5edeb9dbf921..000000000000 --- a/modules/log6x-6-1-2-rn.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.1/log6x-release-notes-6.1.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-6-1-2_{context}"] -= Logging 6.1.2 Release Notes - -This release includes link:https://access.redhat.com/errata/RHBA-2025:1229[{logging-uc} {for} Bug Fix Release 6.1.2]. - -[id="logging-release-notes-6-1-2-enhancements_{context}"] -== New Features and Enhancements - -* This enhancement adds `OTel` semantic stream labels to the `lokiStack` output so that you can query logs by using both `ViaQ` and `OTel` stream labels. -(link:https://issues.redhat.com/browse/LOG-6579[LOG-6579]) - -[id="logging-release-notes-6-1-2-bug-fixes_{context}"] -== Bug Fixes - -* Before this update, the collector alerting rules contained summary and message fields. With this update, the collector alerting rules contain summary and description fields. -(link:https://issues.redhat.com/browse/LOG-6126[LOG-6126]) - -* Before this update, the collector metrics dashboard could get removed after an Operator upgrade due to a race condition during the transition from the old to the new pod deployment. With this update, labels are added to the dashboard `ConfigMap` to identify the upgraded deployment as the current owner so that it will not be removed. -(link:https://issues.redhat.com/browse/LOG-6280[LOG-6280]) - -* Before this update, when you included infrastructure namespaces in application inputs, their `log_type` would be set to `application`. With this update, the `log_type` of infrastructure namespaces included in application inputs is set to `infrastructure`. -(link:https://issues.redhat.com/browse/LOG-6373[LOG-6373]) - -* Before this update, the Cluster Logging Operator used a cached client to fetch the `SecurityContextConstraint` cluster resource, which could result in an error when the cache is invalid. With this update, the Operator now always retrieves data from the API server instead of using a cache. 
-(link:https://issues.redhat.com/browse/LOG-6418[LOG-6418]) - -* Before this update, the logging `must-gather` did not collect resources such as `UIPlugin`, `ClusterLogForwarder`, `LogFileMetricExporter`, and `LokiStack`. With this update, the `must-gather` now collects all of these resources and places them in their respective namespace directory instead of the `cluster-logging` directory. -(link:https://issues.redhat.com/browse/LOG-6422[LOG-6422]) - -* Before this update, the Vector startup script attempted to delete buffer lock files during startup. With this update, the Vector startup script no longer attempts to delete buffer lock files during startup. -(link:https://issues.redhat.com/browse/LOG-6506[LOG-6506]) - -* Before this update, the API documentation incorrectly claimed that `lokiStack` outputs would default the target namespace, which could prevent the collector from writing to that output. With this update, this claim has been removed from the API documentation and the Cluster Logging Operator now validates that a target namespace is present. -(link:https://issues.redhat.com/browse/LOG-6573[LOG-6573]) - -* Before this update, the Cluster Logging Operator could deploy the collector with output configurations that were not referenced by any inputs. With this update, a validation check for the `ClusterLogForwarder` resource prevents the Operator from deploying the collector. -(link:https://issues.redhat.com/browse/LOG-6585[LOG-6585]) - -[id="logging-release-notes-6-1-2-CVEs_{context}"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2019-12900[CVE-2019-12900] diff --git a/modules/log6x-6-2-0-rn.adoc b/modules/log6x-6-2-0-rn.adoc deleted file mode 100644 index 846d0fa1493e..000000000000 --- a/modules/log6x-6-2-0-rn.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.2/log6x-release-notes-6.2.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-6-2-0_{context}"] -= Logging 6.2.0 Release Notes - -//// -TOFIX -This release includes link:https://access.redhat.com/errata/RHBA-2024:9038[{logging-uc} {for} Bug Fix Release 6.2.0]. -//// - -[id="openshift-logging-release-notes-6-2-0-enhancements_{context}"] -== New Features and Enhancements - -//// -TOFIX: -=== Log Collection - -* This enhancement adds the source `iostream` to the attributes sent from collected container logs. The value is set to either `stdout` or `stderr` based on how the collector received it. (link:https://issues.redhat.com/browse/LOG-5292[LOG-5292]) - -* With this update, the default memory limit for the collector increases from 1024 Mi to 2048 Mi. Users should adjust resource limits based on their cluster’s specific needs and specifications. (link:https://issues.redhat.com/browse/LOG-6072[LOG-6072]) - -* With this update, users can now set the syslog output delivery mode of the `ClusterLogForwarder` CR to either `AtLeastOnce` or `AtMostOnce.` (link:https://issues.redhat.com/browse/LOG-6355[LOG-6355]) - -=== Log Storage - -* With this update, the new `1x.pico` LokiStack size supports clusters with fewer workloads and lower log volumes (up to 50GB/day). 
(link:https://issues.redhat.com/browse/LOG-5939[LOG-5939]) - -//// - -[id="logging-release-notes-6-2-0-technology-preview-features_{context}"] -== Technology Preview - -:FeatureName: The OpenTelemetry Protocol (OTLP) output log forwarder -include::snippets/technology-preview.adoc[] - -//// -* With this update, a `dataModel` field has been added to the `lokiStack` output specification. Set the `dataModel` to `Otel` to configure log forwarding using the OpenTelemetry data format. The default is set to `Viaq`. For information about data mapping see link:https://opentelemetry.io/docs/specs/otlp/[OTLP Specification]. - -//// - -[id="logging-release-notes-6-2-0-bug-fixes_{context}"] -== Bug Fixes -//// -TOFIX: -None. -//// - -[id="logging-release-notes-6-2-0-CVEs_{context}"] -== CVEs -//// -TOFIX: -* link:https://access.redhat.com/security/cve/CVE-2024-6119[CVE-2024-6119] -* link:https://access.redhat.com/security/cve/CVE-2024-6232[CVE-2024-6232] -//// \ No newline at end of file diff --git a/modules/log6x-audit-log-filtering.adoc b/modules/log6x-audit-log-filtering.adoc deleted file mode 100644 index 495c2307a574..000000000000 --- a/modules/log6x-audit-log-filtering.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: CONCEPT -[id="log6x-audit-filtering_{context}"] -= Overview of API audit filter -OpenShift API servers generate audit events for each API call, detailing the request, response, and the identity of the requester, leading to large volumes of data. The API Audit filter uses rules to enable the exclusion of non-essential events and the reduction of event size, facilitating a more manageable audit trail. Rules are checked in order, and checking stops at the first match. The amount of data that is included in an event is determined by the value of the `level` field: - -* `None`: The event is dropped. -* `Metadata`: Audit metadata is included, request and response bodies are removed. -* `Request`: Audit metadata and the request body are included, the response body is removed. -* `RequestResponse`: All data is included: metadata, request body and response body. The response body can be very large. For example, `oc get pods -A` generates a response body containing the YAML description of every pod in the cluster. - -The `ClusterLogForwarder` custom resource (CR) uses the same format as the standard link:https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/#audit-policy[Kubernetes audit policy], while providing the following additional functions: - -Wildcards:: Names of users, groups, namespaces, and resources can have a leading or trailing `\*` asterisk character. For example, the namespace `openshift-\*` matches `openshift-apiserver` or `openshift-authentication`. Resource `\*/status` matches `Pod/status` or `Deployment/status`. - -Default Rules:: Events that do not match any rule in the policy are filtered as follows: -* Read-only system events such as `get`, `list`, and `watch` are dropped. -* Service account write events that occur within the same namespace as the service account are dropped. -* All other events are forwarded, subject to any configured rate limits. - -To disable these defaults, either end your rules list with a rule that has only a `level` field or add an empty rule. - -Omit Response Codes:: A list of integer status codes to omit. 
You can drop events based on the HTTP status code in the response by using the `OmitResponseCodes` field, which lists HTTP status codes for which no events are created. The default value is `[404, 409, 422, 429]`. If the value is an empty list, `[]`, then no status codes are omitted. - -The `ClusterLogForwarder` CR audit policy acts in addition to the {product-title} audit policy. The `ClusterLogForwarder` CR audit filter changes what the log collector forwards and provides the ability to filter by verb, user, group, namespace, or resource. You can create multiple filters to send different summaries of the same audit stream to different places. For example, you can send a detailed stream to the local cluster log store and a less detailed stream to a remote site. - -[NOTE] -==== -You must have a cluster role `collect-audit-logs` to collect the audit logs. The following example provided is intended to illustrate the range of rules possible in an audit policy and is not a recommended configuration. -==== - -.Example audit policy -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: - namespace: -spec: - serviceAccount: - name: - pipelines: - - name: my-pipeline - inputRefs: audit # <1> - filterRefs: my-policy # <2> - filters: - - name: my-policy - type: kubeAPIAudit - kubeAPIAudit: - # Don't generate audit events for all requests in RequestReceived stage. - omitStages: - - "RequestReceived" - - rules: - # Log pod changes at RequestResponse level - - level: RequestResponse - resources: - - group: "" - resources: ["pods"] - - # Log "pods/log", "pods/status" at Metadata level - - level: Metadata - resources: - - group: "" - resources: ["pods/log", "pods/status"] - - # Don't log requests to a configmap called "controller-leader" - - level: None - resources: - - group: "" - resources: ["configmaps"] - resourceNames: ["controller-leader"] - - # Don't log watch requests by the "system:kube-proxy" on endpoints or services - - level: None - users: ["system:kube-proxy"] - verbs: ["watch"] - resources: - - group: "" # core API group - resources: ["endpoints", "services"] - - # Don't log authenticated requests to certain non-resource URL paths. - - level: None - userGroups: ["system:authenticated"] - nonResourceURLs: - - "/api*" # Wildcard matching. - - "/version" - - # Log the request body of configmap changes in kube-system. - - level: Request - resources: - - group: "" # core API group - resources: ["configmaps"] - # This rule only applies to resources in the "kube-system" namespace. - # The empty string "" can be used to select non-namespaced resources. - namespaces: ["kube-system"] - - # Log configmap and secret changes in all other namespaces at the Metadata level. - - level: Metadata - resources: - - group: "" # core API group - resources: ["secrets", "configmaps"] - - # Log all other resources in core and extensions at the Request level. - - level: Request - resources: - - group: "" # core API group - - group: "extensions" # Version of group should NOT be included. - - # A catch-all rule to log all other requests at the Metadata level. - - level: Metadata ----- -<1> The log types that are collected. The value for this field can be `audit` for audit logs, `application` for application logs, `infrastructure` for infrastructure logs, or a named input that has been defined for your application. -<2> The name of your audit policy. 
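-
-For example, to generate audit events even for the response codes that are omitted by default, set the omit list to an empty value. The following snippet is a minimal sketch of that single setting and assumes that the field is spelled `omitResponseCodes` in the CR spec, corresponding to the `OmitResponseCodes` field described above; the filter name is a placeholder:
-
-[source,yaml]
-----
-filters:
-  - name: keep-all-response-codes
-    type: kubeAPIAudit
-    kubeAPIAudit:
-      omitResponseCodes: [] # an empty list means that no status codes are omitted
-----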
diff --git a/modules/log6x-cluster-logging-collector-limits.adoc b/modules/log6x-cluster-logging-collector-limits.adoc deleted file mode 100644 index 2e2dcde0f65b..000000000000 --- a/modules/log6x-cluster-logging-collector-limits.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-cluster-logging-collector-limits_{context}"] -= Configuring log collector CPU and memory limits - -You can adjust the CPU and memory limits for the log collector by editing the `ClusterLogForwarder` custom resource (CR). - -.Procedure - -* Edit the `ClusterLogForwarder` custom resource (CR): -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogForwarder instance ----- -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - collector: - resources: - limits: # <1> - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi -# ... ----- -<1> Specify the CPU and memory limits and requests as needed. The values shown are the default values. diff --git a/modules/log6x-cluster-logging-collector-log-forward-syslog.adoc b/modules/log6x-cluster-logging-collector-log-forward-syslog.adoc deleted file mode 100644 index 62ea2e8aec36..000000000000 --- a/modules/log6x-cluster-logging-collector-log-forward-syslog.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.2/log6x-clf-6.2.adoc - -:_mod-docs-content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-syslog-6x_{context}"] -= Forwarding logs using the syslog protocol - -You can use the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocol to send a copy of your logs to an external log aggregator that is configured to accept the protocol instead of, or in addition to, the default log store. You are responsible for configuring the external log aggregator, such as a syslog server, to receive the logs from {product-title}. - -To configure log forwarding using the syslog protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the syslog servers, and pipelines that use those outputs. The syslog output can use a UDP, TCP, or TLS connection. - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: collector -spec: - managementState: Managed - outputs: - - name: rsyslog-east # <1> - syslog: - appName: # <2> - enrichment: KubernetesMinimal - facility: # <3> - msgId: # <4> - payloadKey: # <5> - procId: # <6> - rfc: # <7> - severity: informational # <8> - tuning: - deliveryMode: # <9> - url: # <10> - tls: # <11> - ca: - key: ca-bundle.crt - secretName: syslog-secret - type: syslog - pipelines: - - inputRefs: # <12> - - application - name: syslog-east # <13> - outputRefs: - - rsyslog-east - serviceAccount: # <14> - name: logcollector ----- -<1> Specify a name for the output. -<2> Optional: Specify the value for the `APP-NAME` part of the syslog message header. The value must conform with link:https://datatracker.ietf.org/doc/html/rfc5424[The Syslog Protocol].
The value can be a combination of static and dynamic values consisting of field paths followed by `||`, and then followed by another field path or a static value. The final value is truncated to a maximum length of 48 characters. You must enclose a dynamic value in curly brackets, and the value must be followed by a static fallback value separated with `||`. Static values can only contain alphanumeric characters along with dashes, underscores, dots, and forward slashes. Example value: -{.||"none"}. -<3> Optional: Specify the value for the `Facility` part of the syslog message header. -<4> Optional: Specify the value for the `MSGID` part of the syslog message header. The value can be a combination of static and dynamic values consisting of field paths followed by `||`, and then followed by another field path or a static value. The final value is truncated to a maximum length of 32 characters. You must enclose a dynamic value in curly brackets, and the value must be followed by a static fallback value separated with `||`. Static values can only contain alphanumeric characters along with dashes, underscores, dots, and forward slashes. Example value: -{.||"none"}. -<5> Optional: Specify the record field to use as the payload. The `payloadKey` value must be a single field path enclosed in single curly brackets `{}`. Example: {.}. -<6> Optional: Specify the value for the `PROCID` part of the syslog message header. The value must conform with link:https://datatracker.ietf.org/doc/html/rfc5424[The Syslog Protocol]. The value can be a combination of static and dynamic values consisting of field paths followed by `||`, and then followed by another field path or a static value. The final value is truncated to a maximum length of 48 characters. You must enclose a dynamic value in curly brackets, and the value must be followed by a static fallback value separated with `||`. Static values can only contain alphanumeric characters along with dashes, underscores, dots, and forward slashes. Example value: -{.||"none"}. -<7> Optional: Set the RFC that the generated messages conform to. The value can be `RFC3164` or `RFC5424`. -<8> Optional: Set the severity level for the message. For more information, see link:https://datatracker.ietf.org/doc/html/rfc5424#section-6.2.1[The Syslog Protocol]. -<9> Optional: Set the delivery mode for log forwarding. The value can be either `AtLeastOnce` or `AtMostOnce`. -<10> Specify the absolute URL with a scheme. Valid schemes are: `tcp`, `tls`, and `udp`. For example: `tls://syslog-receiver.example.com:6514`. -<11> Specify the settings for controlling the options of the transport layer security (TLS) client connections. -<12> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. -<13> Specify a name for the pipeline. -<14> The name of your service account. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[id="cluster-logging-collector-log-forward-examples-syslog-log-source_{context}"] -== Adding log source information to the message output - -You can add `namespace_name`, `pod_name`, and `container_name` elements to the `message` field of the record by adding the `enrichment` field to your `ClusterLogForwarder` custom resource (CR). - -[source,yaml] ----- -# ... 
- spec: - outputs: - - name: syslogout - syslog: - enrichment: KubernetesMinimal - facility: user - payloadKey: message - rfc: RFC3164 - severity: debug - type: syslog - url: tls://syslog-receiver.example.com:6514 - pipelines: - - inputRefs: - - application - name: test-app - outputRefs: - - syslogout -# ... ----- - -[NOTE] -==== -This configuration is compatible with both RFC3164 and RFC5424. -==== - -.Example syslog message output with `enrichment: None` -[source, text] ----- - 2025-03-03T11:48:01+00:00 example-worker-x syslogsyslogserverd846bb9b: {...} ----- - -.Example syslog message output with `enrichment: KubernetesMinimal` - -[source, text] ----- -2025-03-03T11:48:01+00:00 example-worker-x syslogsyslogserverd846bb9b: namespace_name=cakephp-project container_name=mysql pod_name=mysql-1-wr96h,message: {...} ----- diff --git a/modules/log6x-collection-setup.adoc b/modules/log6x-collection-setup.adoc deleted file mode 100644 index 4ca53123e765..000000000000 --- a/modules/log6x-collection-setup.adoc +++ /dev/null @@ -1,205 +0,0 @@ -// Module included in the following assemblies: -// -// observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-collection-setup_{context}"] -= Setting up log collection - -This release of Cluster Logging requires administrators to explicitly grant log collection permissions to the service account associated with *ClusterLogForwarder*. This was not required in previous releases for the legacy logging scenario consisting of a *ClusterLogging* and, optionally, a *ClusterLogForwarder.logging.openshift.io* resource. - -The {clo} provides `collect-audit-logs`, `collect-application-logs`, and `collect-infrastructure-logs` cluster roles, which enable the collector to collect audit logs, application logs, and infrastructure logs respectively. - -Setup log collection by binding the required cluster roles to your service account. - -== Legacy service accounts -To use the existing legacy service account `logcollector`, create the following *ClusterRoleBinding*: - -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-application-logs system:serviceaccount:openshift-logging:logcollector ----- - -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs system:serviceaccount:openshift-logging:logcollector ----- - -Additionally, create the following *ClusterRoleBinding* if collecting audit logs: - -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-audit-logs system:serviceaccount:openshift-logging:logcollector ----- - - -== Creating service accounts -.Prerequisites - -* The {clo} is installed in the `openshift-logging` namespace. -* You have administrator permissions. - -.Procedure - -. Create a service account for the collector. If you want to write logs to storage that requires a token for authentication, you must include a token in the service account. - -. Bind the appropriate cluster roles to the service account: -+ -.Example binding command -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user system:serviceaccount:: ----- - -=== Cluster Role Binding for your Service Account -The role_binding.yaml file binds the ClusterLogging operator's ClusterRole to a specific ServiceAccount, allowing it to manage Kubernetes resources cluster-wide. 
- -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: manager-rolebinding -roleRef: <1> - apiGroup: rbac.authorization.k8s.io <2> - kind: ClusterRole <3> - name: cluster-logging-operator <4> -subjects: <5> - - kind: ServiceAccount <6> - name: cluster-logging-operator <7> - namespace: openshift-logging <8> ----- -<1> roleRef: References the ClusterRole to which the binding applies. -<2> apiGroup: Indicates the RBAC API group, specifying that the ClusterRole is part of Kubernetes' RBAC system. -<3> kind: Specifies that the referenced role is a ClusterRole, which applies cluster-wide. -<4> name: The name of the ClusterRole being bound to the ServiceAccount, here cluster-logging-operator. -<5> subjects: Defines the entities (users or service accounts) that are being granted the permissions from the ClusterRole. -<6> kind: Specifies that the subject is a ServiceAccount. -<7> Name: The name of the ServiceAccount being granted the permissions. -<8> namespace: Indicates the namespace where the ServiceAccount is located. - -=== Writing application logs -The write-application-logs-clusterrole.yaml file defines a ClusterRole that grants permissions to write application logs to the Loki logging application. - -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-logging-write-application-logs -rules: <1> - - apiGroups: <2> - - loki.grafana.com <3> - resources: <4> - - application <5> - resourceNames: <6> - - logs <7> - verbs: <8> - - create <9> ----- -<1> rules: Specifies the permissions granted by this ClusterRole. -<2> apiGroups: Refers to the API group loki.grafana.com, which relates to the Loki logging system. -<3> loki.grafana.com: The API group for managing Loki-related resources. -<4> resources: The resource type that the ClusterRole grants permission to interact with. -<5> application: Refers to the application resources within the Loki logging system. -<6> resourceNames: Specifies the names of resources that this role can manage. -<7> logs: Refers to the log resources that can be created. -<8> verbs: The actions allowed on the resources. -<9> create: Grants permission to create new logs in the Loki system. - - -=== Writing audit logs -The write-audit-logs-clusterrole.yaml file defines a ClusterRole that grants permissions to create audit logs in the Loki logging system. -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-logging-write-audit-logs -rules: <1> - - apiGroups: <2> - - loki.grafana.com <3> - resources: <4> - - audit <5> - resourceNames: <6> - - logs <7> - verbs: <8> - - create <9> ----- -<1> rules: Defines the permissions granted by this ClusterRole. -<2> apiGroups: Specifies the API group loki.grafana.com. -<3> loki.grafana.com: The API group responsible for Loki logging resources. -<4> resources: Refers to the resource type this role manages, in this case, audit. -<5> audit: Specifies that the role manages audit logs within Loki. -<6> resourceNames: Defines the specific resources that the role can access. -<7> logs: Refers to the logs that can be managed under this role. -<8> verbs: The actions allowed on the resources. -<9> create: Grants permission to create new audit logs. - -=== Writing infrastructure logs -The write-infrastructure-logs-clusterrole.yaml file defines a ClusterRole that grants permission to create infrastructure logs in the Loki logging system. 
- -.Sample YAML -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-logging-write-infrastructure-logs -rules: <1> - - apiGroups: <2> - - loki.grafana.com <3> - resources: <4> - - infrastructure <5> - resourceNames: <6> - - logs <7> - verbs: <8> - - create <9> ----- -<1> rules: Specifies the permissions this ClusterRole grants. -<2> apiGroups: Specifies the API group for Loki-related resources. -<3> loki.grafana.com: The API group managing the Loki logging system. -<4> resources: Defines the resource type that this role can interact with. -<5> infrastructure: Refers to infrastructure-related resources that this role manages. -<6> resourceNames: Specifies the names of resources this role can manage. -<7> logs: Refers to the log resources related to infrastructure. -<8> verbs: The actions permitted by this role. -<9> create: Grants permission to create infrastructure logs in the Loki system. - -=== ClusterLogForwarder editor role -The clusterlogforwarder-editor-role.yaml file defines a ClusterRole that allows users to manage ClusterLogForwarders in OpenShift. - - -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: clusterlogforwarder-editor-role -rules: <1> - - apiGroups: <2> - - observability.openshift.io <3> - resources: <4> - - clusterlogforwarders <5> - verbs: <6> - - create <7> - - delete <8> - - get <9> - - list <10> - - patch <11> - - update <12> - - watch <13> ----- -<1> rules: Specifies the permissions this ClusterRole grants. -<2> apiGroups: Refers to the OpenShift-specific API group -<3> obervability.openshift.io: The API group for managing observability resources, like logging. -<4> resources: Specifies the resources this role can manage. -<5> clusterlogforwarders: Refers to the log forwarding resources in OpenShift. -<6> verbs: Specifies the actions allowed on the ClusterLogForwarders. -<7> create: Grants permission to create new ClusterLogForwarders. -<8> delete: Grants permission to delete existing ClusterLogForwarders. -<9> get: Grants permission to retrieve information about specific ClusterLogForwarders. -<10> list: Allows listing all ClusterLogForwarders. -<11> patch: Grants permission to partially modify ClusterLogForwarders. -<12> update: Grants permission to update existing ClusterLogForwarders. -<13> watch: Grants permission to monitor changes to ClusterLogForwarders. diff --git a/modules/log6x-config-roles.adoc b/modules/log6x-config-roles.adoc deleted file mode 100644 index 5eaa4cb13423..000000000000 --- a/modules/log6x-config-roles.adoc +++ /dev/null @@ -1,113 +0,0 @@ -// Module included in the following assemblies: -// -// observability/logging/logging-6.0/log6x-clf.adoc - - -:_mod-docs-content-type: CONCEPT -[id="log6x-config-roles_{context}"] -= Configuring Roles for Logging - -Logging does not grant all users access to logs by default. As an administrator, you must configure your users' access unless the Operator was upgraded and prior configurations are in place. Depending on your configuration and need, you can configure fine grain access to logs using the following: - -* Cluster wide policies -* Namespace scoped policies -* Creation of custom admin groups - -As an administrator, you must create the role bindings and cluster role bindings appropriate for your deployment. The {clo} provides the following cluster roles: - -* `cluster-logging-application-view` grants permission to read application logs. 
-* `cluster-logging-infrastructure-view` grants permission to read infrastructure logs. -* `cluster-logging-audit-view` grants permission to read audit logs. - -If you have upgraded from a prior version, an additional cluster role `logging-application-logs-reader` and associated cluster role binding `logging-all-authenticated-application-logs-reader` provide backward compatibility, allowing any authenticated user read access in their namespaces. - -[NOTE] -==== -Users with access by namespace must provide a namespace when querying application logs. -==== - -[id="cluster-wide-access_{context}"] -== Cluster-wide access -Cluster role binding resources reference cluster roles and set permissions cluster-wide. - -.Example ClusterRoleBinding -[source,yaml] ----- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: logging-all-application-logs-reader -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-logging-application-view # <1> -subjects: # <2> -- kind: Group - name: system:authenticated - apiGroup: rbac.authorization.k8s.io ----- -<1> Additional `ClusterRoles` are `cluster-logging-infrastructure-view` and `cluster-logging-audit-view`. -<2> Specifies the users or groups this object applies to. - -[id="namespaced-access_{context}"] -== Namespaced access - -You can use `RoleBinding` resources with `ClusterRole` objects to grant a user or group access to logs in a specific namespace. - -.Example RoleBinding -[source,yaml] ----- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: allow-read-logs - namespace: log-test-0 # <1> -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-logging-application-view -subjects: -- kind: User - apiGroup: rbac.authorization.k8s.io - name: testuser-0 ----- -<1> Specifies the namespace this `RoleBinding` applies to. - - -[id="custom-admin-group-access_{context}"] -== Custom admin group access -If you have a large deployment with several users who require broader permissions, you can create a custom group by using the `adminGroups` field. Users who are members of any group specified in the `adminGroups` field of the `LokiStack` CR are considered administrators. - -Administrator users have access to all application logs in all namespaces if they are also assigned the `cluster-logging-application-view` role. - -.Example `LokiStack` CR -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: -# tag::LokiMode[] - name: logging-loki - namespace: openshift-logging -# end::LokiMode[] -# tag::NetObservMode[] - name: loki - namespace: netobserv -# end::NetObservMode[] -spec: - tenants: -# tag::LokiMode[] - mode: openshift-logging # <1> -# end::LokiMode[] -# tag::NetObservMode[] - mode: openshift-network # <1> -# end::NetObservMode[] - openshift: - adminGroups: # <2> - - cluster-admin - - custom-admin-group # <3> ----- -<1> Custom admin groups are only available in this mode. -<2> Entering an empty list `[]` value for this field disables admin groups. 
-<3> Overrides the default groups (`system:cluster-admins`, `cluster-admin`, `dedicated-admin`) -// end::CustomAdmin[] diff --git a/modules/log6x-configuring-lokistack-otlp-data-ingestion.adoc b/modules/log6x-configuring-lokistack-otlp-data-ingestion.adoc deleted file mode 100644 index 6757815e700e..000000000000 --- a/modules/log6x-configuring-lokistack-otlp-data-ingestion.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-configuring-lokistack-otlp.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-configuring-lokistack-otlp-data-ingestion_{context}"] -= Configuring LokiStack for OTLP data ingestion - -:FeatureName: The OpenTelemetry Protocol (OTLP) output log forwarder -include::snippets/technology-preview.adoc[] - -To configure a `LokiStack` custom resource (CR) for OTLP ingestion, follow these steps: - -.Prerequisites - -* Ensure that your Loki setup supports structured metadata, introduced in schema version 13 to enable OTLP log ingestion. - -.Procedure - -. Set the schema version: -+ -** When creating a new `LokiStack` CR, set `version: v13` in the storage schema configuration. -+ -[NOTE] -==== -For existing configurations, add a new schema entry with `version: v13` and an `effectiveDate` in the future. For more information on updating schema versions, see link:https://grafana.com/docs/loki/latest/configure/storage/#upgrading-schemas[Upgrading Schemas] (Grafana documentation). -==== - -. Configure the storage schema as follows: -+ -.Example configure storage schema -[source,yaml] ----- -# ... -spec: - storage: - schemas: - - version: v13 - effectiveDate: 2024-10-25 ----- -+ -Once the `effectiveDate` has passed, the v13 schema takes effect, enabling your `LokiStack` to store structured metadata. diff --git a/modules/log6x-configuring-otlp-output.adoc b/modules/log6x-configuring-otlp-output.adoc deleted file mode 100644 index 5ec97b63fc07..000000000000 --- a/modules/log6x-configuring-otlp-output.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-configuring-otlp-output_{context}"] -= Configuring OTLP output - -Cluster administrators can use the OpenTelemetry Protocol (OTLP) output to collect and forward logs to OTLP receivers. The OTLP output uses the specification defined by the https://opentelemetry.io/docs/specs/otlp/[OpenTelemetry Observability framework] to send data over HTTP with JSON encoding. - -:FeatureName: The OpenTelemetry Protocol (OTLP) output log forwarder -include::snippets/technology-preview.adoc[] - -.Procedure - -* Create or edit a `ClusterLogForwarder` custom resource (CR) to enable forwarding using OTLP by adding the following annotation: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - annotations: - observability.openshift.io/tech-preview-otlp-output: "enabled" # <1> - name: clf-otlp -spec: - serviceAccount: - name: - outputs: - - name: otlp - type: otlp - otlp: - tuning: - compression: gzip - deliveryMode: AtLeastOnce - maxRetryDuration: 20 - maxWrite: 10M - minRetryDuration: 5 - url: # <2> - pipelines: - - inputRefs: - - application - - infrastructure - - audit - name: otlp-logs - outputRefs: - - otlp ----- -<1> Use this annotation to enable the OpenTelemetry Protocol (OTLP) output, which is a Technology Preview feature. 
-<2> This URL must be absolute and is a placeholder for the OTLP endpoint where logs are sent. - -[NOTE] -==== -The OTLP output uses the OpenTelemetry data model, which is different from the ViaQ data model that is used by other output types. It adheres to the OTLP using https://opentelemetry.io/docs/specs/semconv/[OpenTelemetry Semantic Conventions] defined by the OpenTelemetry Observability framework. -==== \ No newline at end of file diff --git a/modules/log6x-content-filter-drop-records.adoc b/modules/log6x-content-filter-drop-records.adoc deleted file mode 100644 index affd4c242ebe..000000000000 --- a/modules/log6x-content-filter-drop-records.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-content-filter-drop-records_{context}"] -= Configuring content filters to drop unwanted log records - -When the `drop` filter is configured, the log collector evaluates log streams according to the filters before forwarding. The collector drops unwanted log records that match the specified configuration. - -.Procedure - -. Add a configuration for a filter to the `filters` spec in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to drop log records based on regular expressions: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - serviceAccount: - name: - filters: - - name: - type: drop # <1> - drop: # <2> - - test: # <3> - - field: .kubernetes.labels."foo-bar/baz" # <4> - matches: .+ # <5> - - field: .kubernetes.pod_name - notMatches: "my-pod" # <6> - pipelines: - - name: # <7> - filterRefs: [""] -# ... ----- -<1> Specifies the type of filter. The `drop` filter drops log records that match the filter configuration. -<2> Specifies configuration options for applying the `drop` filter. -<3> Specifies the configuration for tests that are used to evaluate whether a log record is dropped. -** If all the conditions specified for a test are true, the test passes and the log record is dropped. -** When multiple tests are specified for the `drop` filter configuration, if any of the tests pass, the record is dropped. -** If there is an error evaluating a condition, for example, the field is missing from the log record being evaluated, that condition evaluates to false. -<4> Specifies a dot-delimited field path, which is a path to a field in the log record. The path can contain alpha-numeric characters and underscores (`a-zA-Z0-9_`), for example, `.kubernetes.namespace_name`. If segments contain characters outside of this range, the segment must be in quotes, for example, `.kubernetes.labels."foo.bar-bar/baz"`. You can include multiple field paths in a single `test` configuration, but they must all evaluate to true for the test to pass and the `drop` filter to be applied. -<5> Specifies a regular expression. If log records match this regular expression, they are dropped. You can set either the `matches` or `notMatches` condition for a single `field` path, but not both. -<6> Specifies a regular expression. If log records do not match this regular expression, they are dropped. You can set either the `matches` or `notMatches` condition for a single `field` path, but not both. -<7> Specifies the pipeline that the `drop` filter is applied to. - -. 
Apply the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -.Additional examples - -The following additional example shows how you can configure the `drop` filter to only keep higher priority log records: - -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - serviceAccount: - name: - filters: - - name: important - type: drop - drop: - - test: - - field: .message - notMatches: "(?i)critical|error" - - field: .level - matches: "info|warning" -# ... ----- - -In addition to including multiple field paths in a single `test` configuration, you can also include additional tests that are treated as _OR_ checks. In the following example, records are dropped if either `test` configuration evaluates to true. However, for the second `test` configuration, both field specs must be true for it to be evaluated to true: - -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - serviceAccount: - name: - filters: - - name: important - type: drop - drop: - - test: - - field: .kubernetes.namespace_name - matches: "^open" - - test: - - field: .log_type - matches: "application" - - field: .kubernetes.pod_name - notMatches: "my-pod" -# ... ----- diff --git a/modules/log6x-content-filter-prune-records.adoc b/modules/log6x-content-filter-prune-records.adoc deleted file mode 100644 index 77abac0c0571..000000000000 --- a/modules/log6x-content-filter-prune-records.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-content-filter-prune-records_{context}"] -= Configuring content filters to prune log records - -When the `prune` filter is configured, the log collector evaluates log streams according to the filters before forwarding. The collector prunes log records by removing low value fields such as pod annotations. - -.Procedure - -. Add a configuration for a filter to the `prune` spec in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to prune log records based on field paths: -+ -[IMPORTANT] -==== -If both are specified, records are pruned based on the `notIn` array first, which takes precedence over the `in` array. After records have been pruned by using the `notIn` array, they are then pruned by using the `in` array. -==== -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - serviceAccount: - name: - filters: - - name: - type: prune # <1> - prune: # <2> - in: [.kubernetes.annotations, .kubernetes.namespace_id] # <3> - notIn: [.kubernetes,.log_type,.message,."@timestamp"] # <4> - pipelines: - - name: # <5> - filterRefs: [""] -# ... ----- -<1> Specify the type of filter. The `prune` filter prunes log records by configured fields. -<2> Specify configuration options for applying the `prune` filter. The `in` and `notIn` fields are specified as arrays of dot-delimited field paths, which are paths to fields in log records. These paths can contain alpha-numeric characters and underscores (`a-zA-Z0-9_`), for example, `.kubernetes.namespace_name`. If segments contain characters outside of this range, the segment must be in quotes, for example, `.kubernetes.labels."foo.bar-bar/baz"`. 
-<3> Optional: Any fields that are specified in this array are removed from the log record. -<4> Optional: Any fields that are not specified in this array are removed from the log record. -<5> Specify the pipeline that the `prune` filter is applied to. -+ -[NOTE] -==== -The filters exempts the `log_type`, `.log_source`, and `.message` fields. -==== - -. Apply the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log6x-creating-logfilesmetricexporter.adoc b/modules/log6x-creating-logfilesmetricexporter.adoc deleted file mode 100644 index 7ad20c99d68d..000000000000 --- a/modules/log6x-creating-logfilesmetricexporter.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-creating-logfilesmetricexporter_{context}"] -= Creating a LogFileMetricExporter resource - -To generate metrics from the logs produced by running containers, you must create a `LogFileMetricExporter` custom resource (CR). - -If you do not create the `LogFileMetricExporter` CR, you might see a *No datapoints found* message in the {product-title} web console dashboard for *Produced Logs*. - -.Prerequisites - -* You have administrator permissions. -* You have installed the {clo}. -* You have installed the {oc-first}. - -.Procedure - -. Create a `LogFileMetricExporter` CR as a YAML file: -+ -.Example `LogFileMetricExporter` CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1alpha1 -kind: LogFileMetricExporter -metadata: - name: instance - namespace: openshift-logging -spec: - nodeSelector: {} # <1> - resources: # <2> - limits: - cpu: 500m - memory: 256Mi - requests: - cpu: 200m - memory: 128Mi - tolerations: [] # <3> -# ... ----- -<1> Optional: The `nodeSelector` stanza defines which pods are scheduled on which nodes. -<2> The `resources` stanza defines resource requirements for the `LogFileMetricExporter` CR. -<3> Optional: The `tolerations` stanza defines the tolerations that the pods accept. - -. Apply the `LogFileMetricExporter` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log6x-enabling-loki-alerts.adoc b/modules/log6x-enabling-loki-alerts.adoc deleted file mode 100644 index 9a441994ff9f..000000000000 --- a/modules/log6x-enabling-loki-alerts.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// observability/logging/logging-6.0/log6x-loki.adoc -// observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-enabling-loki-alerts_{context}"] -= Creating a log-based alerting rule with Loki - -The `AlertingRule` CR contains a set of specifications and webhook validation definitions to declare groups of alerting rules for a single `LokiStack` instance. In addition, the webhook validation definition provides support for rule validation conditions: - -* If an `AlertingRule` CR includes an invalid `interval` period, it is an invalid alerting rule -* If an `AlertingRule` CR includes an invalid `for` period, it is an invalid alerting rule. -* If an `AlertingRule` CR includes an invalid LogQL `expr`, it is an invalid alerting rule. -* If an `AlertingRule` CR includes two groups with the same name, it is an invalid alerting rule. -* If none of the above applies, an alerting rule is considered valid. 
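The following minimal sketch is illustrative only: it shows where the fields referenced by these validation conditions (`interval`, `for`, `expr`, and the group `name`) sit within an `AlertingRule` CR. The namespace, group, and alert names are placeholders, not values taken from this documentation; complete, validated examples follow in the procedure and table below.

[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: AlertingRule
metadata:
  name: example-alerting-rule
  namespace: example-ns # must be a namespace permitted for the tenant, as listed in the table below
spec:
  tenantID: "application"
  groups:
  - name: example-group # group names must be unique within the CR
    interval: 1m # must be a valid duration, or the CR is rejected
    rules:
    - alert: ExampleHighErrorRate
      expr: | # must be valid LogQL, or the CR is rejected
        sum(rate({kubernetes_namespace_name="example-ns"} |= "error" [1m])) by (job) > 0.01
      for: 10m # must be a valid duration, or the CR is rejected
      labels:
        severity: warning
      annotations:
        summary: Example summary
        description: Example description
----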
- -.AlertingRule definitions -[options="header"] -|=== -| Tenant type | Valid namespaces for `AlertingRule` CRs -| application a| `` -| audit a| `openshift-logging` -| infrastructure a| `openshift-/\*`, `kube-/\*`, `default` -|=== - -.Procedure - -. Create an `AlertingRule` custom resource (CR): -+ -.Example infrastructure `AlertingRule` CR -[source,yaml] ----- - apiVersion: loki.grafana.com/v1 - kind: AlertingRule - metadata: - name: loki-operator-alerts - namespace: openshift-operators-redhat <1> - labels: <2> - openshift.io/: "true" - spec: - tenantID: "infrastructure" <3> - groups: - - name: LokiOperatorHighReconciliationError - rules: - - alert: HighPercentageError - expr: | <4> - sum(rate({kubernetes_namespace_name="openshift-operators-redhat", kubernetes_pod_name=~"loki-operator-controller-manager.*"} |= "error" [1m])) by (job) - / - sum(rate({kubernetes_namespace_name="openshift-operators-redhat", kubernetes_pod_name=~"loki-operator-controller-manager.*"}[1m])) by (job) - > 0.01 - for: 10s - labels: - severity: critical <5> - annotations: - summary: High Loki Operator Reconciliation Errors <6> - description: High Loki Operator Reconciliation Errors <7> ----- -<1> The namespace where this `AlertingRule` CR is created must have a label matching the LokiStack `spec.rules.namespaceSelector` definition. -<2> The `labels` block must match the LokiStack `spec.rules.selector` definition. -<3> `AlertingRule` CRs for `infrastructure` tenants are only supported in the `openshift-\*`, `kube-\*`, or `default` namespaces. -<4> The value for `kubernetes_namespace_name:` must match the value for `metadata.namespace`. -<5> The value of this mandatory field must be `critical`, `warning`, or `info`. -<6> This field is mandatory. -<7> This field is mandatory. -+ -.Example application `AlertingRule` CR -[source,yaml] ----- - apiVersion: loki.grafana.com/v1 - kind: AlertingRule - metadata: - name: app-user-workload - namespace: app-ns <1> - labels: <2> - openshift.io/: "true" - spec: - tenantID: "application" - groups: - - name: AppUserWorkloadHighError - rules: - - alert: - expr: | <3> - sum(rate({kubernetes_namespace_name="app-ns", kubernetes_pod_name=~"podName.*"} |= "error" [1m])) by (job) - for: 10s - labels: - severity: critical <4> - annotations: - summary: <5> - description: <6> ----- -<1> The namespace where this `AlertingRule` CR is created must have a label matching the LokiStack `spec.rules.namespaceSelector` definition. -<2> The `labels` block must match the LokiStack `spec.rules.selector` definition. -<3> Value for `kubernetes_namespace_name:` must match the value for `metadata.namespace`. -<4> The value of this mandatory field must be `critical`, `warning`, or `info`. -<5> The value of this mandatory field is a summary of the rule. -<6> The value of this mandatory field is a detailed description of the rule. - -. 
Apply the `AlertingRule` CR: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log6x-identity-federation.adoc b/modules/log6x-identity-federation.adoc deleted file mode 100644 index 90119626f097..000000000000 --- a/modules/log6x-identity-federation.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// observability/logging/logging-6.0/log6x-loki.adoc -// observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-identity-federation_{context}"] -= Enabling authentication to cloud-based log stores using short-lived tokens - -Workload identity federation enables authentication to cloud-based log stores using short-lived tokens. - -.Procedure - -* Use one of the following options to enable authentication: - -** If you use the {product-title} web console to install the {loki-op}, clusters that use short-lived tokens are automatically detected. You are prompted to create roles and supply the data required for the {loki-op} to create a `CredentialsRequest` object, which populates a secret. - -** If you use the {oc-first} to install the {loki-op}, you must manually create a `Subscription` object using the appropriate template for your storage provider, as shown in the following examples. This authentication strategy is only supported for the storage providers indicated. -+ -.Example Azure sample subscription -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: loki-operator - namespace: openshift-operators-redhat -spec: - channel: "stable-6.0" - installPlanApproval: Manual - name: loki-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - config: - env: - - name: CLIENTID - value: - - name: TENANTID - value: - - name: SUBSCRIPTIONID - value: - - name: REGION - value: ----- -+ -.Example AWS sample subscription -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: loki-operator - namespace: openshift-operators-redhat -spec: - channel: "stable-6.0" - installPlanApproval: Manual - name: loki-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - config: - env: - - name: ROLEARN - value: ----- diff --git a/modules/log6x-input-spec-filter-audit-infrastructure.adoc b/modules/log6x-input-spec-filter-audit-infrastructure.adoc deleted file mode 100644 index 1f9592a035d0..000000000000 --- a/modules/log6x-input-spec-filter-audit-infrastructure.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-input-spec-filter-audit-infrastructure_{context}"] -= Filtering the audit and infrastructure log inputs by source - -You can define the list of `audit` and `infrastructure` sources to collect the logs by using the `input` selector. - -.Procedure - -. Add a configuration to define the `audit` and `infrastructure` sources in the `ClusterLogForwarder` CR. - -+ -The following example shows how to configure the `ClusterLogForwarder` CR to define `audit` and `infrastructure` sources: -+ -.Example `ClusterLogForwarder` CR -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -# ... 
-spec: - serviceAccount: - name: - inputs: - - name: mylogs1 - type: infrastructure - infrastructure: - sources: # <1> - - node - - name: mylogs2 - type: audit - audit: - sources: # <2> - - kubeAPI - - openshiftAPI - - ovn -# ... ----- -<1> Specifies the list of infrastructure sources to collect. The valid sources include: -** `node`: Journal log from the node -** `container`: Logs from the workloads deployed in the namespaces -<2> Specifies the list of audit sources to collect. The valid sources include: -** `kubeAPI`: Logs from the Kubernetes API servers -** `openshiftAPI`: Logs from the OpenShift API servers -** `auditd`: Logs from a node auditd service -** `ovn`: Logs from an open virtual network service - -. Apply the `ClusterLogForwarder` CR by running the following command: - -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log6x-input-spec-filter-labels-expressions.adoc b/modules/log6x-input-spec-filter-labels-expressions.adoc deleted file mode 100644 index c04c37736fe3..000000000000 --- a/modules/log6x-input-spec-filter-labels-expressions.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-input-spec-filter-labels-expressions_{context}"] -= Filtering application logs at input by including the label expressions or a matching label key and values - -You can include the application logs based on the label expressions or a matching label key and its values by using the `input` selector. - -.Procedure - -. Add a configuration for a filter to the `input` spec in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to include logs based on label expressions or matched label key/values: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -# ... -spec: - serviceAccount: - name: - inputs: - - name: mylogs - application: - selector: - matchExpressions: - - key: env # <1> - operator: In # <2> - values: ["prod", "qa"] # <3> - - key: zone - operator: NotIn - values: ["east", "west"] - matchLabels: # <4> - app: one - name: app1 - type: application -# ... ----- -<1> Specifies the label key to match. -<2> Specifies the operator. Valid values include: `In`, `NotIn`, `Exists`, and `DoesNotExist`. -<3> Specifies an array of string values. If the `operator` value is either `Exists` or `DoesNotExist`, the value array must be empty. -<4> Specifies an exact key or value mapping. - -. Apply the `ClusterLogForwarder` CR by running the following command: - -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log6x-input-spec-filter-namespace-container.adoc b/modules/log6x-input-spec-filter-namespace-container.adoc deleted file mode 100644 index 715aeb4970f8..000000000000 --- a/modules/log6x-input-spec-filter-namespace-container.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-input-spec-filter-namespace-container_{context}"] -= Filtering application logs at input by including or excluding the namespace or container name - -You can include or exclude the application logs based on the namespace and container name by using the `input` selector. - -.Procedure - -. 
Add a configuration to include or exclude the namespace and container names in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to include or exclude namespaces and container names: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -# ... -spec: - serviceAccount: - name: - inputs: - - name: mylogs - application: - includes: - - namespace: "my-project" # <1> - container: "my-container" # <2> - excludes: - - container: "other-container*" # <3> - namespace: "other-namespace" # <4> - type: application -# ... ----- -<1> Specifies that the logs are only collected from these namespaces. -<2> Specifies that the logs are only collected from these containers. -<3> Specifies the pattern of namespaces to ignore when collecting the logs. -<4> Specifies the set of containers to ignore when collecting the logs. -+ -[NOTE] -==== -The `excludes` field takes precedence over the `includes` field. -==== -+ -. Apply the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/log6x-log-collector-http-server.adoc b/modules/log6x-log-collector-http-server.adoc deleted file mode 100644 index 8412ce09d09f..000000000000 --- a/modules/log6x-log-collector-http-server.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-log-collector-http-server_{context}"] -= Configuring the collector to receive audit logs as an HTTP server - -You can configure your log collector to listen for HTTP connections to only receive audit logs by specifying `http` as a receiver input in the `ClusterLogForwarder` custom resource (CR). - -:feature-name: HTTP receiver input -include::snippets/logging-http-sys-input-support.adoc[] - - -.Prerequisites - -* You have administrator permissions. -* You have installed the {oc-first}. -* You have installed the {clo}. -* You have created a `ClusterLogForwarder` CR. - -.Procedure - -. Modify the `ClusterLogForwarder` CR to add configuration for the `http` receiver input: -+ --- -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - inputs: - - name: http-receiver # <1> - type: receiver - receiver: - type: http # <2> - port: 8443 # <3> - http: - format: kubeAPIAudit # <4> - outputs: - - name: default-lokistack - lokiStack: - authentication: - token: - from: serviceAccount - target: - name: logging-loki - namespace: openshift-logging - tls: - ca: - key: service-ca.crt - configMapName: openshift-service-ca.crt - type: lokiStack -# ... - pipelines: # <5> - - name: http-pipeline - inputRefs: - - http-receiver - outputRefs: - - -# ... ----- -<1> Specify a name for your input receiver. -<2> Specify the input receiver type as `http`. -<3> Optional: Specify the port that the input receiver listens on. This must be a value between `1024` and `65535`. The default value is `8443`. -<4> Currently, only the `kube-apiserver` webhook format is supported for `http` input receivers. -<5> Configure a pipeline for your input receiver. --- - -. Apply the changes to the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -.Verification - -. 
Verify that the collector is listening on the service that has a name in the `-` format by running the following command: -+ -[source,terminal] ----- -$ oc get svc ----- -+ -.Example output -+ -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -collector ClusterIP 172.30.85.239 24231/TCP 3m6s -collector-http-receiver ClusterIP 172.30.205.160 8443/TCP 3m6s ----- -+ -In this example output, the service name is `collector-http-receiver`. - -. Extract the certificate authority (CA) certificate file by running the following command: -+ -[source,terminal] ----- -$ oc extract cm/openshift-service-ca.crt -n ----- - -. Use the `curl` command to send logs by running the following command: -+ -[source,terminal] ----- -$ curl --cacert https://collector-http-receiver..svc:8443 -XPOST -d '{"":""}' ----- -+ -Replace `` with the extracted CA certificate file. -+ -[NOTE] -==== -You can only forward logs within a cluster by following the verification steps. -==== diff --git a/modules/log6x-log-collector-syslog-server.adoc b/modules/log6x-log-collector-syslog-server.adoc deleted file mode 100644 index 9c2ecedcb991..000000000000 --- a/modules/log6x-log-collector-syslog-server.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/cluster-logging-collector.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log-collector-syslog-server_{context}"] -= Configuring the collector to listen for connections as a syslog server - -You can configure your log collector to collect journal format infrastructure logs by specifying `syslog` as a receiver input in the `ClusterLogForwarder` custom resource (CR). - -:feature-name: Syslog receiver input -include::snippets/logging-http-sys-input-support.adoc[] - - -.Prerequisites - -* You have administrator permissions. -* You have installed the {oc-first}. -* You have installed the {clo}. -* You have created a `ClusterLogForwarder` CR. - -.Procedure - -. Grant the `collect-infrastructure-logs` cluster role to the service account by running the following command: -+ -.Example binding command -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs -z logcollector ----- - -. Modify the `ClusterLogForwarder` CR to add configuration for the `syslog` receiver input: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - serviceAccount: - name: - inputs: - - name: syslog-receiver # <1> - type: receiver - receiver: - type: syslog # <2> - port: 10514 # <3> - outputs: - - name: default-lokistack - lokiStack: - authentication: - token: - from: serviceAccount - target: - name: logging-loki - namespace: openshift-logging - tls: - ca: - key: service-ca.crt - configMapName: openshift-service-ca.crt - type: lokiStack -# ... - pipelines: # <4> - - name: syslog-pipeline - inputRefs: - - syslog-receiver - outputRefs: - - -# ... ----- -<1> Specify a name for your input receiver. -<2> Specify the input receiver type as `syslog`. -<3> Optional: Specify the port that the input receiver listens on. This must be a value between `1024` and `65535`. -<4> Configure a pipeline for your input receiver. - -. 
Apply the changes to the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -.Verification - -* Verify that the collector is listening on the service that has a name in the `-` format by running the following command: -+ -[source,terminal] ----- -$ oc get svc ----- -+ -.Example output -+ -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -collector ClusterIP 172.30.85.239 24231/TCP 33m -collector-syslog-receiver ClusterIP 172.30.216.142 10514/TCP 2m20s ----- -+ -In this example output, the service name is `collector-syslog-receiver`. diff --git a/modules/log6x-logging-http-forward-6-2.adoc b/modules/log6x-logging-http-forward-6-2.adoc deleted file mode 100644 index d076798fb77e..000000000000 --- a/modules/log6x-logging-http-forward-6-2.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.2/log6x-clf-6.2.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-http-forward-6-2_{context}"] -= Forwarding logs over HTTP - -To enable forwarding logs over HTTP, specify `http` as the output type in the `ClusterLogForwarder` custom resource (CR). - -.Procedure - -* Create or edit the `ClusterLogForwarder` CR using the template below: -+ -.Example ClusterLogForwarder CR -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: - namespace: -spec: - managementState: Managed - outputs: - - name: - type: http - http: - headers: # <1> - h1: v1 - h2: v2 - authentication: - username: - key: username - secretName: - password: - key: password - secretName: - timeout: 300 - proxyURL: # <2> - url: # <3> - tls: - insecureSkipVerify: # <4> - ca: - key: - secretName: # <5> - pipelines: - - inputRefs: - - application - name: pipe1 - outputRefs: - - # <6> - serviceAccount: - name: # <7> ----- -<1> Additional headers to send with the log record. -<2> Optional: URL of the HTTP/HTTPS proxy that should be used to forward logs over http or https from this output. This setting overrides any default proxy settings for the cluster or the node. -<3> Destination address for logs. -<4> Values are either `true` or `false`. -<5> Secret name for destination credentials. -<6> This value should be the same as the output name. -<7> The name of your service account. \ No newline at end of file diff --git a/modules/log6x-loki-memberlist-ip.adoc b/modules/log6x-loki-memberlist-ip.adoc deleted file mode 100644 index be03fbd4fc49..000000000000 --- a/modules/log6x-loki-memberlist-ip.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc -// * observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-memberlist-ip_{context}"] -= Configuring Loki to tolerate memberlist creation failure - -In an {product-title} cluster, administrators generally use a non-private IP network range. As a result, the LokiStack memberlist configuration fails because, by default, it only uses private IP networks. - -As an administrator, you can select the pod network for the memberlist configuration. You can modify the `LokiStack` custom resource (CR) to use the `podIP` address in the `hashRing` spec. 
To configure the `LokiStack` CR, use the following command: - -[source,terminal] ----- -$ oc patch LokiStack logging-loki -n openshift-logging --type=merge -p '{"spec": {"hashRing":{"memberlist":{"instanceAddrType":"podIP"},"type":"memberlist"}}}' ----- - -.Example LokiStack to include `podIP` -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... - hashRing: - type: memberlist - memberlist: - instanceAddrType: podIP -# ... ----- diff --git a/modules/log6x-loki-pod-placement.adoc b/modules/log6x-loki-pod-placement.adoc deleted file mode 100644 index ad62465d4687..000000000000 --- a/modules/log6x-loki-pod-placement.adoc +++ /dev/null @@ -1,199 +0,0 @@ -// Module included in the following assemblies: -// -// observability/logging/logging-6.0/log6x-loki.adoc -// observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-pod-placement_{context}"] -= Loki pod placement -You can control which nodes the Loki pods run on, and prevent other workloads from using those nodes, by using tolerations or node selectors on the pods. - -You can apply tolerations to the log store pods with the LokiStack custom resource (CR) and apply taints to a node with the node specification. A taint on a node is a `key:value` pair that instructs the node to repel all pods that do not allow the taint. Using a specific `key:value` pair that is not on other pods ensures that only the log store pods can run on that node. - -.Example LokiStack with node selectors -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... - template: - compactor: # <1> - nodeSelector: - node-role.kubernetes.io/infra: "" # <2> - distributor: - nodeSelector: - node-role.kubernetes.io/infra: "" - gateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - indexGateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - ingester: - nodeSelector: - node-role.kubernetes.io/infra: "" - querier: - nodeSelector: - node-role.kubernetes.io/infra: "" - queryFrontend: - nodeSelector: - node-role.kubernetes.io/infra: "" - ruler: - nodeSelector: - node-role.kubernetes.io/infra: "" -# ... ----- -<1> Specifies the component pod type that applies to the node selector. -<2> Specifies the pods that are moved to nodes containing the defined label. - - -.Example LokiStack CR with node selectors and tolerations -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... 
- template: - compactor: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - distributor: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - indexGateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - ingester: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - querier: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - queryFrontend: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - ruler: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - gateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved -# ... ----- - -To configure the `nodeSelector` and `tolerations` fields of the LokiStack (CR), you can use the [command]`oc explain` command to view the description and fields for a particular resource: - -[source,terminal] ----- -$ oc explain lokistack.spec.template ----- - -.Example output -[source,text] ----- -KIND: LokiStack -VERSION: loki.grafana.com/v1 - -RESOURCE: template - -DESCRIPTION: - Template defines the resource/limits/tolerations/nodeselectors per - component - -FIELDS: - compactor - Compactor defines the compaction component spec. - - distributor - Distributor defines the distributor component spec. -... ----- - -For more detailed information, you can add a specific field: - -[source,terminal] ----- -$ oc explain lokistack.spec.template.compactor ----- - -.Example output -[source,text] ----- -KIND: LokiStack -VERSION: loki.grafana.com/v1 - -RESOURCE: compactor - -DESCRIPTION: - Compactor defines the compaction component spec. - -FIELDS: - nodeSelector - NodeSelector defines the labels required by a node to schedule the - component onto it. -... 
----- diff --git a/modules/log6x-loki-rate-limit-errors.adoc b/modules/log6x-loki-rate-limit-errors.adoc deleted file mode 100644 index 6986227350b3..000000000000 --- a/modules/log6x-loki-rate-limit-errors.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module is included in the following assemblies: -// * logging/cluster-logging-loki.adoc -// * observability/logging/log_collection_forwarding/log-forwarding.adoc -// * observability/logging/troubleshooting/log-forwarding-troubleshooting.adoc -// * observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: PROCEDURE -[id="loki-rate-limit-errors_{context}"] -= Troubleshooting Loki rate limit errors - -If the Log Forwarder API forwards a large block of messages that exceeds the rate limit to Loki, Loki generates rate limit (`429`) errors. - -These errors can occur during normal operation. For example, when adding the {logging} to a cluster that already has some logs, rate limit errors might occur while the {logging} tries to ingest all of the existing log entries. In this case, if the rate of addition of new logs is less than the total rate limit, the historical data is eventually ingested, and the rate limit errors are resolved without requiring user intervention. - -In cases where the rate limit errors continue to occur, you can fix the issue by modifying the `LokiStack` custom resource (CR). - -[IMPORTANT] -==== -The `LokiStack` CR is not available on Grafana-hosted Loki. This topic does not apply to Grafana-hosted Loki servers. -==== - -.Conditions - -* The Log Forwarder API is configured to forward logs to Loki. - -* Your system sends a block of messages that is larger than 2 MB to Loki. For example: -+ -[source,text] ----- -"values":[["1630410392689800468","{\"kind\":\"Event\",\"apiVersion\":\ -....... -...... -...... -...... -\"received_at\":\"2021-08-31T11:46:32.800278+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-08-31T11:46:32.799692+00:00\",\"viaq_index_name\":\"audit-write\",\"viaq_msg_id\":\"MzFjYjJkZjItNjY0MC00YWU4LWIwMTEtNGNmM2E5ZmViMGU4\",\"log_type\":\"audit\"}"]]}]} ----- - -* After you enter `oc logs -n openshift-logging -l component=collector`, the collector logs in your cluster show a line containing one of the following error messages: -+ -[source,text] ----- -429 Too Many Requests Ingestion rate limit exceeded ----- -+ -.Example Vector error message -[source,text] ----- -2023-08-25T16:08:49.301780Z WARN sink{component_kind="sink" component_id=default_loki_infra component_type=loki component_name=default_loki_infra}: vector::sinks::util::retries: Retrying after error. error=Server responded with an error: 429 Too Many Requests internal_log_rate_limit=true ----- -+ -The error is also visible on the receiving end. For example, in the LokiStack ingester pod: -+ -.Example Loki ingester error message -[source,text] ----- -level=warn ts=2023-08-30T14:57:34.155592243Z caller=grpc_logging.go:43 duration=1.434942ms method=/logproto.Pusher/Push err="rpc error: code = Code(429) desc = entry with timestamp 2023-08-30 14:57:32.012778399 +0000 UTC ignored, reason: 'Per stream rate limit exceeded (limit: 3MB/sec) while attempting to ingest for stream ----- - -.Procedure - -* Update the `ingestionBurstSize` and `ingestionRate` fields in the `LokiStack` CR: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - limits: - global: - ingestion: - ingestionBurstSize: 16 # <1> - ingestionRate: 8 # <2> -# ... 
----
-<1> The `ingestionBurstSize` field defines the maximum local rate-limited sample size per distributor replica in MB. This value is a hard limit. Set this value to at least the maximum log size expected in a single push request. Single requests that are larger than the `ingestionBurstSize` value are not permitted.
-<2> The `ingestionRate` field is a soft limit on the maximum amount of ingested samples per second in MB. Rate limit errors occur if the rate of logs exceeds the limit, but the collector retries sending the logs. As long as the total average is lower than the limit, the system recovers and errors are resolved without user intervention.
diff --git a/modules/log6x-loki-rbac-rules-perms.adoc b/modules/log6x-loki-rbac-rules-perms.adoc
deleted file mode 100644
index 91b79d5fdaa8..000000000000
--- a/modules/log6x-loki-rbac-rules-perms.adoc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Module included in the following assemblies:
-// * observability/logging/logging-6.2/log6x-loki-6.2.adoc
-
-
-:_mod-docs-content-type: REFERENCE
-[id="loki-rbac-rules-permissions_{context}"]
-= Authorizing LokiStack rules RBAC permissions
-
-Administrators can allow users to create and manage their own alerting and recording rules by binding cluster roles to usernames.
-Cluster roles are defined as `ClusterRole` objects that contain the necessary role-based access control (RBAC) permissions for users.
-
-The following cluster roles for alerting and recording rules are available for LokiStack:
-
-[options="header"]
-|===
-|Rule name |Description
-
-|`alertingrules.loki.grafana.com-v1-admin`
-|Users with this role have administrative-level access to manage alerting rules. This cluster role grants permissions to create, read, update, delete, list, and watch `AlertingRule` resources within the `loki.grafana.com/v1` API group.
-
-|`alertingrules.loki.grafana.com-v1-crdview`
-|Users with this role can view the definitions of Custom Resource Definitions (CRDs) related to `AlertingRule` resources within the `loki.grafana.com/v1` API group, but do not have permissions for modifying or managing these resources.
-
-|`alertingrules.loki.grafana.com-v1-edit`
-|Users with this role have permission to create, update, and delete `AlertingRule` resources.
-
-|`alertingrules.loki.grafana.com-v1-view`
-|Users with this role can read `AlertingRule` resources within the `loki.grafana.com/v1` API group. They can inspect configurations, labels, and annotations for existing alerting rules but cannot make any modifications to them.
-
-|`recordingrules.loki.grafana.com-v1-admin`
-|Users with this role have administrative-level access to manage recording rules. This cluster role grants permissions to create, read, update, delete, list, and watch `RecordingRule` resources within the `loki.grafana.com/v1` API group.
-
-|`recordingrules.loki.grafana.com-v1-crdview`
-|Users with this role can view the definitions of Custom Resource Definitions (CRDs) related to `RecordingRule` resources within the `loki.grafana.com/v1` API group, but do not have permissions for modifying or managing these resources.
-
-|`recordingrules.loki.grafana.com-v1-edit`
-|Users with this role have permission to create, update, and delete `RecordingRule` resources.
-
-|`recordingrules.loki.grafana.com-v1-view`
-|Users with this role can read `RecordingRule` resources within the `loki.grafana.com/v1` API group. They can inspect configurations, labels, and annotations for existing recording rules but cannot make any modifications to them.
- -|=== - -[id="loki-rbac-rules-permissions-examples_{context}"] -== Examples - -To apply cluster roles for a user, you must bind an existing cluster role to a specific username. - -Cluster roles can be cluster or namespace scoped, depending on which type of role binding you use. -When a `RoleBinding` object is used, as when using the `oc adm policy add-role-to-user` command, the cluster role only applies to the specified namespace. -When a `ClusterRoleBinding` object is used, as when using the `oc adm policy add-cluster-role-to-user` command, the cluster role applies to all namespaces in the cluster. - -The following example command gives the specified user create, read, update and delete (CRUD) permissions for alerting rules in a specific namespace in the cluster: - -.Example cluster role binding command for alerting rule CRUD permissions in a specific namespace -[source,terminal] ----- -$ oc adm policy add-role-to-user alertingrules.loki.grafana.com-v1-admin -n ----- - -The following command gives the specified user administrator permissions for alerting rules in all namespaces: - -.Example cluster role binding command for administrator permissions -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user alertingrules.loki.grafana.com-v1-admin ----- diff --git a/modules/log6x-loki-reliability-hardening.adoc b/modules/log6x-loki-reliability-hardening.adoc deleted file mode 100644 index 8280c5184bb7..000000000000 --- a/modules/log6x-loki-reliability-hardening.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc -// * observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-reliability-hardening_{context}"] -= Configuring Loki to tolerate node failure - -The {loki-op} supports setting pod anti-affinity rules to request that pods of the same component are scheduled on different available nodes in the cluster. - -include::snippets/about-pod-affinity.adoc[] - -The Operator sets default, preferred `podAntiAffinity` rules for all Loki components, which includes the `compactor`, `distributor`, `gateway`, `indexGateway`, `ingester`, `querier`, `queryFrontend`, and `ruler` components. - -You can override the preferred `podAntiAffinity` settings for Loki components by configuring required settings in the `requiredDuringSchedulingIgnoredDuringExecution` field: - -.Example user settings for the ingester component -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... - template: - ingester: - podAntiAffinity: - # ... - requiredDuringSchedulingIgnoredDuringExecution: <1> - - labelSelector: - matchLabels: <2> - app.kubernetes.io/component: ingester - topologyKey: kubernetes.io/hostname -# ... ----- -<1> The stanza to define a required rule. -<2> The key-value pair (label) that must be matched to apply the rule. 
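If you override the preferred rules with a required rule as shown above, a quick way to confirm the result is to list the pods for that component and check that they landed on different nodes. This is a usage sketch that assumes the default component label shown in the previous example (`app.kubernetes.io/component: ingester`) and the `openshift-logging` namespace.

[source,terminal]
----
$ oc get pods -n openshift-logging -l app.kubernetes.io/component=ingester -o wide
----

When the required anti-affinity rule can be satisfied, the `NODE` column in the output shows a different node for each ingester pod.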
diff --git a/modules/log6x-loki-restart-hardening.adoc b/modules/log6x-loki-restart-hardening.adoc deleted file mode 100644 index 93ec41d505e9..000000000000 --- a/modules/log6x-loki-restart-hardening.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc -// * observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-restart-hardening_{context}"] -= LokiStack behavior during cluster restarts - -When an {product-title} cluster is restarted, LokiStack ingestion and the query path continue to operate within the available CPU and memory resources available for the node. This means that there is no downtime for the LokiStack during {product-title} cluster updates. This behavior is achieved by using `PodDisruptionBudget` resources. The {loki-op} provisions `PodDisruptionBudget` resources for Loki, which determine the minimum number of pods that must be available per component to ensure normal operations under certain conditions. diff --git a/modules/log6x-loki-retention.adoc b/modules/log6x-loki-retention.adoc deleted file mode 100644 index 9dc4d4f831a7..000000000000 --- a/modules/log6x-loki-retention.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// * observability/logging/logging-6.2/log6x-loki-6.2.adoc - - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-retention_{context}"] -= Enabling stream-based retention with Loki - -You can configure retention policies based on log streams. Rules for these may be set globally, per-tenant, or both. If you configure both, tenant rules apply before global rules. - -include::snippets/logging-retention-period-snip.adoc[] - -[NOTE] -==== -Schema v13 is recommended. -==== - -.Procedure - -. Create a `LokiStack` CR: -+ -** Enable stream-based retention globally as shown in the following example: -+ -.Example global stream-based retention for AWS -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - limits: - global: <1> - retention: <2> - days: 20 - streams: - - days: 4 - priority: 1 - selector: '{kubernetes_namespace_name=~"test.+"}' <3> - - days: 1 - priority: 1 - selector: '{log_type="infrastructure"}' - managementState: Managed - replicationFactor: 1 - size: 1x.small - storage: - schemas: - - effectiveDate: "2020-10-11" - version: v13 - secret: - name: logging-loki-s3 - type: aws - storageClassName: gp3-csi - tenants: - mode: openshift-logging ----- -<1> Sets retention policy for all log streams. *Note: This field does not impact the retention period for stored logs in object storage.* -<2> Retention is enabled in the cluster when this block is added to the CR. 
-<3> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream.
-
-** Enable stream-based retention on a per-tenant basis as shown in the following example:
-+
-.Example per-tenant stream-based retention for AWS
-[source,yaml]
-----
-apiVersion: loki.grafana.com/v1
-kind: LokiStack
-metadata:
-  name: logging-loki
-  namespace: openshift-logging
-spec:
-  limits:
-    global:
-      retention:
-        days: 20
-    tenants: <1>
-      application:
-        retention:
-          days: 1
-          streams:
-          - days: 4
-            selector: '{kubernetes_namespace_name=~"test.+"}' <2>
-      infrastructure:
-        retention:
-          days: 5
-          streams:
-          - days: 1
-            selector: '{kubernetes_namespace_name=~"openshift-cluster.+"}'
-  managementState: Managed
-  replicationFactor: 1
-  size: 1x.small
-  storage:
-    schemas:
-    - effectiveDate: "2020-10-11"
-      version: v13
-    secret:
-      name: logging-loki-s3
-      type: aws
-  storageClassName: gp3-csi
-  tenants:
-    mode: openshift-logging
-----
-<1> Sets retention policy by tenant. Valid tenant types are `application`, `audit`, and `infrastructure`.
-<2> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream.
-
-. Apply the `LokiStack` CR:
-+
-[source,terminal]
-----
-$ oc apply -f .yaml
-----
-+
-[NOTE]
-====
-This configuration does not manage retention for stored logs. Global retention periods for stored logs, up to a supported maximum of 30 days, are configured in your object storage.
-====
diff --git a/modules/log6x-loki-sizing.adoc b/modules/log6x-loki-sizing.adoc
deleted file mode 100644
index df8987e7fe91..000000000000
--- a/modules/log6x-loki-sizing.adoc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Module is included in the following assemblies:
-// * observability/logging/logging-6.1/log6x-loki-6.1.adoc
-// * observability/logging/logging-6.2/log6x-loki-6.2.adoc
-
-:_mod-docs-content-type: CONCEPT
-[id="log6x-loki-sizing_{context}"]
-= Loki deployment sizing
-
-Sizing for Loki follows the format of `1x.` where the value `1x` is the number of instances and `` specifies performance capabilities.
-
-The `1x.pico` configuration defines a single Loki deployment with minimal resource and limit requirements, offering high availability (HA) support for all Loki components. This configuration is suited for deployments that do not require a single replication factor or auto-compaction.
-
-Disk requests are similar across size configurations, allowing customers to test different sizes to determine the best fit for their deployment needs.
-
-
-[IMPORTANT]
-====
-It is not possible to change the number `1x` for the deployment size.
-==== - -.Loki sizing -[cols="1h,5*",options="header"] -|=== -| -|1x.demo -|1x.pico [6.1+ only] -|1x.extra-small -|1x.small -|1x.medium - -|Data transfer -|Demo use only -|50GB/day -|100GB/day -|500GB/day -|2TB/day - -|Queries per second (QPS) -|Demo use only -|1-25 QPS at 200ms -|1-25 QPS at 200ms -|25-50 QPS at 200ms -|25-75 QPS at 200ms - -|Replication factor -|None -|2 -|2 -|2 -|2 - -|Total CPU requests -|None -|7 vCPUs -|14 vCPUs -|34 vCPUs -|54 vCPUs - -|Total CPU requests if using the ruler -|None -|8 vCPUs -|16 vCPUs -|42 vCPUs -|70 vCPUs - -|Total memory requests -|None -|17Gi -|31Gi -|67Gi -|139Gi - - -|Total memory requests if using the ruler -|None -|18Gi -|35Gi -|83Gi -|171Gi - -|Total disk requests -|40Gi -|590Gi -|430Gi -|430Gi -|590Gi - -|Total disk requests if using the ruler -|60Gi -|910Gi -|750Gi -|750Gi -|910Gi -|=== diff --git a/modules/log6x-loki-zone-aware-rep.adoc b/modules/log6x-loki-zone-aware-rep.adoc deleted file mode 100644 index e8058e182cfe..000000000000 --- a/modules/log6x-loki-zone-aware-rep.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc -// * observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-zone-aware-rep_{context}"] -= Zone aware data replication - -The {loki-op} offers support for zone-aware data replication through pod topology spread constraints. Enabling this feature enhances reliability and safeguards against log loss in the event of a single zone failure. When configuring the deployment size as `1x.extra-small`, `1x.small`, or `1x.medium`, the `replication.factor` field is automatically set to 2. - -To ensure proper replication, you need to have at least as many availability zones as the replication factor specifies. While it is possible to have more availability zones than the replication factor, having fewer zones can lead to write failures. Each zone should host an equal number of instances for optimal operation. - -.Example LokiStack CR with zone replication enabled -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - replicationFactor: 2 # <1> - replication: - factor: 2 # <2> - zones: - - maxSkew: 1 # <3> - topologyKey: topology.kubernetes.io/zone # <4> ----- -<1> Deprecated field, values entered are overwritten by `replication.factor`. -<2> This value is automatically set when deployment size is selected at setup. -<3> The maximum difference in number of pods between any two topology domains. The default is 1, and you cannot specify a value of 0. -<4> Defines zones in the form of a topology key that corresponds to a node label. diff --git a/modules/log6x-loki-zone-fail-recovery.adoc b/modules/log6x-loki-zone-fail-recovery.adoc deleted file mode 100644 index 0a05fe7fbea8..000000000000 --- a/modules/log6x-loki-zone-fail-recovery.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc -// * observability/logging/logging-6.2/log6x-loki-6.2.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-zone-fail-recovery_{context}"] -= Recovering Loki pods from failed zones - -In {product-title} a zone failure happens when specific availability zone resources become inaccessible. Availability zones are isolated areas within a cloud provider's data center, aimed at enhancing redundancy and fault tolerance. 
If your {product-title} cluster is not configured to handle this, a zone failure can lead to service or data loss. - -Loki pods are part of a link:https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[StatefulSet], and they come with Persistent Volume Claims (PVCs) provisioned by a `StorageClass` object. Each Loki pod and its PVCs reside in the same zone. When a zone failure occurs in a cluster, the StatefulSet controller automatically attempts to recover the affected pods in the failed zone. - -[WARNING] -==== -The following procedure will delete the PVCs in the failed zone, and all data contained therein. To avoid complete data loss the replication factor field of the `LokiStack` CR should always be set to a value greater than 1 to ensure that Loki is replicating. -==== - -.Prerequisites -* Verify your `LokiStack` CR has a replication factor greater than 1. -* Zone failure detected by the control plane, and nodes in the failed zone are marked by cloud provider integration. - -The StatefulSet controller automatically attempts to reschedule pods in a failed zone. Because the associated PVCs are also in the failed zone, automatic rescheduling to a different zone does not work. You must manually delete the PVCs in the failed zone to allow successful re-creation of the stateful Loki Pod and its provisioned PVC in the new zone. - - -.Procedure -. List the pods in `Pending` status by running the following command: -+ -[source,terminal] ----- -$ oc get pods --field-selector status.phase==Pending -n openshift-logging ----- -+ -.Example `oc get pods` output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE # <1> -logging-loki-index-gateway-1 0/1 Pending 0 17m -logging-loki-ingester-1 0/1 Pending 0 16m -logging-loki-ruler-1 0/1 Pending 0 16m ----- -<1> These pods are in `Pending` status because their corresponding PVCs are in the failed zone. - -. List the PVCs in `Pending` status by running the following command: -+ -[source,terminal] ----- -$ oc get pvc -o=json -n openshift-logging | jq '.items[] | select(.status.phase == "Pending") | .metadata.name' -r ----- -+ -.Example `oc get pvc` output -[source,terminal] ----- -storage-logging-loki-index-gateway-1 -storage-logging-loki-ingester-1 -wal-logging-loki-ingester-1 -storage-logging-loki-ruler-1 -wal-logging-loki-ruler-1 ----- - -. Delete the PVC(s) for a pod by running the following command: -+ -[source,terminal] ----- -$ oc delete pvc -n openshift-logging ----- -+ -. Delete the pod(s) by running the following command: -+ -[source,terminal] ----- -$ oc delete pod -n openshift-logging ----- -+ -Once these objects have been successfully deleted, they should automatically be rescheduled in an available zone. - -[id="logging-loki-zone-fail-term-state_{context}"] -== Troubleshooting PVC in a terminating state - -The PVCs might hang in the terminating state without being deleted, if PVC metadata finalizers are set to `kubernetes.io/pv-protection`. Removing the finalizers should allow the PVCs to delete successfully. - -* Remove the finalizer for each PVC by running the command below, then retry deletion. 
-+ -[source,terminal] ----- -$ oc patch pvc -p '{"metadata":{"finalizers":null}}' -n openshift-logging ----- diff --git a/modules/log6x-multiline-except.adoc b/modules/log6x-multiline-except.adoc deleted file mode 100644 index 634d6e7e66ab..000000000000 --- a/modules/log6x-multiline-except.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-clf.adoc - -:_mod-docs-content-type: PROCEDURE -[id="log6x-multiline-except_{context}"] -= Enabling multi-line exception detection - -Enables multi-line error detection of container logs. - -[WARNING] -==== -Enabling this feature could have performance implications and may require additional computing resources or alternate logging solutions. -==== - -Log parsers often incorrectly identify separate lines of the same exception as separate exceptions. This leads to extra log entries and an incomplete or inaccurate view of the traced information. - -.Example java exception -[source,java] ----- -java.lang.NullPointerException: Cannot invoke "String.toString()" because "" is null - at testjava.Main.handle(Main.java:47) - at testjava.Main.printMe(Main.java:19) - at testjava.Main.main(Main.java:10) ----- - -* To enable logging to detect multi-line exceptions and reassemble them into a single log entry, ensure that the `ClusterLogForwarder` Custom Resource (CR) contains a `detectMultilineErrors` field under the `.spec.filters`. - -.Example ClusterLogForwarder CR -[source,yaml] ----- -apiVersion: "observability.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: - namespace: -spec: - serviceAccount: - name: - filters: - - name: - type: detectMultilineException - pipelines: - - inputRefs: - - - name: - filterRefs: - - - outputRefs: - - ----- - -== Details -When log messages appear as a consecutive sequence forming an exception stack trace, they are combined into a single, unified log record. The first log message's content is replaced with the concatenated content of all the message fields in the sequence. - -The collector supports the following languages: - -* Java -* JS -* Ruby -* Python -* Golang -* PHP -* Dart diff --git a/modules/log6x-oc-explain.adoc b/modules/log6x-oc-explain.adoc deleted file mode 100644 index 695605a620fa..000000000000 --- a/modules/log6x-oc-explain.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -:_mod-docs-content-type: CONCEPT -[id="log6x-oc-explain_{context}"] - -= Using the `oc explain` command - -The `oc explain` command is an essential tool in the OpenShift CLI `oc` that provides detailed descriptions of the fields within Custom Resources (CRs). This command is invaluable for administrators and developers who are configuring or troubleshooting resources in an OpenShift cluster. - -== Resource Descriptions -`oc explain` offers in-depth explanations of all fields associated with a specific object. This includes standard resources like pods and services, as well as more complex entities like statefulsets and custom resources defined by Operators. - -To view the documentation for the `outputs` field of the `ClusterLogForwarder` custom resource, you can use: - -[source,terminal] ----- -$ oc explain clusterlogforwarders.observability.openshift.io.spec.outputs ----- - -[NOTE] -==== -In place of `clusterlogforwarder` the short form `obsclf` can be used. -==== - -This will display detailed information about these fields, including their types, default values, and any associated sub-fields. 
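As a usage sketch, the short name from the preceding note can be combined with the same dot-delimited field paths. The exact output depends on the installed API version, so it is omitted here:

[source,terminal]
----
$ oc explain obsclf.spec.outputs
----

[source,terminal]
----
$ oc explain obsclf.spec.outputs.type
----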
- -== Hierarchical Structure -The command displays the structure of resource fields in a hierarchical format, clarifying the relationships between different configuration options. - -For instance, here's how you can drill down into the `storage` configuration for a `LokiStack` custom resource: - -[source,terminal] ----- -$ oc explain lokistacks.loki.grafana.com ----- - -[source,terminal] ----- -$ oc explain lokistacks.loki.grafana.com.spec ----- - -[source,terminal] ----- -$ oc explain lokistacks.loki.grafana.com.spec.storage ----- - -[source,terminal] ----- -$ oc explain lokistacks.loki.grafana.com.spec.storage.schemas ----- - -Each command reveals a deeper level of the resource specification, making the structure clear. - -== Type Information -`oc explain` also indicates the type of each field (such as string, integer, or boolean), allowing you to verify that resource definitions use the correct data types. - -For example: - -[source,terminal] ----- -$ oc explain lokistacks.loki.grafana.com.spec.size ----- - -This will show that `size` should be defined using an integer value. - -== Default Values -When applicable, the command shows the default values for fields, providing insights into what values will be used if none are explicitly specified. - -Again using `lokistacks.loki.grafana.com` as an example: - -[source,terminal] ----- -$ oc explain lokistacks.spec.template.distributor.replicas ----- - -.Example output -[source,terminal] ----- -GROUP: loki.grafana.com -KIND: LokiStack -VERSION: v1 - -FIELD: replicas - -DESCRIPTION: - Replicas defines the number of replica pods of the component. ----- diff --git a/modules/log6x-quickstart-opentelemetry.adoc b/modules/log6x-quickstart-opentelemetry.adoc deleted file mode 100644 index a5944c2b8b05..000000000000 --- a/modules/log6x-quickstart-opentelemetry.adoc +++ /dev/null @@ -1,157 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-about.adoc - -:_mod-docs-content-type: PROCEDURE -[id="quick-start-opentelemetry_{context}"] -= Quick start with OpenTelemetry - -:FeatureName: The OpenTelemetry Protocol (OTLP) output log forwarder -include::snippets/technology-preview.adoc[] - -To configure OTLP ingestion and enable the OpenTelemetry data model, follow these steps: - -.Prerequisites -* You have access to an {product-title} cluster with `cluster-admin` permissions. -* You have installed the {oc-first}. -* You have access to a supported object store. For example, AWS S3, {gcp-full} Storage, {azure-short}, Swift, Minio, or {rh-storage}. - -.Procedure - -. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from the software catalog. - -. Create a `LokiStack` custom resource (CR) in the `openshift-logging` namespace: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - managementState: Managed - size: 1x.extra-small - storage: - schemas: - - effectiveDate: '2024-10-01' - version: v13 - secret: - name: logging-loki-s3 - type: s3 - storageClassName: gp3-csi - tenants: - mode: openshift-logging ----- -+ -[NOTE] -==== -Ensure that the `logging-loki-s3` secret is created beforehand. The contents of this secret vary depending on the object storage in use. For more information, see "Secrets and TLS Configuration". -==== - -. Create a service account for the collector: -+ -[source,terminal] ----- -$ oc create sa collector -n openshift-logging ----- - -. 
Allow the collector's service account to write data to the `LokiStack` CR: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user logging-collector-logs-writer -z collector -n openshift-logging ----- -+ -[NOTE] -==== -The `ClusterRole` resource is created automatically during the Cluster Logging Operator installation and does not need to be created manually. -==== - -. To collect logs, use the service account of the collector by running the following commands: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-application-logs -z collector -n openshift-logging ----- -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-audit-logs -z collector -n openshift-logging ----- -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs -z collector -n openshift-logging ----- -+ -[NOTE] -==== -The example binds the collector to all three roles (application, infrastructure, and audit). By default, only application and infrastructure logs are collected. To collect audit logs, update your `ClusterLogForwarder` configuration to include them. Assign roles based on the specific log types required for your environment. -==== - -. Create a `UIPlugin` CR to enable the *Log* section in the *Observe* tab: -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1alpha1 -kind: UIPlugin -metadata: - name: logging -spec: - type: Logging - logging: - lokiStack: - name: logging-loki ----- - -. Create a `ClusterLogForwarder` CR to configure log forwarding: -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: collector - namespace: openshift-logging - annotations: - observability.openshift.io/tech-preview-otlp-output: "enabled" # <1> -spec: - serviceAccount: - name: collector - outputs: - - name: loki-otlp - type: lokiStack # <2> - lokiStack: - target: - name: logging-loki - namespace: openshift-logging - dataModel: Otel # <3> - authentication: - token: - from: serviceAccount - tls: - ca: - key: service-ca.crt - configMapName: openshift-service-ca.crt - pipelines: - - name: my-pipeline - inputRefs: - - application - - infrastructure - outputRefs: - - loki-otlp ----- -<1> Use the annotation to enable the `Otel` data model, which is a Technology Preview feature. -<2> Define the output type as `lokiStack`. -<3> Specifies the OpenTelemetry data model. -+ -[NOTE] -==== -You cannot use `lokiStack.labelKeys` when `dataModel` is `Otel`. To achieve similar functionality when `dataModel` is `Otel`, refer to "Configuring LokiStack for OTLP data ingestion". -==== - -.Verification -* To verify that OTLP is functioning correctly, complete the following steps: -.. In the OpenShift web console, click *Observe* -> *OpenShift Logging* -> *LokiStack* -> *Writes*. -.. Check the *Distributor - Structured Metadata* section. diff --git a/modules/log6x-quickstart-viaq.adoc b/modules/log6x-quickstart-viaq.adoc deleted file mode 100644 index 8331819f595b..000000000000 --- a/modules/log6x-quickstart-viaq.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log6x-about.adoc - -:_mod-docs-content-type: PROCEDURE -[id="quick-start-viaq_{context}"] -= Quick start with ViaQ - -To use the default ViaQ data model, follow these steps: - -.Prerequisites -* You have access to an {product-title} cluster with `cluster-admin` permissions. -* You installed the {oc-first}. 
-* You have access to a supported object store. For example, AWS S3, {gcp-full} Storage, {azure-short}, Swift, Minio, or {rh-storage}. - -.Procedure - -. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from the software catalog. - -. Create a `LokiStack` custom resource (CR) in the `openshift-logging` namespace: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - managementState: Managed - size: 1x.extra-small - storage: - schemas: - - effectiveDate: '2024-10-01' - version: v13 - secret: - name: logging-loki-s3 - type: s3 - storageClassName: gp3-csi - tenants: - mode: openshift-logging ----- -+ -[NOTE] -==== -Ensure that the `logging-loki-s3` secret is created beforehand. The contents of this secret vary depending on the object storage in use. For more information, see Secrets and TLS Configuration. -==== - -. Create a service account for the collector: -+ -[source,terminal] ----- -$ oc create sa collector -n openshift-logging ----- - -. Allow the collector's service account to write data to the `LokiStack` CR: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user logging-collector-logs-writer -z collector -n openshift-logging ----- -+ -[NOTE] -==== -The `ClusterRole` resource is created automatically during the Cluster Logging Operator installation and does not need to be created manually. -==== - -. To collect logs, use the service account of the collector by running the following commands: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-application-logs -z collector -n openshift-logging ----- -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-audit-logs -z collector -n openshift-logging ----- -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs -z collector -n openshift-logging ----- -+ -[NOTE] -==== -The example binds the collector to all three roles (application, infrastructure, and audit), but by default, only application and infrastructure logs are collected. To collect audit logs, update your `ClusterLogForwarder` configuration to include them. Assign roles based on the specific log types required for your environment. -==== - -. Create a `UIPlugin` CR to enable the *Log* section in the *Observe* tab: -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1alpha1 -kind: UIPlugin -metadata: - name: logging -spec: - type: Logging - logging: - lokiStack: - name: logging-loki ----- - -. Create a `ClusterLogForwarder` CR to configure log forwarding: -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: collector - namespace: openshift-logging -spec: - serviceAccount: - name: collector - outputs: - - name: default-lokistack - type: lokiStack - lokiStack: - authentication: - token: - from: serviceAccount - target: - name: logging-loki - namespace: openshift-logging - tls: - ca: - key: service-ca.crt - configMapName: openshift-service-ca.crt - pipelines: - - name: default-logstore - inputRefs: - - application - - infrastructure - outputRefs: - - default-lokistack ----- -+ -[NOTE] -==== -The `dataModel` field is optional and left unset (`dataModel: ""`) by default. This allows the Cluster Logging Operator (CLO) to automatically select a data model. Currently, the CLO defaults to the ViaQ model when the field is unset, but this will change in future releases. 
Specifying `dataModel: ViaQ` ensures the configuration remains compatible if the default changes. -==== - -.Verification -* Verify that logs are visible in the *Log* section of the *Observe* tab in the {product-title} web console. diff --git a/modules/logging-5.6-api-ref.adoc b/modules/logging-5.6-api-ref.adoc deleted file mode 100644 index 110587f80f10..000000000000 --- a/modules/logging-5.6-api-ref.adoc +++ /dev/null @@ -1,1360 +0,0 @@ -// Module included in the following assemblies: -// -// Note: This content is automatically generated from source, do not edit. -:_mod-docs-content-type: REFERENCE -[id="logging-5-6-api-ref"] -= Logging 5.6 API reference -:toc: -:toclevels: 4 - -== ClusterLogForwarder -ClusterLogForwarder is an API to configure forwarding logs. - -You configure forwarding by specifying a list of `pipelines`, -which forward from a set of named inputs to a set of named outputs. - -There are built-in input names for common log categories, and you can -define custom inputs to do additional filtering. - -There is a built-in output name for the default openshift log store, but -you can define your own outputs with a URL and other connection information -to forward logs to other stores or processors, inside or outside the cluster. - -For more details see the documentation on the API fields. - -[options="header"] -|====================== -|Property|Type|Description - -|spec|object| Specification of the desired behavior of ClusterLogForwarder -|status|object| Status of the ClusterLogForwarder -|====================== - -=== .spec -==== Description -ClusterLogForwarderSpec defines how logs should be forwarded to remote targets. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|inputs|array| *(optional)* Inputs are named filters for log messages to be forwarded. -|outputDefaults|object| *(optional)* DEPRECATED OutputDefaults specify forwarder config explicitly for the default store. -|outputs|array| *(optional)* Outputs are named destinations for log messages. -|pipelines|array| Pipelines forward the messages selected by a set of inputs to a set of outputs. -|====================== - -=== .spec.inputs[] -==== Description -InputSpec defines a selector of log messages. - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|application|object| *(optional)* Application, if present, enables named set of `application` logs that -|name|string| Name used to refer to the input of a `pipeline`. -|====================== - -=== .spec.inputs[].application -==== Description -Application log selector. -All conditions in the selector must be satisfied (logical AND) to select logs. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|namespaces|array| *(optional)* Namespaces from which to collect application logs. -|selector|object| *(optional)* Selector for logs from pods with matching labels. -|====================== - -=== .spec.inputs[].application.namespaces[] -==== Description - -===== Type -* array - -=== .spec.inputs[].application.selector -==== Description -A label selector is a label query over a set of resources. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|matchLabels|object| *(optional)* matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels -|====================== - -=== .spec.inputs[].application.selector.matchLabels -==== Description - -===== Type -* object - -=== .spec.outputDefaults -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|elasticsearch|object| *(optional)* Elasticsearch OutputSpec default values -|====================== - -=== .spec.outputDefaults.elasticsearch -==== Description -ElasticsearchStructuredSpec is spec related to structured log changes to determine the elasticsearch index - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|enableStructuredContainerLogs|bool| *(optional)* EnableStructuredContainerLogs enables multi-container structured logs to allow -|structuredTypeKey|string| *(optional)* StructuredTypeKey specifies the metadata key to be used as name of elasticsearch index -|structuredTypeName|string| *(optional)* StructuredTypeName specifies the name of elasticsearch schema -|====================== - -=== .spec.outputs[] -==== Description -Output defines a destination for log messages. - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|syslog|object| *(optional)* -|fluentdForward|object| *(optional)* -|elasticsearch|object| *(optional)* -|kafka|object| *(optional)* -|cloudwatch|object| *(optional)* -|loki|object| *(optional)* -|googleCloudLogging|object| *(optional)* -|splunk|object| *(optional)* -|name|string| Name used to refer to the output from a `pipeline`. -|secret|object| *(optional)* Secret for authentication. -|tls|object| TLS contains settings for controlling options on TLS client connections. -|type|string| Type of output plugin. -|url|string| *(optional)* URL to send log records to. -|====================== - -=== .spec.outputs[].secret -==== Description -OutputSecretSpec is a secret reference containing name only, no namespace. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|name|string| Name of a secret in the namespace configured for log forwarder secrets. -|====================== - -=== .spec.outputs[].tls -==== Description -OutputTLSSpec contains options for TLS connections that are agnostic to the output type. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|insecureSkipVerify|bool| If InsecureSkipVerify is true, then the TLS client will be configured to ignore errors with certificates. -|====================== - -=== .spec.pipelines[] -==== Description -PipelinesSpec link a set of inputs to a set of outputs. - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|detectMultilineErrors|bool| *(optional)* DetectMultilineErrors enables multiline error detection of container logs -|inputRefs|array| InputRefs lists the names (`input.name`) of inputs to this pipeline. -|labels|object| *(optional)* Labels applied to log records passing through this pipeline. -|name|string| *(optional)* Name is optional, but must be unique in the `pipelines` list if provided. -|outputRefs|array| OutputRefs lists the names (`output.name`) of outputs from this pipeline. 
-|parse|string| *(optional)* Parse enables parsing of log entries into structured logs -|====================== - -=== .spec.pipelines[].inputRefs[] -==== Description - -===== Type -* array - -=== .spec.pipelines[].labels -==== Description - -===== Type -* object - -=== .spec.pipelines[].outputRefs[] -==== Description - -===== Type -* array - -=== .status -==== Description -ClusterLogForwarderStatus defines the observed state of ClusterLogForwarder - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|conditions|object| Conditions of the log forwarder. -|inputs|Conditions| Inputs maps input name to condition of the input. -|outputs|Conditions| Outputs maps output name to condition of the output. -|pipelines|Conditions| Pipelines maps pipeline name to condition of the pipeline. -|====================== - -=== .status.conditions -==== Description - -===== Type -* object - -=== .status.inputs -==== Description - -===== Type -* Conditions - -=== .status.outputs -==== Description - -===== Type -* Conditions - -=== .status.pipelines -==== Description - -===== Type -* Conditions== ClusterLogging -A Red Hat OpenShift Logging instance. ClusterLogging is the Schema for the clusterloggings API - -[options="header"] -|====================== -|Property|Type|Description - -|spec|object| Specification of the desired behavior of ClusterLogging -|status|object| Status defines the observed state of ClusterLogging -|====================== - -=== .spec -==== Description -ClusterLoggingSpec defines the desired state of ClusterLogging - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|collection|object| Specification of the Collection component for the cluster -|curation|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of the Curation component for the cluster -|forwarder|object| **(DEPRECATED)** *(optional)* Deprecated. Specification for Forwarder component for the cluster -|logStore|object| *(optional)* Specification of the Log Storage component for the cluster -|managementState|string| *(optional)* Indicator if the resource is 'Managed' or 'Unmanaged' by the operator -|visualization|object| *(optional)* Specification of the Visualization component for the cluster -|====================== - -=== .spec.collection -==== Description -This is the struct that will contain information pertinent to Log and event collection - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|resources|object| *(optional)* The resource requirements for the collector -|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. -|tolerations|array| *(optional)* Define the tolerations the Pods will accept -|fluentd|object| *(optional)* Fluentd represents the configuration for forwarders of type fluentd. -|logs|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of Log Collection for the cluster -|type|string| *(optional)* The type of Log Collection to configure -|====================== - -=== .spec.collection.fluentd -==== Description -FluentdForwarderSpec represents the configuration for forwarders of type fluentd. 
- -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|buffer|object| -|inFile|object| -|====================== - -=== .spec.collection.fluentd.buffer -==== Description -FluentdBufferSpec represents a subset of fluentd buffer parameters to tune -the buffer configuration for all fluentd outputs. It supports a subset of -parameters to configure buffer and queue sizing, flush operations and retry -flushing. - -For general parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#buffering-parameters - -For flush parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#flushing-parameters - -For retry parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#retries-parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be -|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush -|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. The mode -|flushThreadCount|int| *(optional)* FlushThreadCount reprents the number of threads used by the fluentd buffer -|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to -|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff -|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up -|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can -|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush -|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd -|====================== - -=== .spec.collection.fluentd.inFile -==== Description -FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters -to tune the configuration for all fluentd in-tail inputs. - -For general parameters refer to: -https://docs.fluentd.org/input/tail#parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation -|====================== - -=== .spec.collection.logs -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|fluentd|object| Specification of the Fluentd Log Collection component -|type|string| The type of Log Collection to configure -|====================== - -=== .spec.collection.logs.fluentd -==== Description -CollectorSpec is spec to define scheduling and resources for a collector - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. 
-|resources|object| *(optional)* The resource requirements for the collector -|tolerations|array| *(optional)* Define the tolerations the Pods will accept -|====================== - -=== .spec.collection.logs.fluentd.nodeSelector -==== Description - -===== Type -* object - -=== .spec.collection.logs.fluentd.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.collection.logs.fluentd.resources.limits -==== Description - -===== Type -* object - -=== .spec.collection.logs.fluentd.resources.requests -==== Description - -===== Type -* object - -=== .spec.collection.logs.fluentd.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. -|operator|string| *(optional)* Operator represents a key's relationship to the value. -|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. -|====================== - -=== .spec.collection.logs.fluentd.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .spec.curation -==== Description -This is the struct that will contain information pertinent to Log curation (Curator) - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|curator|object| The specification of curation to configure -|type|string| The kind of curation to configure -|====================== - -=== .spec.curation.curator -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeSelector|object| Define which Nodes the Pods are scheduled on. -|resources|object| *(optional)* The resource requirements for Curator -|schedule|string| The cron schedule that the Curator job is run. Defaults to "30 3 * * *" -|tolerations|array| -|====================== - -=== .spec.curation.curator.nodeSelector -==== Description - -===== Type -* object - -=== .spec.curation.curator.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.curation.curator.resources.limits -==== Description - -===== Type -* object - -=== .spec.curation.curator.resources.requests -==== Description - -===== Type -* object - -=== .spec.curation.curator.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. 
-|operator|string| *(optional)* Operator represents a key's relationship to the value. -|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. -|====================== - -=== .spec.curation.curator.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .spec.forwarder -==== Description -ForwarderSpec contains global tuning parameters for specific forwarder implementations. -This field is not required for general use, it allows performance tuning by users -familiar with the underlying forwarder technology. -Currently supported: `fluentd`. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|fluentd|object| -|====================== - -=== .spec.forwarder.fluentd -==== Description -FluentdForwarderSpec represents the configuration for forwarders of type fluentd. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|buffer|object| -|inFile|object| -|====================== - -=== .spec.forwarder.fluentd.buffer -==== Description -FluentdBufferSpec represents a subset of fluentd buffer parameters to tune -the buffer configuration for all fluentd outputs. It supports a subset of -parameters to configure buffer and queue sizing, flush operations and retry -flushing. - -For general parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#buffering-parameters - -For flush parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#flushing-parameters - -For retry parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#retries-parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be -|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush -|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. The mode -|flushThreadCount|int| *(optional)* FlushThreadCount reprents the number of threads used by the fluentd buffer -|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to -|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff -|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up -|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can -|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush -|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd -|====================== - -=== .spec.forwarder.fluentd.inFile -==== Description -FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters -to tune the configuration for all fluentd in-tail inputs. 
- -For general parameters refer to: -https://docs.fluentd.org/input/tail#parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation -|====================== - -=== .spec.logStore -==== Description -The LogStoreSpec contains information about how logs are stored. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|elasticsearch|object| Specification of the Elasticsearch Log Store component -|lokistack|object| LokiStack contains information about which LokiStack to use for log storage if Type is set to LogStoreTypeLokiStack. -|retentionPolicy|object| *(optional)* Retention policy defines the maximum age for an index after which it should be deleted -|type|string| The Type of Log Storage to configure. The operator currently supports either using ElasticSearch -|====================== - -=== .spec.logStore.elasticsearch -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeCount|int| Number of nodes to deploy for Elasticsearch -|nodeSelector|object| Define which Nodes the Pods are scheduled on. -|proxy|object| Specification of the Elasticsearch Proxy component -|redundancyPolicy|string| *(optional)* -|resources|object| *(optional)* The resource requirements for Elasticsearch -|storage|object| *(optional)* The storage specification for Elasticsearch data nodes -|tolerations|array| -|====================== - -=== .spec.logStore.elasticsearch.nodeSelector -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.proxy -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|resources|object| -|====================== - -=== .spec.logStore.elasticsearch.proxy.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.logStore.elasticsearch.proxy.resources.limits -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.proxy.resources.requests -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.logStore.elasticsearch.resources.limits -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.resources.requests -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.storage -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|size|object| The max storage capacity for the node to provision. -|storageClassName|string| *(optional)* The name of the storage class to use with creating the node's PVC. 
-|====================== - -=== .spec.logStore.elasticsearch.storage.size -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|Format|string| Change Format at will. See the comment for Canonicalize for -|d|object| d is the quantity in inf.Dec form if d.Dec != nil -|i|int| i is the quantity in int64 scaled form, if d.Dec == nil -|s|string| s is the generated value of this quantity to avoid recalculation -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|Dec|object| -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d.Dec -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|scale|int| -|unscaled|object| -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|abs|Word| sign -|neg|bool| -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled.abs -==== Description - -===== Type -* Word - -=== .spec.logStore.elasticsearch.storage.size.i -==== Description - -===== Type -* int - -[options="header"] -|====================== -|Property|Type|Description - -|scale|int| -|value|int| -|====================== - -=== .spec.logStore.elasticsearch.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. -|operator|string| *(optional)* Operator represents a key's relationship to the value. -|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. -|====================== - -=== .spec.logStore.elasticsearch.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .spec.logStore.lokistack -==== Description -LokiStackStoreSpec is used to set up cluster-logging to use a LokiStack as logging storage. -It points to an existing LokiStack in the same namespace. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|name|string| Name of the LokiStack resource. -|====================== - -=== .spec.logStore.retentionPolicy -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|application|object| -|audit|object| -|infra|object| -|====================== - -=== .spec.logStore.retentionPolicy.application -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 
75) -|maxAge|string| *(optional)* -|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age -|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job -|====================== - -=== .spec.logStore.retentionPolicy.application.namespaceSpec[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) -|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) -|====================== - -=== .spec.logStore.retentionPolicy.audit -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) -|maxAge|string| *(optional)* -|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age -|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job -|====================== - -=== .spec.logStore.retentionPolicy.audit.namespaceSpec[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) -|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) -|====================== - -=== .spec.logStore.retentionPolicy.infra -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) -|maxAge|string| *(optional)* -|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age -|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job -|====================== - -=== .spec.logStore.retentionPolicy.infra.namespaceSpec[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) -|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) -|====================== - -=== .spec.visualization -==== Description -This is the struct that will contain information pertinent to Log visualization (Kibana) - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|kibana|object| Specification of the Kibana Visualization component -|type|string| The type of Visualization to configure -|====================== - -=== .spec.visualization.kibana -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeSelector|object| Define which Nodes the Pods are scheduled on. 
-|proxy|object| Specification of the Kibana Proxy component -|replicas|int| Number of instances to deploy for a Kibana deployment -|resources|object| *(optional)* The resource requirements for Kibana -|tolerations|array| -|====================== - -=== .spec.visualization.kibana.nodeSelector -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.proxy -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|resources|object| -|====================== - -=== .spec.visualization.kibana.proxy.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.visualization.kibana.proxy.resources.limits -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.proxy.resources.requests -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.replicas -==== Description - -===== Type -* int - -=== .spec.visualization.kibana.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.visualization.kibana.resources.limits -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.resources.requests -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. -|operator|string| *(optional)* Operator represents a key's relationship to the value. -|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. 
-|====================== - -=== .spec.visualization.kibana.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .status -==== Description -ClusterLoggingStatus defines the observed state of ClusterLogging - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|collection|object| *(optional)* -|conditions|object| *(optional)* -|curation|object| *(optional)* -|logStore|object| *(optional)* -|visualization|object| *(optional)* -|====================== - -=== .status.collection -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|logs|object| *(optional)* -|====================== - -=== .status.collection.logs -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|fluentdStatus|object| *(optional)* -|====================== - -=== .status.collection.logs.fluentdStatus -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|clusterCondition|object| *(optional)* -|daemonSet|string| *(optional)* -|nodes|object| *(optional)* -|pods|string| *(optional)* -|====================== - -=== .status.collection.logs.fluentdStatus.clusterCondition -==== Description -`operator-sdk generate crds` does not allow map-of-slice, must use a named type. - -===== Type -* object - -=== .status.collection.logs.fluentdStatus.nodes -==== Description - -===== Type -* object - -=== .status.conditions -==== Description - -===== Type -* object - -=== .status.curation -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|curatorStatus|array| *(optional)* -|====================== - -=== .status.curation.curatorStatus[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|clusterCondition|object| *(optional)* -|cronJobs|string| *(optional)* -|schedules|string| *(optional)* -|suspended|bool| *(optional)* -|====================== - -=== .status.curation.curatorStatus[].clusterCondition -==== Description -`operator-sdk generate crds` does not allow map-of-slice, must use a named type. 
- -===== Type -* object - -=== .status.logStore -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|elasticsearchStatus|array| *(optional)* -|====================== - -=== .status.logStore.elasticsearchStatus[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|cluster|object| *(optional)* -|clusterConditions|object| *(optional)* -|clusterHealth|string| *(optional)* -|clusterName|string| *(optional)* -|deployments|array| *(optional)* -|nodeConditions|object| *(optional)* -|nodeCount|int| *(optional)* -|pods|object| *(optional)* -|replicaSets|array| *(optional)* -|shardAllocationEnabled|string| *(optional)* -|statefulSets|array| *(optional)* -|====================== - -=== .status.logStore.elasticsearchStatus[].cluster -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|activePrimaryShards|int| The number of Active Primary Shards for the Elasticsearch Cluster -|activeShards|int| The number of Active Shards for the Elasticsearch Cluster -|initializingShards|int| The number of Initializing Shards for the Elasticsearch Cluster -|numDataNodes|int| The number of Data Nodes for the Elasticsearch Cluster -|numNodes|int| The number of Nodes for the Elasticsearch Cluster -|pendingTasks|int| -|relocatingShards|int| The number of Relocating Shards for the Elasticsearch Cluster -|status|string| The current Status of the Elasticsearch Cluster -|unassignedShards|int| The number of Unassigned Shards for the Elasticsearch Cluster -|====================== - -=== .status.logStore.elasticsearchStatus[].clusterConditions -==== Description - -===== Type -* object - -=== .status.logStore.elasticsearchStatus[].deployments[] -==== Description - -===== Type -* array - -=== .status.logStore.elasticsearchStatus[].nodeConditions -==== Description - -===== Type -* object - -=== .status.logStore.elasticsearchStatus[].pods -==== Description - -===== Type -* object - -=== .status.logStore.elasticsearchStatus[].replicaSets[] -==== Description - -===== Type -* array - -=== .status.logStore.elasticsearchStatus[].statefulSets[] -==== Description - -===== Type -* array - -=== .status.visualization -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|kibanaStatus|array| *(optional)* -|====================== - -=== .status.visualization.kibanaStatus[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|clusterCondition|object| *(optional)* -|deployment|string| *(optional)* -|pods|string| *(optional)* The status for each of the Kibana pods for the Visualization component -|replicaSets|array| *(optional)* -|replicas|int| *(optional)* -|====================== - -=== .status.visualization.kibanaStatus[].clusterCondition -==== Description - -===== Type -* object - -=== .status.visualization.kibanaStatus[].replicaSets[] -==== Description - -===== Type -* array diff --git a/modules/logging-audit-log-filtering.adoc b/modules/logging-audit-log-filtering.adoc deleted file mode 100644 index d5c3fa9224ce..000000000000 --- a/modules/logging-audit-log-filtering.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: CONCEPT 
-[id="logging-audit-filtering_{context}"] -= Overview of API audit filter -OpenShift API servers generate audit events for each API call, detailing the request, response, and the identity of the requester, leading to large volumes of data. The API Audit filter uses rules to enable the exclusion of non-essential events and the reduction of event size, facilitating a more manageable audit trail. Rules are checked in order, checking stops at the first match. How much data is included in an event is determined by the value of the `level` field: - -* `None`: The event is dropped. -* `Metadata`: Audit metadata is included, request and response bodies are removed. -* `Request`: Audit metadata and the request body are included, the response body is removed. -* `RequestResponse`: All data is included: metadata, request body and response body. The response body can be very large. For example, `oc get pods -A` generates a response body containing the YAML description of every pod in the cluster. - - -[NOTE] -==== -You can use this feature only if the Vector collector is set up in your logging deployment. -==== - -In logging 5.8 and later, the `ClusterLogForwarder` custom resource (CR) uses the same format as the standard link:https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/#audit-policy[Kubernetes audit policy], while providing the following additional functions: - -Wildcards:: Names of users, groups, namespaces, and resources can have a leading or trailing `\*` asterisk character. For example, namespace `openshift-\*` matches `openshift-apiserver` or `openshift-authentication`. Resource `\*/status` matches `Pod/status` or `Deployment/status`. - -Default Rules:: Events that do not match any rule in the policy are filtered as follows: -* Read-only system events such as `get`, `list`, `watch` are dropped. -* Service account write events that occur within the same namespace as the service account are dropped. -* All other events are forwarded, subject to any configured rate limits. - -To disable these defaults, either end your rules list with a rule that has only a `level` field or add an empty rule. - -Omit Response Codes:: A list of integer status codes to omit. You can drop events based on the HTTP status code in the response by using the `OmitResponseCodes` field, a list of HTTP status code for which no events are created. The default value is `[404, 409, 422, 429]`. If the value is an empty list, `[]`, then no status codes are omitted. - -The `ClusterLogForwarder` CR audit policy acts in addition to the {product-title} audit policy. The `ClusterLogForwarder` CR audit filter changes what the log collector forwards, and provides the ability to filter by verb, user, group, namespace, or resource. You can create multiple filters to send different summaries of the same audit stream to different places. For example, you can send a detailed stream to the local cluster log store, and a less detailed stream to a remote site. - -[NOTE] -==== -The example provided is intended to illustrate the range of rules possible in an audit policy and is not a recommended configuration. 
-==== - - -.Example audit policy -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - pipelines: - - name: my-pipeline - inputRefs: audit #<1> - filterRefs: my-policy #<2> - outputRefs: default - filters: - - name: my-policy - type: kubeAPIAudit - kubeAPIAudit: - # Don't generate audit events for all requests in RequestReceived stage. - omitStages: - - "RequestReceived" - - rules: - # Log pod changes at RequestResponse level - - level: RequestResponse - resources: - - group: "" - resources: ["pods"] - - # Log "pods/log", "pods/status" at Metadata level - - level: Metadata - resources: - - group: "" - resources: ["pods/log", "pods/status"] - - # Don't log requests to a configmap called "controller-leader" - - level: None - resources: - - group: "" - resources: ["configmaps"] - resourceNames: ["controller-leader"] - - # Don't log watch requests by the "system:kube-proxy" on endpoints or services - - level: None - users: ["system:kube-proxy"] - verbs: ["watch"] - resources: - - group: "" # core API group - resources: ["endpoints", "services"] - - # Don't log authenticated requests to certain non-resource URL paths. - - level: None - userGroups: ["system:authenticated"] - nonResourceURLs: - - "/api*" # Wildcard matching. - - "/version" - - # Log the request body of configmap changes in kube-system. - - level: Request - resources: - - group: "" # core API group - resources: ["configmaps"] - # This rule only applies to resources in the "kube-system" namespace. - # The empty string "" can be used to select non-namespaced resources. - namespaces: ["kube-system"] - - # Log configmap and secret changes in all other namespaces at the Metadata level. - - level: Metadata - resources: - - group: "" # core API group - resources: ["secrets", "configmaps"] - - # Log all other resources in core and extensions at the Request level. - - level: Request - resources: - - group: "" # core API group - - group: "extensions" # Version of group should NOT be included. - - # A catch-all rule to log all other requests at the Metadata level. - - level: Metadata ----- -<1> The log types that are collected. The value for this field can be `audit` for audit logs, `application` for application logs, `infrastructure` for infrastructure logs, or a named input that has been defined for your application. -<2> The name of your audit policy. diff --git a/modules/logging-collector-alerts.adoc b/modules/logging-collector-alerts.adoc deleted file mode 100644 index db65573383bc..000000000000 --- a/modules/logging-collector-alerts.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/logging_alerts/default-logging-alerts.adoc - -:_content-type: REFERENCE -[id="logging-collector-alerts_{context}"] -= Logging collector alerts - -In logging 5.8 and later versions, the following alerts are generated by the {clo}. You can view these alerts in the {product-title} web console. - -[cols="4", options="header"] -|=== -| Alert Name | Message | Description | Severity - -| CollectorNodeDown -| Prometheus could not scrape `namespace`/`pod` collector component for more than 10m. -| Collector cannot be scraped. -| Critical - -| CollectorHighErrorRate -| `value`% of records have resulted in an error by `namespace`/`pod` collector component. -| `namespace`/`pod` collector component errors are high. 
-| Critical - -| CollectorVeryHighErrorRate -| `value`% of records have resulted in an error by `namespace`/`pod` collector component. -| `namespace`/`pod` collector component errors are very high. -| Critical -|=== diff --git a/modules/logging-content-filter-drop-records.adoc b/modules/logging-content-filter-drop-records.adoc deleted file mode 100644 index d279c9789f44..000000000000 --- a/modules/logging-content-filter-drop-records.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/performance_reliability/logging-content-filtering.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-content-filter-drop-records_{context}"] -= Configuring content filters to drop unwanted log records - -When the `drop` filter is configured, the log collector evaluates log streams according to the filters before forwarding. The collector drops unwanted log records that match the specified configuration. - -.Prerequisites - -* You have installed the {clo}. -* You have administrator permissions. -* You have created a `ClusterLogForwarder` custom resource (CR). - -.Procedure - -. Add a configuration for a filter to the `filters` spec in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to drop log records based on regular expressions: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - filters: - - name: - type: drop # <1> - drop: # <2> - - test: # <3> - - field: .kubernetes.labels."foo-bar/baz" # <4> - matches: .+ # <5> - - field: .kubernetes.pod_name - notMatches: "my-pod" # <6> - pipelines: - - name: # <7> - filterRefs: [""] -# ... ----- -<1> Specifies the type of filter. The `drop` filter drops log records that match the filter configuration. -<2> Specifies configuration options for applying the `drop` filter. -<3> Specifies the configuration for tests that are used to evaluate whether a log record is dropped. -** If all the conditions specified for a test are true, the test passes and the log record is dropped. -** When multiple tests are specified for the `drop` filter configuration, if any of the tests pass, the record is dropped. -** If there is an error evaluating a condition, for example, the field is missing from the log record being evaluated, that condition evaluates to false. -<4> Specifies a dot-delimited field path, which is a path to a field in the log record. The path can contain alpha-numeric characters and underscores (`a-zA-Z0-9_`), for example, `.kubernetes.namespace_name`. If segments contain characters outside of this range, the segment must be in quotes, for example, `.kubernetes.labels."foo.bar-bar/baz"`. You can include multiple field paths in a single `test` configuration, but they must all evaluate to true for the test to pass and the `drop` filter to be applied. -<5> Specifies a regular expression. If log records match this regular expression, they are dropped. You can set either the `matches` or `notMatches` condition for a single `field` path, but not both. -<6> Specifies a regular expression. If log records do not match this regular expression, they are dropped. You can set either the `matches` or `notMatches` condition for a single `field` path, but not both. -<7> Specifies the pipeline that the `drop` filter is applied to. - -. 
Apply the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -.Additional examples - -The following additional example shows how you can configure the `drop` filter to only keep higher priority log records: - -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - filters: - - name: important - type: drop - drop: - test: - - field: .message - notMatches: "(?i)critical|error" - - field: .level - matches: "info|warning" -# ... ----- - -In addition to including multiple field paths in a single `test` configuration, you can also include additional tests that are treated as _OR_ checks. In the following example, records are dropped if either `test` configuration evaluates to true. However, for the second `test` configuration, both field specs must be true for it to be evaluated to true: - -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - filters: - - name: important - type: drop - drop: - test: - - field: .kubernetes.namespace_name - matches: "^open" - test: - - field: .log_type - matches: "application" - - field: .kubernetes.pod_name - notMatches: "my-pod" -# ... ----- diff --git a/modules/logging-content-filter-prune-records.adoc b/modules/logging-content-filter-prune-records.adoc deleted file mode 100644 index 89e08fcd48a8..000000000000 --- a/modules/logging-content-filter-prune-records.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/performance_reliability/logging-content-filtering.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-content-filter-prune-records_{context}"] -= Configuring content filters to prune log records - -When the `prune` filter is configured, the log collector evaluates log streams according to the filters before forwarding. The collector prunes log records by removing low value fields such as pod annotations. - -.Prerequisites - -* You have installed the {clo}. -* You have administrator permissions. -* You have created a `ClusterLogForwarder` custom resource (CR). - -.Procedure - -. Add a configuration for a filter to the `prune` spec in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to prune log records based on field paths: -+ -[IMPORTANT] -==== -If both are specified, records are pruned based on the `notIn` array first, which takes precedence over the `in` array. After records have been pruned by using the `notIn` array, they are then pruned by using the `in` array. -==== -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: - filters: - - name: - type: prune # <1> - prune: # <2> - in: [.kubernetes.annotations, .kubernetes.namespace_id] # <3> - notIn: [.kubernetes,.log_type,.message,."@timestamp"] # <4> - pipelines: - - name: # <5> - filterRefs: [""] -# ... ----- -<1> Specify the type of filter. The `prune` filter prunes log records by configured fields. -<2> Specify configuration options for applying the `prune` filter. The `in` and `notIn` fields are specified as arrays of dot-delimited field paths, which are paths to fields in log records. These paths can contain alpha-numeric characters and underscores (`a-zA-Z0-9_`), for example, `.kubernetes.namespace_name`. 
If segments contain characters outside of this range, the segment must be in quotes, for example, `.kubernetes.labels."foo.bar-bar/baz"`. -<3> Optional: Any fields that are specified in this array are removed from the log record. -<4> Optional: Any fields that are not specified in this array are removed from the log record. -<5> Specify the pipeline that the `prune` filter is applied to. - -. Apply the `ClusterLogForwarder` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/logging-create-clf.adoc deleted file mode 100644 index abe6aef7bfee..000000000000 --- a/modules/logging-create-clf.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-create-clf_{context}"] -= Creating a log forwarder - -To create a log forwarder, you must create a `ClusterLogForwarder` CR that specifies the log input types that the service account can collect. You can also specify which outputs the logs can be forwarded to. If you are using the multi log forwarder feature, you must also reference the service account in the `ClusterLogForwarder` CR. - -If you are using the multi log forwarder feature on your cluster, you can create `ClusterLogForwarder` custom resources (CRs) in any namespace, using any name. -If you are using a legacy implementation, the `ClusterLogForwarder` CR must be named `instance`, and must be created in the `openshift-logging` namespace. - -[IMPORTANT] -==== -You need administrator permissions for the namespace where you create the `ClusterLogForwarder` CR. -==== - -.ClusterLogForwarder resource example -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - pipelines: - - inputRefs: - - <4> - outputRefs: - - <5> - outputs: - - name: <6> - type: <5> - url: <7> -# ... ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> The log types that are collected. The value for this field can be `audit` for audit logs, `application` for application logs, `infrastructure` for infrastructure logs, or a named input that has been defined for your application. -<5> The type of output that you want to forward logs to. The value of this field can be `default`, `loki`, `kafka`, `elasticsearch`, `fluentdForward`, `syslog`, or `cloudwatch`. -+ -[NOTE] -==== -The `default` output type is not supported in multi log forwarder implementations. -==== -<6> A name for the output that you want to forward logs to. -<7> The URL of the output that you want to forward logs to. 
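The callouts above describe a template with placeholders. As a point of reference, the following is a minimal, filled-in sketch that assumes a multi log forwarder deployment; the name `my-forwarder`, the namespace `my-logging`, the service account `collector`, and the Loki endpoint URL are hypothetical values, not taken from this document.

.Hypothetical `ClusterLogForwarder` example
[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: my-forwarder # any name is allowed in multi log forwarder implementations
  namespace: my-logging # any namespace is allowed in multi log forwarder implementations
spec:
  serviceAccountName: collector # required because this CR is not deployed in the openshift-logging namespace
  outputs:
  - name: external-loki
    type: loki # one of the supported output types listed in the callouts
    url: https://loki.example.com:3100
  pipelines:
  - inputRefs:
    - application
    outputRefs:
    - external-loki
----

Note that each value in `outputRefs` must match a `name` defined under `outputs`, and the input type `application` is one of the reserved input names described above.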
- -// To be followed up on by adding input examples / docs: -//// -spec: - inputs: - - name: chatty-app - type: application - selector: - matchLabels: - load: heavy - pipelines: - - inputRefs: - - chatty-app - - infrastructure - - outputRefs: - - default -//// diff --git a/modules/logging-create-loki-cr-cli.adoc b/modules/logging-create-loki-cr-cli.adoc deleted file mode 100644 index 2c2ca6954014..000000000000 --- a/modules/logging-create-loki-cr-cli.adoc +++ /dev/null @@ -1,126 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-create-loki-cr-cli_{context}"] -= Creating a LokiStack custom resource by using the CLI - -You can create a `LokiStack` custom resource (CR) by using the {oc-first}. - -.Prerequisites - -* You have administrator permissions. -* You installed the {loki-op}. -* You installed the {oc-first}. - -.Procedure - -. Create a `LokiStack` CR: -// tag::pre-5.9[] -+ --- -.Example `LokiStack` CR -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - size: 1x.small # <1> - storage: - schemas: - - version: v12 - effectiveDate: "2022-06-01" - secret: - name: logging-loki-s3 # <2> - type: s3 # <3> - storageClassName: # <4> - tenants: - mode: openshift-logging ----- -<1> Specify the deployment size. In the {logging} 5.8 and later versions, the supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. -<2> Specify the name of your log store secret. -<3> Specify the type of your log store secret. -<4> Specify the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. Available storage classes for your cluster can be listed by using the `oc get storageclasses` command. - -[IMPORTANT] -==== -It is not possible to change the number `1x` for the deployment size. -==== - -// end::pre-5.9[] - -// tag::5.9[] - -.Example `LokiStack` CR -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki # <1> - namespace: openshift-logging -spec: - size: 1x.small # <2> - storage: - schemas: - - effectiveDate: '2023-10-15' - version: v13 - secret: - name: logging-loki-s3 # <3> - type: s3 # <4> - credentialMode: # <5> - storageClassName: # <6> - tenants: - mode: openshift-logging ----- -<1> Use the name `logging-loki`. -<2> Specify the deployment size. In the {logging} 5.8 and later versions, the supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. -<3> Specify the secret used for your log storage. -<4> Specify the corresponding storage type. -<5> Optional field, {logging} 5.9 and later. Supported user configured values are as follows: `static` is the default authentication mode available for all supported object storage types using credentials stored in a Secret. `token` for short-lived tokens retrieved from a credential source. In this mode the static configuration does not contain credentials needed for the object storage. Instead, they are generated during runtime using a service, which allows for shorter-lived credentials and much more granular control. This authentication mode is not supported for all object storage types. `token-cco` is the default value when Loki is running on managed STS mode and using CCO on STS/WIF clusters. 
-<6> Enter the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. Available storage classes for your cluster can be listed by using the `oc get storageclasses` command. -// end::5.9[] - -. Apply the `LokiStack` CR by running the following command: - -.Verification - -* Verify the installation by listing the pods in the `openshift-logging` project by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc get pods -n openshift-logging ----- -+ -Confirm that you see several pods for components of the {logging}, similar to the following list: -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-78fddc697-mnl82 1/1 Running 0 14m -collector-6cglq 2/2 Running 0 45s -collector-8r664 2/2 Running 0 45s -collector-8z7px 2/2 Running 0 45s -collector-pdxl9 2/2 Running 0 45s -collector-tc9dx 2/2 Running 0 45s -collector-xkd76 2/2 Running 0 45s -logging-loki-compactor-0 1/1 Running 0 8m2s -logging-loki-distributor-b85b7d9fd-25j9g 1/1 Running 0 8m2s -logging-loki-distributor-b85b7d9fd-xwjs6 1/1 Running 0 8m2s -logging-loki-gateway-7bb86fd855-hjhl4 2/2 Running 0 8m2s -logging-loki-gateway-7bb86fd855-qjtlb 2/2 Running 0 8m2s -logging-loki-index-gateway-0 1/1 Running 0 8m2s -logging-loki-index-gateway-1 1/1 Running 0 7m29s -logging-loki-ingester-0 1/1 Running 0 8m2s -logging-loki-ingester-1 1/1 Running 0 6m46s -logging-loki-querier-f5cf9cb87-9fdjd 1/1 Running 0 8m2s -logging-loki-querier-f5cf9cb87-fp9v5 1/1 Running 0 8m2s -logging-loki-query-frontend-58c579fcb7-lfvbc 1/1 Running 0 8m2s -logging-loki-query-frontend-58c579fcb7-tjf9k 1/1 Running 0 8m2s -logging-view-plugin-79448d8df6-ckgmx 1/1 Running 0 46s ----- diff --git a/modules/logging-create-loki-cr-console.adoc b/modules/logging-create-loki-cr-console.adoc deleted file mode 100644 index bf9f85916a3f..000000000000 --- a/modules/logging-create-loki-cr-console.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-create-loki-cr-console_{context}"] -= Creating a LokiStack custom resource by using the web console - -You can create a `LokiStack` custom resource (CR) by using the {product-title} web console. - -.Prerequisites - -* You have administrator permissions. -* You have access to the {product-title} web console. -* You installed the {loki-op}. - -.Procedure - -. Go to the *Ecosystem* -> *Installed Operators* page. Click the *All instances* tab. - -. From the *Create new* drop-down list, select *LokiStack*. - -. Select *YAML view*, and then use the following template to create a `LokiStack` CR: -// tag::pre-5.9[] -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki # <1> - namespace: openshift-logging -spec: - size: 1x.small # <2> - storage: - schemas: - - version: v12 - effectiveDate: '2022-06-01' - secret: - name: logging-loki-s3 # <3> - type: s3 # <4> - credentialMode: static # - storageClassName: # <5> - tenants: - mode: openshift-logging ----- -<1> Use the name `logging-loki`. -<2> Specify the deployment size. In the {logging} 5.8 and later versions, the supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. -<3> Specify the secret used for your log storage. -<4> Specify the corresponding storage type. 
-<5> Enter the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. Available storage classes for your cluster can be listed by using the `oc get storageclasses` command. -// end::pre-5.9[] - -// tag::5.9[] -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki # <1> - namespace: openshift-logging -spec: - size: 1x.small # <2> - storage: - schemas: - - effectiveDate: '2023-10-15' - version: v13 - secret: - name: logging-loki-s3 # <3> - type: s3 # <4> - credentialMode: # <5> - storageClassName: # <6> - tenants: - mode: openshift-logging ----- -<1> Use the name `logging-loki`. -<2> Specify the deployment size. In the {logging} 5.8 and later versions, the supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. -<3> Specify the secret used for your log storage. -<4> Specify the corresponding storage type. -<5> Optional field, {logging} 5.9 and later. Supported user configured values are as follows: `static` is the default authentication mode available for all supported object storage types using credentials stored in a Secret. `token` for short-lived tokens retrieved from a credential source. In this mode the static configuration does not contain credentials needed for the object storage. Instead, they are generated during runtime using a service, which allows for shorter-lived credentials and much more granular control. This authentication mode is not supported for all object storage types. `token-cco` is the default value when Loki is running on managed STS mode and using CCO on STS/WIF clusters. -<6> Enter the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. Available storage classes for your cluster can be listed by using the `oc get storageclasses` command. -// end::5.9[] diff --git a/modules/logging-delivery-tuning.adoc b/modules/logging-delivery-tuning.adoc deleted file mode 100644 index de2364688988..000000000000 --- a/modules/logging-delivery-tuning.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-delivery-tuning_{context}"] -= Tuning log payloads and delivery - -In {logging} 5.9 and newer versions, the `tuning` spec in the `ClusterLogForwarder` custom resource (CR) provides a means of configuring your deployment to prioritize either throughput or durability of logs. - -For example, if you need to reduce the possibility of log loss when the collector restarts, or you require collected log messages to survive a collector restart to support regulatory mandates, you can tune your deployment to prioritize log durability. If you use outputs that have hard limitations on the size of batches they can receive, you may want to tune your deployment to prioritize log throughput. - -[IMPORTANT] -==== -To use this feature, your {logging} deployment must be configured to use the Vector collector. The `tuning` spec in the `ClusterLogForwarder` CR is not supported when using the Fluentd collector. -==== - -The following example shows the `ClusterLogForwarder` CR options that you can modify to tune log forwarder outputs: - -.Example `ClusterLogForwarder` CR tuning options -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... 
-spec: - tuning: - delivery: AtLeastOnce # <1> - compression: none # <2> - maxWrite: # <3> - minRetryDuration: 1s # <4> - maxRetryDuration: 1s # <5> -# ... ----- -<1> Specify the delivery mode for log forwarding. -** `AtLeastOnce` delivery means that if the log forwarder crashes or is restarted, any logs that were read before the crash but not sent to their destination are re-sent. It is possible that some logs are duplicated after a crash. -** `AtMostOnce` delivery means that the log forwarder makes no effort to recover logs lost during a crash. This mode gives better throughput, but may result in greater log loss. -<2> Specifying a `compression` configuration causes data to be compressed before it is sent over the network. Note that not all output types support compression, and if the specified compression type is not supported by the output, this results in an error. The possible values for this configuration are `none` for no compression, `gzip`, `snappy`, `zlib`, or `zstd`. `lz4` compression is also available if you are using a Kafka output. See the table "Supported compression types for tuning outputs" for more information. -<3> Specifies a limit for the maximum payload of a single send operation to the output. -<4> Specifies a minimum duration to wait between attempts before retrying delivery after a failure. This value is a string, and can be specified as milliseconds (`ms`), seconds (`s`), or minutes (`m`). -<5> Specifies a maximum duration to wait between attempts before retrying delivery after a failure. This value is a string, and can be specified as milliseconds (`ms`), seconds (`s`), or minutes (`m`). - -.Supported compression types for tuning outputs -[options="header"] -|=== -|Compression algorithm |Splunk |Amazon Cloudwatch |Elasticsearch 8 |LokiStack |Apache Kafka |HTTP |Syslog |{gcp-full} |Microsoft Azure Monitoring - -|`gzip` -|X -|X -|X -|X -| -|X -| -| -| - -|`snappy` -| -|X -| -|X -|X -|X -| -| -| - -|`zlib` -| -|X -|X -| -| -|X -| -| -| - -|`zstd` -| -|X -| -| -|X -|X -| -| -| - -|`lz4` -| -| -| -| -|X -| -| -| -| - -|=== diff --git a/modules/logging-enabling-loki-alerts.adoc b/modules/logging-enabling-loki-alerts.adoc deleted file mode 100644 index ea36798e9e40..000000000000 --- a/modules/logging-enabling-loki-alerts.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_alerts/custom-logging-alerts.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-enabling-loki-alerts_{context}"] -= Creating a log-based alerting rule with Loki - -The `AlertingRule` CR contains a set of specifications and webhook validation definitions to declare groups of alerting rules for a single `LokiStack` instance. In addition, the webhook validation definition provides support for rule validation conditions: - -* If an `AlertingRule` CR includes an invalid `interval` period, it is an invalid alerting rule -* If an `AlertingRule` CR includes an invalid `for` period, it is an invalid alerting rule. -* If an `AlertingRule` CR includes an invalid LogQL `expr`, it is an invalid alerting rule. -* If an `AlertingRule` CR includes two groups with the same name, it is an invalid alerting rule. -* If none of above applies, an alerting rule is considered valid. 
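To make the validated periods easier to picture, the following fragment is a hedged sketch of where the `interval` and `for` durations appear in an `AlertingRule` spec. The group name, alert name, and LogQL expression are hypothetical; complete examples, including the required metadata and labels, follow later in this procedure.

[source,yaml]
----
# Fragment of an AlertingRule spec with hypothetical values
spec:
  tenantID: "application"
  groups:
  - name: example-group
    interval: 1m # group evaluation interval; an invalid duration makes the alerting rule invalid
    rules:
    - alert: ExampleHighErrorRate
      expr: | # an invalid LogQL expression makes the alerting rule invalid
        sum(rate({kubernetes_namespace_name="app-ns"} |= "error" [1m])) by (job) > 0.01
      for: 10s # how long the condition must hold before the alert fires; an invalid duration makes the alerting rule invalid
----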
- -[options="header"] -|================================================ -| Tenant type | Valid namespaces for `AlertingRule` CRs -| application | -| audit | `openshift-logging` -| infrastructure | `openshift-/\*`, `kube-/\*`, `default` -|================================================ - -.Prerequisites - -* {clo} 5.7 and later -* {product-title} 4.13 and later - -.Procedure - -. Create an `AlertingRule` custom resource (CR): -+ -.Example infrastructure AlertingRule CR -[source,yaml] ----- - apiVersion: loki.grafana.com/v1 - kind: AlertingRule - metadata: - name: loki-operator-alerts - namespace: openshift-operators-redhat <1> - labels: <2> - openshift.io/: "true" - spec: - tenantID: "infrastructure" <3> - groups: - - name: LokiOperatorHighReconciliationError - rules: - - alert: HighPercentageError - expr: | <4> - sum(rate({kubernetes_namespace_name="openshift-operators-redhat", kubernetes_pod_name=~"loki-operator-controller-manager.*"} |= "error" [1m])) by (job) - / - sum(rate({kubernetes_namespace_name="openshift-operators-redhat", kubernetes_pod_name=~"loki-operator-controller-manager.*"}[1m])) by (job) - > 0.01 - for: 10s - labels: - severity: critical <5> - annotations: - summary: High Loki Operator Reconciliation Errors <6> - description: High Loki Operator Reconciliation Errors <7> ----- -<1> The namespace where this `AlertingRule` CR is created must have a label matching the LokiStack `spec.rules.namespaceSelector` definition. -<2> The `labels` block must match the LokiStack `spec.rules.selector` definition. -<3> `AlertingRule` CRs for `infrastructure` tenants are only supported in the `openshift-\*`, `kube-\*`, or `default` namespaces. -<4> The value for `kubernetes_namespace_name:` must match the value for `metadata.namespace`. -<5> The value of this mandatory field must be `critical`, `warning`, or `info`. -<6> This field is mandatory. -<7> This field is mandatory. -+ -.Example application AlertingRule CR -[source,yaml] ----- - apiVersion: loki.grafana.com/v1 - kind: AlertingRule - metadata: - name: app-user-workload - namespace: app-ns <1> - labels: <2> - openshift.io/: "true" - spec: - tenantID: "application" - groups: - - name: AppUserWorkloadHighError - rules: - - alert: - expr: | <3> - sum(rate({kubernetes_namespace_name="app-ns", kubernetes_pod_name=~"podName.*"} |= "error" [1m])) by (job) - for: 10s - labels: - severity: critical <4> - annotations: - summary: <5> - description: <6> ----- -<1> The namespace where this `AlertingRule` CR is created must have a label matching the LokiStack `spec.rules.namespaceSelector` definition. -<2> The `labels` block must match the LokiStack `spec.rules.selector` definition. -<3> Value for `kubernetes_namespace_name:` must match the value for `metadata.namespace`. -<4> The value of this mandatory field must be `critical`, `warning`, or `info`. -<5> The value of this mandatory field is a summary of the rule. -<6> The value of this mandatory field is a detailed description of the rule. - -. 
Apply the `AlertingRule` CR: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/logging-es-storage-considerations.adoc b/modules/logging-es-storage-considerations.adoc deleted file mode 100644 index 4e59447e3fed..000000000000 --- a/modules/logging-es-storage-considerations.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-deploying.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-es-storage-considerations_{context}"] -= Storage considerations for Elasticsearch - -A persistent volume is required for each Elasticsearch deployment configuration. On {product-title} this is achieved using persistent volume claims (PVCs). - -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== - -The OpenShift Elasticsearch Operator names the PVCs using the Elasticsearch resource name. - -Fluentd ships any logs from *systemd journal* and **/var/log/containers/*.log** to Elasticsearch. - -Elasticsearch requires sufficient memory to perform large merge operations. If it does not have enough memory, it becomes unresponsive. To avoid this problem, evaluate how much application log data you need, and allocate approximately double that amount of free storage capacity. - -By default, when storage capacity is 85% full, Elasticsearch stops allocating new data to the node. At 90%, Elasticsearch attempts to relocate existing shards from that node to other nodes if possible. But if no nodes have a free capacity below 85%, Elasticsearch effectively rejects creating new indices and becomes RED. - -[NOTE] -==== -These low and high watermark values are Elasticsearch defaults in the current release. You can modify these default values. Although the alerts use the same default values, you cannot change these values in the alerts. -==== diff --git a/modules/logging-fluentd-collector-alerts.adoc b/modules/logging-fluentd-collector-alerts.adoc deleted file mode 100644 index d9a087c70aea..000000000000 --- a/modules/logging-fluentd-collector-alerts.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_alerts/default-logging-alerts.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-fluentd-collector-alerts_{context}"] -= Fluentd collector alerts - -The following alerts are generated by the legacy Fluentd log collector. You can view these alerts in the {product-title} web console. - -.Fluentd collector alerts -[cols="2,2,2,1",options="header"] -|=== -|Alert |Message |Description |Severity - -|`FluentDHighErrorRate` -|` of records have resulted in an error by fluentd .` -|The number of FluentD output errors is high, by default more than 10 in the previous 15 minutes. -|Warning - -|`FluentdNodeDown` -|`Prometheus could not scrape fluentd for more than 10m.` -|Fluentd is reporting that Prometheus could not scrape a specific Fluentd instance. -|Critical - -|`FluentdQueueLengthIncreasing` -|`In the last 1h, fluentd buffer queue length constantly increased more than 1. Current value is .` -|Fluentd is reporting that the queue size is increasing. -|Warning - -|`FluentDVeryHighErrorRate` -|` of records have resulted in an error by fluentd .` -|The number of FluentD output errors is very high, by default more than 25 in the previous 15 minutes. 
-|Critical - -|=== diff --git a/modules/logging-forward-splunk.adoc b/modules/logging-forward-splunk.adoc deleted file mode 100644 index fd6377ecf98c..000000000000 --- a/modules/logging-forward-splunk.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-forward-splunk_{context}"] -= Forwarding logs to Splunk - -You can forward logs to the link:https://docs.splunk.com/Documentation/Splunk/9.0.0/Data/UsetheHTTPEventCollector[Splunk HTTP Event Collector (HEC)] in addition to, or instead of, the internal default {product-title} log store. - -[NOTE] -==== -Using this feature with Fluentd is not supported. -==== - -.Prerequisites -* {clo} 5.6 or later -* A `ClusterLogging` instance with `vector` specified as the collector -* Base64 encoded Splunk HEC token - -.Procedure - -. Create a secret using your Base64 encoded Splunk HEC token. -+ -[source,terminal] ----- -$ oc -n openshift-logging create secret generic vector-splunk-secret --from-literal hecToken= ----- -+ -. Create or edit the `ClusterLogForwarder` Custom Resource (CR) using the template below: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: splunk-receiver <4> - secret: - name: vector-splunk-secret <5> - type: splunk <6> - url: <7> - pipelines: <8> - - inputRefs: - - application - - infrastructure - name: <9> - outputRefs: - - splunk-receiver <10> ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the name of the secret that contains your HEC token. -<6> Specify the output type as `splunk`. -<7> Specify the URL (including port) of your Splunk HEC. -<8> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. -<9> Optional: Specify a name for the pipeline. -<10> Specify the name of the output to use when forwarding logs with this pipeline. diff --git a/modules/logging-forwarding-azure.adoc b/modules/logging-forwarding-azure.adoc deleted file mode 100644 index 3650a7bb3456..000000000000 --- a/modules/logging-forwarding-azure.adoc +++ /dev/null @@ -1,137 +0,0 @@ -// Module included in the following assemblies: -// * logging/configuring-log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-forwarding-azure_{context}"] -= Forwarding to Azure Monitor Logs -With {logging} 5.9 and later, you can forward logs to link:https://learn.microsoft.com/en-us/azure/azure-monitor/logs/data-platform-logs[Azure Monitor Logs] in addition to, or instead of, the default log store. This functionality is provided by the link:https://vector.dev/docs/reference/configuration/sinks/azure_monitor_logs/[Vector Azure Monitor Logs sink]. - -.Prerequisites - -* You are familiar with how to administer and create a `ClusterLogging` custom resource (CR) instance. -* You are familiar with how to administer and create a `ClusterLogForwarder` CR instance. 
-* You understand the `ClusterLogForwarder` CR specifications. -* You have basic familiarity with Azure services. -* You have an Azure account configured for Azure Portal or Azure CLI access. -* You have obtained your Azure Monitor Logs primary or secondary security key. -* You have determined which log types to forward. - -To enable log forwarding to Azure Monitor Logs by using the HTTP Data Collector API: - -Create a secret with your shared key: -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: my-secret - namespace: openshift-logging -type: Opaque -data: - shared_key: # <1> ----- -<1> Must contain a primary or secondary key for the link:https://learn.microsoft.com/en-us/azure/azure-monitor/logs/log-analytics-workspace-overview[Log Analytics workspace] making the request. - -To obtain a link:https://learn.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key[shared key], you can use the following Azure PowerShell command: - -[source,text] ----- -Get-AzOperationalInsightsWorkspaceSharedKey -ResourceGroupName "" -Name "" ----- - - -Create or edit your `ClusterLogForwarder` CR using the template matching your log selection. - -.Forward all logs -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogForwarder" -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: azure-monitor - type: azureMonitor - azureMonitor: - customerId: my-customer-id # <1> - logType: my_log_type # <2> - secret: - name: my-secret - pipelines: - - name: app-pipeline - inputRefs: - - application - outputRefs: - - azure-monitor ----- -<1> Unique identifier for the Log Analytics workspace. Required field. -<2> link:https://learn.microsoft.com/en-us/azure/azure-monitor/logs/data-collector-api?tabs=powershell#record-type-and-properties[Azure record type] of the data being submitted. May only contain letters, numbers, and underscores (_), and may not exceed 100 characters. - -.Forward application and infrastructure logs -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogForwarder" -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: azure-monitor-app - type: azureMonitor - azureMonitor: - customerId: my-customer-id - logType: application_log # <1> - secret: - name: my-secret - - name: azure-monitor-infra - type: azureMonitor - azureMonitor: - customerId: my-customer-id - logType: infra_log # - secret: - name: my-secret - pipelines: - - name: app-pipeline - inputRefs: - - application - outputRefs: - - azure-monitor-app - - name: infra-pipeline - inputRefs: - - infrastructure - outputRefs: - - azure-monitor-infra ----- -<1> link:https://learn.microsoft.com/en-us/azure/azure-monitor/logs/data-collector-api?tabs=powershell#record-type-and-properties[Azure record type] of the data being submitted. May only contain letters, numbers, and underscores (_), and may not exceed 100 characters. - -.Advanced configuration options -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogForwarder" -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: azure-monitor - type: azureMonitor - azureMonitor: - customerId: my-customer-id - logType: my_log_type - azureResourceId: "/subscriptions/111111111" # <1> - host: "ods.opinsights.azure.com" # <2> - secret: - name: my-secret - pipelines: - - name: app-pipeline - inputRefs: - - application - outputRefs: - - azure-monitor ----- -<1> Resource ID of the Azure resource the data should be associated with. 
Optional field. -<2> Alternative host for dedicated Azure regions. Optional field. Default value is `ods.opinsights.azure.com`. Default value for Azure Government is `ods.opinsights.azure.us`. diff --git a/modules/logging-http-forward.adoc b/modules/logging-http-forward.adoc deleted file mode 100644 index e8fa694751c5..000000000000 --- a/modules/logging-http-forward.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-http-forward_{context}"] -= Forwarding logs over HTTP - -Forwarding logs over HTTP is supported for both the Fluentd and Vector log collectors. To enable, specify `http` as the output type in the `ClusterLogForwarder` custom resource (CR). - -.Procedure - -* Create or edit the `ClusterLogForwarder` CR using the template below: -+ -.Example ClusterLogForwarder CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: httpout-app - type: http - url: <4> - http: - headers: <5> - h1: v1 - h2: v2 - method: POST - secret: - name: <6> - tls: - insecureSkipVerify: <7> - pipelines: - - name: - inputRefs: - - application - outputRefs: - - httpout-app <8> ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Destination address for logs. -<5> Additional headers to send with the log record. -<6> Secret name for destination credentials. -<7> Values are either `true` or `false`. -<8> This value should be the same as the output name. diff --git a/modules/logging-identity-federation.adoc b/modules/logging-identity-federation.adoc deleted file mode 100644 index 5b00570827ff..000000000000 --- a/modules/logging-identity-federation.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// * logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-identity-federation_{context}"] -= Workload identity federation -Workload identity federation enables authentication to cloud-based log stores using short-lived tokens. - -.Prerequisites -* {product-title} 4.14 and later -* {logging-uc} 5.9 and later - -.Procedure -* If you use the {product-title} web console to install the {loki-op}, clusters that use short-lived tokens are automatically detected. You are prompted to create roles and supply the data required for the {loki-op} to create a `CredentialsRequest` object, which populates a secret. - -* If you use the {oc-first} to install the {loki-op}, you must manually create a subscription object using the appropriate template for your storage provider, as shown in the following examples. This authentication strategy is only supported for the storage providers indicated. 
- -.Azure sample subscription -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: loki-operator - namespace: openshift-operators-redhat -spec: - channel: "stable-5.9" - installPlanApproval: Manual - name: loki-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - config: - env: - - name: CLIENTID - value: - name: TENANTID - value: - name: SUBSCRIPTIONID - value: - name: REGION - value: ----- - -.AWS sample subscription -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: loki-operator - namespace: openshift-operators-redhat -spec: - channel: "stable-5.9" - installPlanApproval: Manual - name: loki-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - config: - env: - - name: ROLEARN - value: ----- diff --git a/modules/logging-input-spec-filter-audit-infrastructure.adoc deleted file mode 100644 index 5428616dbe95..000000000000 --- a/modules/logging-input-spec-filter-audit-infrastructure.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/performance_reliability/logging-input-spec-filtering.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-input-spec-filter-audit-infrastructure_{context}"] -= Filtering the audit and infrastructure log inputs by source - -You can define the list of `audit` and `infrastructure` sources to collect the logs by using the `input` selector. - -.Prerequisites - -* You have installed the {clo}. -* You have administrator permissions. -* You have created a `ClusterLogForwarder` custom resource (CR). - -.Procedure - -. Add a configuration to define the `audit` and `infrastructure` sources in the `ClusterLogForwarder` CR. - -+ -The following example shows how to configure the `ClusterLogForwarder` CR to define `audit` and `infrastructure` sources: -+ -.Example `ClusterLogForwarder` CR -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -# ... -spec: - inputs: - - name: mylogs1 - infrastructure: - sources: # <1> - - node - - name: mylogs2 - audit: - sources: # <2> - - kubeAPI - - openshiftAPI - - ovn -# ... ----- -<1> Specifies the list of infrastructure sources to collect. The valid sources include: -** `node`: Journal log from the node -** `container`: Logs from the workloads deployed in the namespaces -<2> Specifies the list of audit sources to collect. The valid sources include: -** `kubeAPI`: Logs from the Kubernetes API servers -** `openshiftAPI`: Logs from the OpenShift API servers -** `auditd`: Logs from a node auditd service -** `ovn`: Logs from an open virtual network service - -. 
Apply the `ClusterLogForwarder` CR by running the following command: - -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- \ No newline at end of file diff --git a/modules/logging-input-spec-filter-labels-expressions.adoc b/modules/logging-input-spec-filter-labels-expressions.adoc deleted file mode 100644 index 1e8c7b38e6b4..000000000000 --- a/modules/logging-input-spec-filter-labels-expressions.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/performance_reliability/logging-input-spec-filtering.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-input-spec-filter-labels-expressions_{context}"] -= Filtering application logs at input by including either the label expressions or matching label key and values - -You can include the application logs based on the label expressions or a matching label key and its values by using the `input` selector. - -.Prerequisites - -* You have installed the {clo}. -* You have administrator permissions. -* You have created a `ClusterLogForwarder` custom resource (CR). - -.Procedure - -. Add a configuration for a filter to the `input` spec in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to include logs based on label expressions or matched label key/values: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -# ... -spec: - inputs: - - name: mylogs - application: - selector: - matchExpressions: - - key: env # <1> - operator: In # <2> - values: [“prod”, “qa”] # <3> - - key: zone - operator: NotIn - values: [“east”, “west”] - matchLabels: # <4> - app: one - name: app1 -# ... ----- -<1> Specifies the label key to match. -<2> Specifies the operator. Valid values include: `In`, `NotIn`, `Exists`, and `DoesNotExist`. -<3> Specifies an array of string values. If the `operator` value is either `Exists` or `DoesNotExist`, the value array must be empty. -<4> Specifies an exact key or value mapping. - -. Apply the `ClusterLogForwarder` CR by running the following command: - -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- \ No newline at end of file diff --git a/modules/logging-input-spec-filter-namespace-container.adoc b/modules/logging-input-spec-filter-namespace-container.adoc deleted file mode 100644 index 0d7ae0d434cc..000000000000 --- a/modules/logging-input-spec-filter-namespace-container.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/performance_reliability/logging-input-spec-filtering.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-input-spec-filter-namespace-container_{context}"] -= Filtering application logs at input by including or excluding the namespace or container name - -You can include or exclude the application logs based on the namespace and container name by using the `input` selector. - -.Prerequisites - -* You have installed the {clo}. -* You have administrator permissions. -* You have created a `ClusterLogForwarder` custom resource (CR). - -.Procedure - -. Add a configuration to include or exclude the namespace and container names in the `ClusterLogForwarder` CR. -+ -The following example shows how to configure the `ClusterLogForwarder` CR to include or exclude namespaces and container names: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -# ... 
-spec: - inputs: - - name: mylogs - application: - includes: - - namespace: "my-project" # <1> - container: "my-container" # <2> - excludes: - - container: "other-container*" # <3> - namespace: "other-namespace" # <4> -# ... ----- -<1> Specifies that the logs are only collected from these namespaces. -<2> Specifies that the logs are only collected from these containers. -<3> Specifies the pattern of namespaces to ignore when collecting the logs. -<4> Specifies the set of containers to ignore when collecting the logs. - -. Apply the `ClusterLogForwarder` CR by running the following command: - -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- -[NOTE] -==== -The `excludes` option takes precedence over `includes`. -==== diff --git a/modules/logging-install-es-operator.adoc b/modules/logging-install-es-operator.adoc deleted file mode 100644 index 7246b320b76f..000000000000 --- a/modules/logging-install-es-operator.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-deploying.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-install-es-operator_{context}"] -= Installing the OpenShift Elasticsearch Operator by using the web console - -The OpenShift Elasticsearch Operator creates and manages the Elasticsearch cluster used by OpenShift Logging. - -.Prerequisites - -* Elasticsearch is a memory-intensive application. Each Elasticsearch node needs at least 16GB of memory for both memory requests and limits, unless you specify otherwise in the `ClusterLogging` custom resource. -+ -The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the {product-title} cluster to run with the recommended or higher memory, up to a maximum of 64GB for each Elasticsearch node. -+ -Elasticsearch nodes can operate with a lower memory setting, though this is not recommended for production environments. - -* Ensure that you have the necessary persistent storage for Elasticsearch. Note that each Elasticsearch node -requires its own storage volume. -+ -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== - -.Procedure - -. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. -. Click *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. -. Ensure that the *All namespaces on the cluster* is selected under *Installation mode*. -. Ensure that *openshift-operators-redhat* is selected under *Installed Namespace*. -+ -You must specify the `openshift-operators-redhat` namespace. The `openshift-operators` namespace might contain Community Operators, which are untrusted and could publish a metric with the same name as {product-title} metric, which would cause conflicts. - -. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the `Namespace` object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -. Select *stable-5.x* as the *Update channel*. -. Select an *Update approval* strategy: -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. 
-+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -. Click *Install*. - -.Verification - -. Verify that the OpenShift Elasticsearch Operator installed by switching to the *Ecosystem* -> *Installed Operators* page. -. Ensure that *OpenShift Elasticsearch Operator* is listed in all projects with a *Status* of *Succeeded*. diff --git a/modules/logging-loki-cli-install.adoc b/modules/logging-loki-cli-install.adoc deleted file mode 100644 index 0d542e9e71e8..000000000000 --- a/modules/logging-loki-cli-install.adoc +++ /dev/null @@ -1,258 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-cli-install_{context}"] -= Installing {logging-uc} and the {loki-op} using the CLI - -To install and configure logging on your {product-title} cluster, an Operator such as {loki-op} for log storage must be installed first. This can be done from the {product-title} CLI. - -.Prerequisites - -* You have administrator permissions. -* You installed the {oc-first}. -* You have access to a supported object store. For example: AWS S3, {gcp-full} Storage, Azure, Swift, Minio, or {rh-storage}. - -.Procedure - --- -include::snippets/logging-stable-updates-snip.adoc[leveloffset=+1] --- - -. Create a `Namespace` object for {loki-op}: -+ -.Example `Namespace` object -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-operators-redhat # <1> - annotations: - openshift.io/node-selector: "" - labels: - openshift.io/cluster-monitoring: "true" # <2> ----- -<1> You must specify the `openshift-operators-redhat` namespace. To prevent possible conflicts with metrics, you should configure the Prometheus Cluster Monitoring stack to scrape metrics from the `openshift-operators-redhat` namespace and not the `openshift-operators` namespace. The `openshift-operators` namespace might contain community Operators, which are untrusted and could publish a metric with the same name as an {product-title} metric, which would cause conflicts. -<2> A string value that specifies the label as shown to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -. Apply the `Namespace` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Create a `Subscription` object for {loki-op}: -+ -.Example `Subscription` object -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: loki-operator - namespace: openshift-operators-redhat # <1> -spec: - channel: stable # <2> - name: loki-operator - source: redhat-operators # <3> - sourceNamespace: openshift-marketplace ----- -<1> You must specify the `openshift-operators-redhat` namespace. -<2> Specify `stable`, or `stable-5.` as the channel. -<3> Specify `redhat-operators`. If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, specify the name of the `CatalogSource` object you created when you configured the Operator Lifecycle Manager (OLM). - -. Apply the `Subscription` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. 
Create a `namespace` object for the {clo}: -+ -.Example `namespace` object -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-logging # <1> - annotations: - openshift.io/node-selector: "" - labels: - openshift.io/cluster-logging: "true" - openshift.io/cluster-monitoring: "true" # <2> ----- -<1> The Red{nbsp}Hat OpenShift Logging Operator is only deployable to the `openshift-logging` namespace. -<2> A string value that specifies the label as shown to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -. Apply the `namespace` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Create an `OperatorGroup` object -+ -.Example `OperatorGroup` object -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: cluster-logging - namespace: openshift-logging # <1> -spec: - targetNamespaces: - - openshift-logging ----- -<1> You must specify the `openshift-logging` namespace. - -. Apply the `OperatorGroup` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Create a `Subscription` object: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: cluster-logging - namespace: openshift-logging # <1> -spec: - channel: stable # <2> - name: cluster-logging - source: redhat-operators # <3> - sourceNamespace: openshift-marketplace ----- -<1> You must specify the `openshift-logging` namespace. -<2> Specify `stable`, or `stable-5.` as the channel. -<3> Specify `redhat-operators`. If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, specify the name of the CatalogSource object you created when you configured the Operator Lifecycle Manager (OLM). - -. Apply the `Subscription` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - - -. Create a `LokiStack` CR: -+ -.Example `LokiStack` CR -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki # <1> - namespace: openshift-logging # <2> -spec: - size: 1x.small # <3> - storage: - schemas: - - version: v13 - effectiveDate: "--
" - secret: - name: logging-loki-s3 # <4> - type: s3 # <5> - credentialMode: # <6> - storageClassName: # <7> - tenants: - mode: openshift-logging # <8> ----- -<1> Use the name `logging-loki`. -<2> You must specify the `openshift-logging` namespace. -<3> Specify the deployment size. In the {logging} 5.8 and later versions, the supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. -<4> Specify the name of your log store secret. -<5> Specify the corresponding storage type. -<6> Optional field, logging 5.9 and later. Supported user configured values are as follows: `static` is the default authentication mode available for all supported object storage types using credentials stored in a Secret. `token` for short-lived tokens retrieved from a credential source. In this mode the static configuration does not contain credentials needed for the object storage. Instead, they are generated during runtime using a service, which allows for shorter-lived credentials and much more granular control. This authentication mode is not supported for all object storage types. `token-cco` is the default value when Loki is running on managed STS mode and using CCO on STS/WIF clusters. -<7> Specify the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. Available storage classes for your cluster can be listed by using the `oc get storageclasses` command. -<8> LokiStack defaults to running in multi-tenant mode, which cannot be modified. One tenant is provided for each log type: audit, infrastructure, and application logs. This enables access control for individual users and user groups to different log streams. - -. Apply the `LokiStack CR` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Create a `ClusterLogging` CR object: -+ -.Example ClusterLogging CR object -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance # <1> - namespace: openshift-logging # <2> -spec: - collection: - type: vector - logStore: - lokistack: - name: logging-loki - retentionPolicy: - application: - maxAge: 7d - audit: - maxAge: 7d - infra: - maxAge: 7d - type: lokistack - visualization: - type: ocp-console - ocpConsole: - logsLimit: 15 - managementState: Managed ----- -<1> Name must be `instance`. -<2> Namespace must be `openshift-logging`. - - -. Apply the `ClusterLogging CR` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. 
Verify the installation by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -$ oc get pods -n openshift-logging -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-fb7f7cf69-8jsbq 1/1 Running 0 98m -collector-222js 2/2 Running 0 18m -collector-g9ddv 2/2 Running 0 18m -collector-hfqq8 2/2 Running 0 18m -collector-sphwg 2/2 Running 0 18m -collector-vv7zn 2/2 Running 0 18m -collector-wk5zz 2/2 Running 0 18m -logging-view-plugin-6f76fbb78f-n2n4n 1/1 Running 0 18m -lokistack-sample-compactor-0 1/1 Running 0 42m -lokistack-sample-distributor-7d7688bcb9-dvcj8 1/1 Running 0 42m -lokistack-sample-gateway-5f6c75f879-bl7k9 2/2 Running 0 42m -lokistack-sample-gateway-5f6c75f879-xhq98 2/2 Running 0 42m -lokistack-sample-index-gateway-0 1/1 Running 0 42m -lokistack-sample-ingester-0 1/1 Running 0 42m -lokistack-sample-querier-6b7b56bccc-2v9q4 1/1 Running 0 42m -lokistack-sample-query-frontend-84fb57c578-gq2f7 1/1 Running 0 42m ----- diff --git a/modules/logging-loki-gui-install.adoc b/modules/logging-loki-gui-install.adoc deleted file mode 100644 index d97edac8969d..000000000000 --- a/modules/logging-loki-gui-install.adoc +++ /dev/null @@ -1,172 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-gui-install_{context}"] -= Installing {logging-uc} and the {loki-op} using the web console - -To install and configure logging on your {product-title} cluster, an Operator such as {loki-op} for log storage must be installed first. This can be done from the software catalog within the web console. - -.Prerequisites - -* You have access to a supported object store (AWS S3, {gcp-full} Storage, Azure, Swift, Minio, {rh-storage}). -* You have administrator permissions. -* You have access to the {product-title} web console. - -.Procedure - -. In the {product-title} web console *Administrator* perspective, go to *Ecosystem* -> *Software Catalog*. - -. Type {loki-op} in the *Filter by keyword* field. Click *{loki-op}* in the list of available Operators, and then click *Install*. -+ -[IMPORTANT] -==== -The Community {loki-op} is not supported by Red{nbsp}Hat. -==== - -. Select *stable* or *stable-x.y* as the *Update channel*. -+ --- -include::snippets/logging-stable-updates-snip.adoc[] --- -+ -The {loki-op} must be deployed to the global operator group namespace `openshift-operators-redhat`, so the *Installation mode* and *Installed Namespace* are already selected. If this namespace does not already exist, it is created for you. - -. Select *Enable Operator-recommended cluster monitoring on this namespace.* -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the `Namespace` object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -. For *Update approval* select *Automatic*, then click *Install*. -+ -If the approval strategy in the subscription is set to *Automatic*, the update process initiates as soon as a new Operator version is available in the selected channel. If the approval strategy is set to *Manual*, you must manually approve pending updates. - -. Install the Red{nbsp}Hat OpenShift Logging Operator: - -.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. - -.. Choose *Red{nbsp}Hat OpenShift Logging* from the list of available Operators, and click *Install*. 
- -.. Ensure that the *A specific namespace on the cluster* is selected under *Installation Mode*. - -.. Ensure that *Operator recommended namespace* is *openshift-logging* under *Installed Namespace*. - -.. Select *Enable Operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. -You must select this option to ensure that cluster monitoring -scrapes the `openshift-logging` namespace. - -.. Select *stable-5.y* as the *Update Channel*. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -. Go to the *Ecosystem* -> *Installed Operators* page. Click the *All instances* tab. - -. From the *Create new* drop-down list, select *LokiStack*. - -. Select *YAML view*, and then use the following template to create a `LokiStack` CR: -+ --- -.Example `LokiStack` CR -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki # <1> - namespace: openshift-logging # <2> -spec: - size: 1x.small # <3> - storage: - schemas: - - version: v13 - effectiveDate: "--
" - secret: - name: logging-loki-s3 # <4> - type: s3 # <5> - credentialMode: # <6> - storageClassName: # <7> - tenants: - mode: openshift-logging # <8> ----- -<1> Use the name `logging-loki`. -<2> You must specify the `openshift-logging` namespace. -<3> Specify the deployment size. In the {logging} 5.8 and later versions, the supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. -<4> Specify the name of your log store secret. -<5> Specify the corresponding storage type. -<6> Optional field, logging 5.9 and later. Supported user configured values are as follows: static is the default authentication mode available for all supported object storage types using credentials stored in a Secret. token for short-lived tokens retrieved from a credential source. In this mode the static configuration does not contain credentials needed for the object storage. Instead, they are generated during runtime using a service, which allows for shorter-lived credentials and much more granular control. This authentication mode is not supported for all object storage types. token-cco is the default value when Loki is running on managed STS mode and using CCO on STS/WIF clusters. -<7> Specify the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. Available storage classes for your cluster can be listed by using the `oc get storageclasses` command. -<8> LokiStack defaults to running in multi-tenant mode, which cannot be modified. One tenant is provided for each log type: audit, infrastructure, and application logs. This enables access control for individual users and user groups to different log streams. --- -+ -[IMPORTANT] -==== -It is not possible to change the number `1x` for the deployment size. -==== - -. Click *Create*. - -. Create an OpenShift Logging instance: - -.. Switch to the *Administration* -> *Custom Resource Definitions* page. - -.. On the *Custom Resource Definitions* page, click *ClusterLogging*. - -.. On the *Custom Resource Definition details* page, select *View Instances* from the *Actions* menu. - -.. On the *ClusterLoggings* page, click *Create ClusterLogging*. -+ -You might have to refresh the page to load the data. - -.. In the YAML field, replace the code with the following: -+ --- -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance # <1> - namespace: openshift-logging # <2> -spec: - collection: - type: vector - logStore: - lokistack: - name: logging-loki - retentionPolicy: - application: - maxAge: 7d - audit: - maxAge: 7d - infra: - maxAge: 7d - type: lokistack - visualization: - type: ocp-console - ocpConsole: - logsLimit: 15 - - managementState: Managed ----- -<1> Name must be `instance`. -<2> Namespace must be `openshift-logging`. --- - -.Verification - -. Go to *Ecosystem* -> *Installed Operators*. -. Make sure the *openshift-logging* project is selected. -. In the *Status* column, verify that you see green checkmarks with *InstallSucceeded* and the text *Up to date*. - -[NOTE] -==== -An Operator might display a `Failed` status before the installation finishes. If the Operator install completes with an `InstallSucceeded` message, refresh the page. 
-==== diff --git a/modules/logging-loki-memberlist-ip.adoc b/modules/logging-loki-memberlist-ip.adoc deleted file mode 100644 index 4b31df4bbf05..000000000000 --- a/modules/logging-loki-memberlist-ip.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-memberlist-ip_{context}"] -= Configuring Loki to tolerate memberlist creation failure - -In an OpenShift cluster, administrators generally use a non-private IP network range. As a result, the LokiStack memberlist configuration fails because, by default, it only uses private IP networks. - -As an administrator, you can select the pod network for the memberlist configuration. You can modify the LokiStack CR to use the `podIP` in the `hashRing` spec. To configure the LokiStack CR, use the following command: - -[source,terminal] ----- -$ oc patch LokiStack logging-loki -n openshift-logging --type=merge -p '{"spec": {"hashRing":{"memberlist":{"instanceAddrType":"podIP","type": "memberlist"}}}}' ----- - -.Example LokiStack to include `podIP` -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... - hashRing: - type: memberlist - memberlist: - instanceAddrType: podIP -# ... ----- - diff --git a/modules/logging-loki-pod-placement.adoc b/modules/logging-loki-pod-placement.adoc deleted file mode 100644 index 161e2caa3da8..000000000000 --- a/modules/logging-loki-pod-placement.adoc +++ /dev/null @@ -1,201 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_storage/cluster-logging-loki.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-pod-placement_{context}"] -= Loki pod placement -You can control which nodes the Loki pods run on, and prevent other workloads from using those nodes, by using tolerations or node selectors on the pods. - -You can apply tolerations to the log store pods with the LokiStack custom resource (CR) and apply taints to a node with the node specification. A taint on a node is a `key:value` pair that instructs the node to repel all pods that do not allow the taint. Using a specific `key:value` pair that is not on other pods ensures that only the log store pods can run on that node. - -.Example LokiStack with node selectors -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... - template: - compactor: # <1> - nodeSelector: - node-role.kubernetes.io/infra: "" # <2> - distributor: - nodeSelector: - node-role.kubernetes.io/infra: "" - gateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - indexGateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - ingester: - nodeSelector: - node-role.kubernetes.io/infra: "" - querier: - nodeSelector: - node-role.kubernetes.io/infra: "" - queryFrontend: - nodeSelector: - node-role.kubernetes.io/infra: "" - ruler: - nodeSelector: - node-role.kubernetes.io/infra: "" -# ... ----- - -<1> Specifies the component pod type that applies to the node selector. -<2> Specifies the pods that are moved to nodes containing the defined label. - -In the previous example configuration, all Loki pods are moved to nodes containing the `node-role.kubernetes.io/infra: ""` label. 
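-
-For example, to reserve a node for the log store pods, you can label and taint it so that only pods with matching tolerations are scheduled there. The following commands are a minimal sketch that assumes a hypothetical infrastructure node named `example-infra-node`; the `reserved` taints correspond to the tolerations shown in the next example:
-
-[source,terminal]
-----
-$ oc label nodes example-infra-node node-role.kubernetes.io/infra=""
-$ oc adm taint nodes example-infra-node node-role.kubernetes.io/infra=reserved:NoSchedule node-role.kubernetes.io/infra=reserved:NoExecute
-----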
- - -.Example LokiStack CR with node selectors and tolerations -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... - template: - compactor: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - distributor: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - indexGateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - ingester: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - querier: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - queryFrontend: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - ruler: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - gateway: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved -# ... ----- - -To configure the `nodeSelector` and `tolerations` fields of the LokiStack (CR), you can use the [command]`oc explain` command to view the description and fields for a particular resource: - -[source,terminal] ----- -$ oc explain lokistack.spec.template ----- - -.Example output -[source,text] ----- -KIND: LokiStack -VERSION: loki.grafana.com/v1 - -RESOURCE: template - -DESCRIPTION: - Template defines the resource/limits/tolerations/nodeselectors per - component - -FIELDS: - compactor - Compactor defines the compaction component spec. - - distributor - Distributor defines the distributor component spec. -... ----- - -For more detailed information, you can add a specific field: - -[source,terminal] ----- -$ oc explain lokistack.spec.template.compactor ----- - -.Example output -[source,text] ----- -KIND: LokiStack -VERSION: loki.grafana.com/v1 - -RESOURCE: compactor - -DESCRIPTION: - Compactor defines the compaction component spec. - -FIELDS: - nodeSelector - NodeSelector defines the labels required by a node to schedule the - component onto it. -... 
----- diff --git a/modules/logging-loki-reliability-hardening.adoc b/modules/logging-loki-reliability-hardening.adoc deleted file mode 100644 index 92c5202dd5a8..000000000000 --- a/modules/logging-loki-reliability-hardening.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-reliability-hardening_{context}"] -= Configuring Loki to tolerate node failure - -In the {logging} 5.8 and later versions, the {loki-op} supports setting pod anti-affinity rules to request that pods of the same component are scheduled on different available nodes in the cluster. - -include::snippets/about-pod-affinity.adoc[] - -The Operator sets default, preferred `podAntiAffinity` rules for all Loki components, which includes the `compactor`, `distributor`, `gateway`, `indexGateway`, `ingester`, `querier`, `queryFrontend`, and `ruler` components. - -You can override the preferred `podAntiAffinity` settings for Loki components by configuring required settings in the `requiredDuringSchedulingIgnoredDuringExecution` field: - -.Example user settings for the ingester component -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: -# ... - template: - ingester: - podAntiAffinity: - # ... - requiredDuringSchedulingIgnoredDuringExecution: <1> - - labelSelector: - matchLabels: <2> - app.kubernetes.io/component: ingester - topologyKey: kubernetes.io/hostname -# ... ----- -<1> The stanza to define a required rule. -<2> The key-value pair (label) that must be matched to apply the rule. diff --git a/modules/logging-loki-restart-hardening.adoc b/modules/logging-loki-restart-hardening.adoc deleted file mode 100644 index 37d0eca9383e..000000000000 --- a/modules/logging-loki-restart-hardening.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-restart-hardening_{context}"] -= LokiStack behavior during cluster restarts - -In logging version 5.8 and newer versions, when an {product-title} cluster is restarted, LokiStack ingestion and the query path continue to operate within the available CPU and memory resources available for the node. This means that there is no downtime for the LokiStack during {product-title} cluster updates. This behavior is achieved by using `PodDisruptionBudget` resources. The {loki-op} provisions `PodDisruptionBudget` resources for Loki, which determine the minimum number of pods that must be available per component to ensure normal operations under certain conditions. diff --git a/modules/logging-loki-retention.adoc b/modules/logging-loki-retention.adoc deleted file mode 100644 index ee8d255164c4..000000000000 --- a/modules/logging-loki-retention.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_storage/cluster-logging-loki.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-retention_{context}"] -= Enabling stream-based retention with Loki - -With Logging version 5.6 and higher, you can configure retention policies based on log streams. Rules for these may be set globally, per tenant, or both. If you configure both, tenant rules apply before global rules. 
-
-include::snippets/logging-retention-period-snip.adoc[]
-
-[NOTE]
-====
-Although logging version 5.9 and higher supports schema v12, v13 is recommended.
-====
-
-. To enable stream-based retention, create a `LokiStack` CR:
-+
-.Example global stream-based retention for AWS
-[source,yaml]
-----
-apiVersion: loki.grafana.com/v1
-kind: LokiStack
-metadata:
-  name: logging-loki
-  namespace: openshift-logging
-spec:
-  limits:
-    global: <1>
-      retention: <2>
-        days: 20
-        streams:
-        - days: 4
-          priority: 1
-          selector: '{kubernetes_namespace_name=~"test.+"}' <3>
-        - days: 1
-          priority: 1
-          selector: '{log_type="infrastructure"}'
-  managementState: Managed
-  replicationFactor: 1
-  size: 1x.small
-  storage:
-    schemas:
-    - effectiveDate: "2020-10-11"
-      version: v11
-    secret:
-      name: logging-loki-s3
-      type: aws
-  storageClassName: gp3-csi
-  tenants:
-    mode: openshift-logging
-----
-<1> Sets retention policy for all log streams. *Note: This field does not impact the retention period for stored logs in object storage.*
-<2> Retention is enabled in the cluster when this block is added to the CR.
-<3> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream.
-
-.Example per-tenant stream-based retention for AWS
-[source,yaml]
-----
-apiVersion: loki.grafana.com/v1
-kind: LokiStack
-metadata:
-  name: logging-loki
-  namespace: openshift-logging
-spec:
-  limits:
-    global:
-      retention:
-        days: 20
-    tenants: <1>
-      application:
-        retention:
-          days: 1
-          streams:
-          - days: 4
-            selector: '{kubernetes_namespace_name=~"test.+"}' <2>
-      infrastructure:
-        retention:
-          days: 5
-          streams:
-          - days: 1
-            selector: '{kubernetes_namespace_name=~"openshift-cluster.+"}'
-  managementState: Managed
-  replicationFactor: 1
-  size: 1x.small
-  storage:
-    schemas:
-    - effectiveDate: "2020-10-11"
-      version: v11
-    secret:
-      name: logging-loki-s3
-      type: aws
-  storageClassName: gp3-csi
-  tenants:
-    mode: openshift-logging
-----
-<1> Sets retention policy by tenant. Valid tenant types are `application`, `audit`, and `infrastructure`.
-<2> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream.
-
-. Apply the `LokiStack` CR by running the following command:
-+
-[source,terminal]
-----
-$ oc apply -f .yaml
-----
-
-[NOTE]
-====
-Stream-based retention does not manage the retention of stored logs. Global retention periods for stored logs, up to a supported maximum of 30 days, are configured with your object storage.
-====
diff --git a/modules/logging-loki-storage-aws.adoc b/modules/logging-loki-storage-aws.adoc
deleted file mode 100644
index b31ad09623d2..000000000000
--- a/modules/logging-loki-storage-aws.adoc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Module is included in the following assemblies:
-//
-// * observability/logging/log_storage/installing-log-storage.adoc
-
-:_mod-docs-content-type: PROCEDURE
-[id="logging-loki-storage-aws_{context}"]
-= AWS storage
-
-.Prerequisites
-
-* You installed the {loki-op}.
-* You installed the {oc-first}.
-* You created a link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html[bucket] on AWS.
-* You created an link:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_resource-based[AWS IAM Policy and IAM User].
- -.Procedure - -* Create an object storage secret with the name `logging-loki-aws` by running the following command: -+ -[source,terminal,subs="+quotes"] ----- -$ oc create secret generic logging-loki-aws \ - --from-literal=bucketnames="" \ - --from-literal=endpoint="" \ - --from-literal=access_key_id="" \ - --from-literal=access_key_secret="" \ - --from-literal=region="" ----- - -[id="AWS_storage_STS_{context}"] -== AWS storage for STS enabled clusters - -If your cluster has STS enabled, the Cloud Credential Operator (CCO) supports short-term authentication using AWS tokens. - -You can create the Loki object storage secret manually by running the following command: -[source,terminal,subs="+quotes"] ----- -$ oc -n openshift-logging create secret generic "logging-loki-aws" \ - --from-literal=bucketnames="" \ - --from-literal=region="" \ - --from-literal=audience="" <1> ----- -<1> Optional annotation, default value is `openshift`. diff --git a/modules/logging-loki-storage-azure.adoc b/modules/logging-loki-storage-azure.adoc deleted file mode 100644 index 4aee024a57fa..000000000000 --- a/modules/logging-loki-storage-azure.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-storage-azure_{context}"] -= Azure storage - -.Prerequisites - -* You installed the {loki-op}. -* You installed the {oc-first}. -* You created a link:https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction[bucket] on Azure. - -.Procedure - -* Create an object storage secret with the name `logging-loki-azure` by running the following command: -+ -[source,terminal,subs="+quotes"] ----- -$ oc create secret generic logging-loki-azure \ - --from-literal=container="" \ - --from-literal=environment="" \ # <1> - --from-literal=account_name="" \ - --from-literal=account_key="" ----- -<1> Supported environment values are `AzureGlobal`, `AzureChinaCloud`, `AzureGermanCloud`, or `AzureUSGovernment`. - -[id="azure_storage_workload_id_{context}"] -== Azure storage for {entra-first} enabled clusters - -If your cluster has {entra-first} enabled, the Cloud Credential Operator (CCO) supports short-term authentication using {entra-short}. - -You can create the Loki object storage secret manually by running the following command: - -[source,terminal,subs="+quotes"] ----- -$ oc -n openshift-logging create secret generic logging-loki-azure \ ---from-literal=environment="" \ ---from-literal=account_name="" \ ---from-literal=container="" ----- diff --git a/modules/logging-loki-storage-gcp.adoc b/modules/logging-loki-storage-gcp.adoc deleted file mode 100644 index e3f41215905a..000000000000 --- a/modules/logging-loki-storage-gcp.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-storage-gcp_{context}"] -= {gcp-full} storage - -.Prerequisites - -* You installed the {loki-op}. -* You installed the {oc-first}. -* You created a link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[project] on {gcp-first}. -* You created a link:https://cloud.google.com/storage/docs/creating-buckets[bucket] in the same project. 
-* You created a link:https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account[service account] in the same project for {gcp-short} authentication. - -.Procedure - -. Copy the service account credentials received from {gcp-short} into a file called `key.json`. - -. Create an object storage secret with the name `logging-loki-gcs` by running the following command: -+ -[source,terminal,subs="+quotes"] ----- -$ oc create secret generic logging-loki-gcs \ - --from-literal=bucketname="" \ - --from-file=key.json="" ----- diff --git a/modules/logging-loki-storage-minio.adoc b/modules/logging-loki-storage-minio.adoc deleted file mode 100644 index 8327dcf24a9c..000000000000 --- a/modules/logging-loki-storage-minio.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-storage-minio_{context}"] -= Minio storage - -.Prerequisites - -* You installed the {loki-op}. -* You installed the {oc-first}. -* You have link:https://operator.min.io/[Minio] deployed on your cluster. -* You created a link:https://docs.min.io/docs/minio-client-complete-guide.html[bucket] on Minio. - -.Procedure - -* Create an object storage secret with the name `logging-loki-minio` by running the following command: -+ -[source,terminal,subs="+quotes"] ----- -$ oc create secret generic logging-loki-minio \ - --from-literal=bucketnames="" \ - --from-literal=endpoint="" \ - --from-literal=access_key_id="" \ - --from-literal=access_key_secret="" ----- diff --git a/modules/logging-loki-storage-odf.adoc b/modules/logging-loki-storage-odf.adoc deleted file mode 100644 index 5a4c13b1b564..000000000000 --- a/modules/logging-loki-storage-odf.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module is included in the following assemblies: -// logging/cluster-logging-loki.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-storage-odf_{context}"] -= {rh-storage} storage - -.Prerequisites - -* You installed the {loki-op}. -* You installed the {oc-first}. -* You deployed link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/[{rh-storage}]. -* You configured your {rh-storage} cluster link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/latest/html/managing_and_allocating_storage_resources/adding-file-and-object-storage-to-an-existing-external-ocs-cluster[for object storage]. - -.Procedure - -. Create an `ObjectBucketClaim` custom resource in the `openshift-logging` namespace: -+ -[source,yaml] ----- -apiVersion: objectbucket.io/v1alpha1 -kind: ObjectBucketClaim -metadata: - name: loki-bucket-odf - namespace: openshift-logging -spec: - generateBucketName: loki-bucket-odf - storageClassName: openshift-storage.noobaa.io ----- - -. Get bucket properties from the associated `ConfigMap` object by running the following command: -+ -[source,terminal] ----- -BUCKET_HOST=$(oc get -n openshift-logging configmap loki-bucket-odf -o jsonpath='{.data.BUCKET_HOST}') -BUCKET_NAME=$(oc get -n openshift-logging configmap loki-bucket-odf -o jsonpath='{.data.BUCKET_NAME}') -BUCKET_PORT=$(oc get -n openshift-logging configmap loki-bucket-odf -o jsonpath='{.data.BUCKET_PORT}') ----- - -. 
Get bucket access key from the associated secret by running the following command: -+ -[source,terminal] ----- -ACCESS_KEY_ID=$(oc get -n openshift-logging secret loki-bucket-odf -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d) -SECRET_ACCESS_KEY=$(oc get -n openshift-logging secret loki-bucket-odf -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d) ----- - -. Create an object storage secret with the name `logging-loki-odf` by running the following command: -+ -[source,terminal,subs="+quotes"] ----- -$ oc create -n openshift-logging secret generic logging-loki-odf \ ---from-literal=access_key_id="" \ ---from-literal=access_key_secret="" \ ---from-literal=bucketnames="" \ ---from-literal=endpoint="https://:" ----- diff --git a/modules/logging-loki-storage-swift.adoc b/modules/logging-loki-storage-swift.adoc deleted file mode 100644 index c4566a5dab24..000000000000 --- a/modules/logging-loki-storage-swift.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-storage-swift_{context}"] -= Swift storage - -.Prerequisites - -* You installed the {loki-op}. -* You installed the {oc-first}. -* You created a https://docs.openstack.org/newton/user-guide/cli-swift-create-containers.html[bucket] on Swift. - -.Procedure - -* Create an object storage secret with the name `logging-loki-swift` by running the following command: -+ -[source,terminal,subs="+quotes"] ----- -$ oc create secret generic logging-loki-swift \ - --from-literal=auth_url="" \ - --from-literal=username="" \ - --from-literal=user_domain_name="" \ - --from-literal=user_domain_id="" \ - --from-literal=user_id="" \ - --from-literal=password="" \ - --from-literal=domain_id="" \ - --from-literal=domain_name="" \ - --from-literal=container_name="" ----- - -* You can optionally provide project-specific data, region, or both by running the following command: -+ -[source,terminal,subs="+quotes"] ----- -$ oc create secret generic logging-loki-swift \ - --from-literal=auth_url="" \ - --from-literal=username="" \ - --from-literal=user_domain_name="" \ - --from-literal=user_domain_id="" \ - --from-literal=user_id="" \ - --from-literal=password="" \ - --from-literal=domain_id="" \ - --from-literal=domain_name="" \ - --from-literal=container_name="" \ - --from-literal=project_id="" \ - --from-literal=project_name="" \ - --from-literal=project_domain_id="" \ - --from-literal=project_domain_name="" \ - --from-literal=region="" ----- diff --git a/modules/logging-loki-storage.adoc b/modules/logging-loki-storage.adoc deleted file mode 100644 index 31a412c186c6..000000000000 --- a/modules/logging-loki-storage.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module is included in the following assemblies: -// -// * observability/logging/log_storage/installing-log-storage.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-storage_{context}"] -= Loki object storage - -The {loki-op} supports link:https://aws.amazon.com/[AWS S3], as well as other S3 compatible object stores such as link:https://min.io/[Minio] and link:https://www.redhat.com/en/technologies/cloud-computing/openshift-data-foundation[{rh-storage}]. link:https://azure.microsoft.com[Azure], link:https://cloud.google.com/[GCS], and link:https://docs.openstack.org/swift/latest/[Swift] are also supported. - -The recommended nomenclature for Loki storage is `logging-loki-__`. 
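-
-The secret is then referenced from the `LokiStack` custom resource (CR) by its name and `type` value. The following snippet is a minimal sketch that assumes a Minio-backed secret named `logging-loki-minio`:
-
-[source,yaml]
-----
-spec:
-  storage:
-    secret:
-      name: logging-loki-minio
-      type: s3
-----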
- -The following table shows the `type` values within the `LokiStack` custom resource (CR) for each storage provider. For more information, see the section on your storage provider. - -[options="header"] -.Secret type quick reference -|=== -| Storage provider | Secret `type` value -| AWS | s3 -| Azure | azure -| {gcp-full} | gcs -| Minio | s3 -| OpenShift Data Foundation | s3 -| Swift | swift -|=== diff --git a/modules/logging-loki-zone-aware-rep.adoc b/modules/logging-loki-zone-aware-rep.adoc deleted file mode 100644 index be76d3519a95..000000000000 --- a/modules/logging-loki-zone-aware-rep.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc - -:_mod-docs-content-type: CONCEPT -[id="logging-loki-zone-aware-rep_{context}"] -= Zone aware data replication - -In the {logging} 5.8 and later versions, the {loki-op} offers support for zone-aware data replication through pod topology spread constraints. Enabling this feature enhances reliability and safeguards against log loss in the event of a single zone failure. When configuring the deployment size as `1x.extra.small`, `1x.small`, or `1x.medium,` the `replication.factor` field is automatically set to 2. - -To ensure proper replication, you need to have at least as many availability zones as the replication factor specifies. While it is possible to have more availability zones than the replication factor, having fewer zones can lead to write failures. Each zone should host an equal number of instances for optimal operation. - -.Example LokiStack CR with zone replication enabled -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - replicationFactor: 2 # <1> - replication: - factor: 2 # <2> - zones: - - maxSkew: 1 # <3> - topologyKey: topology.kubernetes.io/zone # <4> ----- -<1> Deprecated field, values entered are overwritten by `replication.factor`. -<2> This value is automatically set when deployment size is selected at setup. -<3> The maximum difference in number of pods between any two topology domains. The default is 1, and you cannot specify a value of 0. -<4> Defines zones in the form of a topology key that corresponds to a node label. diff --git a/modules/logging-loki-zone-fail-recovery.adoc b/modules/logging-loki-zone-fail-recovery.adoc deleted file mode 100644 index 7befcf756b42..000000000000 --- a/modules/logging-loki-zone-fail-recovery.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-loki.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-loki-zone-fail-recovery_{context}"] -= Recovering Loki pods from failed zones - -In {product-title} a zone failure happens when specific availability zone resources become inaccessible. Availability zones are isolated areas within a cloud provider's data center, aimed at enhancing redundancy and fault tolerance. If your {product-title} cluster is not configured to handle this, a zone failure can lead to service or data loss. - -Loki pods are part of a link:https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[StatefulSet], and they come with Persistent Volume Claims (PVCs) provisioned by a `StorageClass` object. Each Loki pod and its PVCs reside in the same zone. When a zone failure occurs in a cluster, the StatefulSet controller automatically attempts to recover the affected pods in the failed zone. 
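-
-As a first diagnostic step, you can check which zone each node belongs to and where the affected Loki pods are scheduled. This is a minimal sketch that assumes your nodes carry the standard `topology.kubernetes.io/zone` label set by the cloud provider:
-
-[source,terminal]
-----
-$ oc get nodes -L topology.kubernetes.io/zone
-$ oc get pods -n openshift-logging -o wide
-----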
- -[WARNING] -==== -The following procedure will delete the PVCs in the failed zone, and all data contained therein. To avoid complete data loss the replication factor field of the `LokiStack` CR should always be set to a value greater than 1 to ensure that Loki is replicating. -==== - -.Prerequisites -* Logging version 5.8 or later. -* Verify your `LokiStack` CR has a replication factor greater than 1. -* Zone failure detected by the control plane, and nodes in the failed zone are marked by cloud provider integration. - -The StatefulSet controller automatically attempts to reschedule pods in a failed zone. Because the associated PVCs are also in the failed zone, automatic rescheduling to a different zone does not work. You must manually delete the PVCs in the failed zone to allow successful re-creation of the stateful Loki Pod and its provisioned PVC in the new zone. - - -.Procedure -. List the pods in `Pending` status by running the following command: -+ -[source,terminal] ----- -oc get pods --field-selector status.phase==Pending -n openshift-logging ----- -+ -.Example `oc get pods` output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE # <1> -logging-loki-index-gateway-1 0/1 Pending 0 17m -logging-loki-ingester-1 0/1 Pending 0 16m -logging-loki-ruler-1 0/1 Pending 0 16m ----- -<1> These pods are in `Pending` status because their corresponding PVCs are in the failed zone. - -. List the PVCs in `Pending` status by running the following command: -+ -[source,terminal] ----- -oc get pvc -o=json -n openshift-logging | jq '.items[] | select(.status.phase == "Pending") | .metadata.name' -r ----- -+ -.Example `oc get pvc` output -[source,terminal] ----- -storage-logging-loki-index-gateway-1 -storage-logging-loki-ingester-1 -wal-logging-loki-ingester-1 -storage-logging-loki-ruler-1 -wal-logging-loki-ruler-1 ----- - -. Delete the PVC(s) for a pod by running the following command: -+ -[source,terminal] ----- -oc delete pvc ____ -n openshift-logging ----- -+ -. Then delete the pod(s) by running the following command: -+ -[source,terminal] ----- -oc delete pod ____ -n openshift-logging ----- - -Once these objects have been successfully deleted, they should automatically be rescheduled in an available zone. - -[id="logging-loki-zone-fail-term-state_{context}"] -== Troubleshooting PVC in a terminating state - -The PVCs might hang in the terminating state without being deleted, if PVC metadata finalizers are set to `kubernetes.io/pv-protection`. Removing the finalizers should allow the PVCs to delete successfully. - -. Remove the finalizer for each PVC by running the command below, then retry deletion. -+ -[source,terminal] ----- -oc patch pvc ____ -p '{"metadata":{"finalizers":null}}' -n openshift-logging ----- diff --git a/modules/logging-multiline-except.adoc b/modules/logging-multiline-except.adoc deleted file mode 100644 index 3717db92dbbc..000000000000 --- a/modules/logging-multiline-except.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -:_mod-docs-content-type: PROCEDURE -[id="logging-multiline-except_{context}"] -= Enabling multi-line exception detection - -Enables multi-line error detection of container logs. - -[WARNING] -==== -Enabling this feature could have performance implications and may require additional computing resources or alternate logging solutions. -==== - -Log parsers often incorrectly identify separate lines of the same exception as separate exceptions. 
This leads to extra log entries and an incomplete or inaccurate view of the traced information. - -.Example java exception -[,text] ----- -java.lang.NullPointerException: Cannot invoke "String.toString()" because "" is null - at testjava.Main.handle(Main.java:47) - at testjava.Main.printMe(Main.java:19) - at testjava.Main.main(Main.java:10) ----- - -* To enable logging to detect multi-line exceptions and reassemble them into a single log entry, ensure that the `ClusterLogForwarder` Custom Resource (CR) contains a `detectMultilineErrors` field, with a value of `true`. - - -.Example ClusterLogForwarder CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - pipelines: - - name: my-app-logs - inputRefs: - - application - outputRefs: - - default - detectMultilineErrors: true ----- - -== Details -When log messages appear as a consecutive sequence forming an exception stack trace, they are combined into a single, unified log record. The first log message's content is replaced with the concatenated content of all the message fields in the sequence. - -.Supported languages per collector -|=== -|Language | Fluentd | Vector - -|Java | ✓ | ✓ -|JS | ✓ | ✓ -|Ruby | ✓ | ✓ -|Python | ✓ | ✓ -|Golang | ✓ | ✓ -|PHP | ✓ | ✓ -|Dart | ✓ | ✓ -|=== - -== Troubleshooting -When enabled, the collector configuration will include a new section with type: `detect_exceptions` - -.Example vector configuration section ----- -[transforms.detect_exceptions_app-logs] - type = "detect_exceptions" - inputs = ["application"] - languages = ["All"] - group_by = ["kubernetes.namespace_name","kubernetes.pod_name","kubernetes.container_name"] - expire_after_ms = 2000 - multiline_flush_interval_ms = 1000 ----- - -.Example fluentd config section ----- - - ----- diff --git a/modules/logging-plugin-es-loki.adoc b/modules/logging-plugin-es-loki.adoc deleted file mode 100644 index 0b961149b0ec..000000000000 --- a/modules/logging-plugin-es-loki.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_visualization/log-visualization-ocp-console.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-plugin-es-loki_{context}"] -= Configuring the {log-plug} when you have the Elasticsearch log store and LokiStack installed - -In {logging} version 5.8 and later, if the Elasticsearch log store is your default log store but you have also installed the LokiStack, you can enable the {log-plug} by using the following procedure. - -.Prerequisites - -* You have administrator permissions. -* You have installed the {clo}, the {es-op}, and the {loki-op}. -* You have installed the {oc-first}. -* You have created a `ClusterLogging` custom resource (CR). - -.Procedure - -. Ensure that the {log-plug} is enabled by running the following command: -+ -[source,terminal] ----- -$ oc get consoles.operator.openshift.io cluster -o yaml |grep logging-view-plugin \ -|| oc patch consoles.operator.openshift.io cluster --type=merge \ ---patch '{ "spec": { "plugins": ["logging-view-plugin"]}}' ----- - -. 
Add the `.metadata.annotations.logging.openshift.io/ocp-console-migration-target: lokistack-dev` annotation to the `ClusterLogging` CR, by running the following command: -+ -[source,terminal] ----- -$ oc patch clusterlogging instance --type=merge --patch \ -'{ "metadata": { "annotations": { "logging.openshift.io/ocp-console-migration-target": "lokistack-dev" }}}' \ --n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -clusterlogging.logging.openshift.io/instance patched ----- - -.Verification - -* Verify that the annotation was added successfully, by running the following command and observing the output: -+ -[source,terminal] ----- -$ oc get clusterlogging instance \ --o=jsonpath='{.metadata.annotations.logging\.openshift\.io/ocp-console-migration-target}' \ --n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -"lokistack-dev" ----- - -The {log-plug} pod is now deployed. You can view logging data by navigating to the {product-title} web console and viewing the *Observe* -> *Logs* page. diff --git a/modules/logging-release-notes-5-7-8.adoc b/modules/logging-release-notes-5-7-8.adoc deleted file mode 100644 index d1f23123b2d5..000000000000 --- a/modules/logging-release-notes-5-7-8.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-release-notes.adoc -// logging-5-7-release-notes.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-7-8_{context}"] -= Logging 5.7.8 -This release includes link:https://access.redhat.com/errata/RHBA-2023:6730[OpenShift Logging Bug Fix Release 5.7.8]. - -[id="logging-release-notes-5-7-8-bug-fixes"] -== Bug fixes -* Before this update, there was a potential conflict when the same name was used for the `outputRefs` and `inputRefs` parameters in the `ClusterLogForwarder` custom resource (CR). As a result, the collector pods entered in a `CrashLoopBackOff` status. With this update, the output labels contain the `OUTPUT_` prefix to ensure a distinction between output labels and pipeline names. (link:https://issues.redhat.com/browse/LOG-4383[LOG-4383]) - -* Before this update, while configuring the JSON log parser, if you did not set the `structuredTypeKey` or `structuredTypeName` parameters for the Cluster Logging Operator, no alert would display about an invalid configuration. With this update, the Cluster Logging Operator informs you about the configuration issue. (link:https://issues.redhat.com/browse/LOG-4441[LOG-4441]) - -* Before this update, if the `hecToken` key was missing or incorrect in the secret specified for a Splunk output, the validation failed because the Vector forwarded logs to Splunk without a token. With this update, if the `hecToken` key is missing or incorrect, the validation fails with the `A non-empty hecToken entry is required` error message. (link:https://issues.redhat.com/browse/LOG-4580[LOG-4580]) - -* Before this update, selecting a date from the `Custom time range` for logs caused an error in the web console. With this update, you can select a date from the time range model in the web console successfully. 
(link:https://issues.redhat.com/browse/LOG-4684[LOG-4684]) - -[id="logging-release-notes-5-7-8-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-40217[CVE-2023-40217] -* link:https://access.redhat.com/security/cve/CVE-2023-44487[CVE-2023-44487] \ No newline at end of file diff --git a/modules/logging-release-notes-5-8-0.adoc b/modules/logging-release-notes-5-8-0.adoc deleted file mode 100644 index 8797b2d7ded9..000000000000 --- a/modules/logging-release-notes-5-8-0.adoc +++ /dev/null @@ -1,76 +0,0 @@ -//module included in logging-5-8-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-8-0_{context}"] -= Logging 5.8.0 - -This release includes link:https://access.redhat.com/errata/RHBA-2023:6139[OpenShift Logging Bug Fix Release 5.8.0] and link:https://access.redhat.com/errata/RHBA-2023:6134[OpenShift Logging Bug Fix Release 5.8.0 Kibana]. - -[id="logging-release-notes-5-8-0-deprecation-notice"] -== Deprecation notice - -In Logging 5.8, Elasticsearch, Fluentd, and Kibana are deprecated and are planned to be removed in Logging 6.0, which is expected to be shipped alongside a future release of {product-title}. Red Hat will provide critical and above CVE bug fixes and support for these components during the current release lifecycle, but these components will no longer receive feature enhancements. The Vector-based collector provided by the {clo} and LokiStack provided by the {loki-op} are the preferred Operators for log collection and storage. We encourage all users to adopt the Vector and Loki log stack, as this will be the stack that will be enhanced going forward. - -[id="logging-release-notes-5-8-0-enhancements"] -== Enhancements - -[id="logging-release-notes-5-8-0-log-collection"] -=== Log Collection - -* With this update, the LogFileMetricExporter is no longer deployed with the collector by default. You must manually create a `LogFileMetricExporter` custom resource (CR) to generate metrics from the logs produced by running containers. If you do not create the `LogFileMetricExporter` CR, you may see a *No datapoints found* message in the {product-title} web console dashboard for *Produced Logs*. (link:https://issues.redhat.com/browse/LOG-3819[LOG-3819]) - -* With this update, you can deploy multiple, isolated, and RBAC-protected `ClusterLogForwarder` custom resource (CR) instances in any namespace. This allows independent groups to forward desired logs to any destination while isolating their configuration from other collector deployments. (link:https://issues.redhat.com/browse/LOG-1343[LOG-1343]) -+ -[IMPORTANT] -==== -In order to support multi-cluster log forwarding in additional namespaces other than the `openshift-logging` namespace, you must update the {clo} to watch all namespaces. This functionality is supported by default in new {clo} version 5.8 installations. -==== - -* With this update, you can use the flow control or rate limiting mechanism to limit the volume of log data that can be collected or forwarded by dropping excess log records. The input limits prevent poorly-performing containers from overloading the {logging-uc} and the output limits put a ceiling on the rate of logs shipped to a given data store. (link:https://issues.redhat.com/browse/LOG-884[LOG-884]) - -* With this update, you can configure the log collector to look for HTTP connections and receive logs as an HTTP server, also known as a webhook. 
(link:https://issues.redhat.com/browse/LOG-4562[LOG-4562]) - -* With this update, you can configure audit policies to control which Kubernetes and OpenShift API server events are forwarded by the log collector. (link:https://issues.redhat.com/browse/LOG-3982[LOG-3982]) - -[id="logging-release-notes-5-8-0-log-storage"] -=== Log Storage - -* With this update, LokiStack administrators can have more fine-grained control over who can access which logs by granting access to logs on a namespace basis. (link:https://issues.redhat.com/browse/LOG-3841[LOG-3841]) - -* With this update, the {loki-op} introduces `PodDisruptionBudget` configuration on LokiStack deployments to ensure normal operations during {product-title} cluster restarts by keeping ingestion and the query path available. (link:https://issues.redhat.com/browse/LOG-3839[LOG-3839]) - -* With this update, the reliability of existing LokiStack installations are seamlessly improved by applying a set of default Affinity and Anti-Affinity policies. -(link:https://issues.redhat.com/browse/LOG-3840[LOG-3840]) - -* With this update, you can manage zone-aware data replication as an administrator in LokiStack, in order to enhance reliability in the event of a zone failure. (link:https://issues.redhat.com/browse/LOG-3266[LOG-3266]) - -* With this update, a new supported small-scale LokiStack size of 1x.extra-small is introduced for {product-title} clusters hosting a few workloads and smaller ingestion volumes (up to 100GB/day). (link:https://issues.redhat.com/browse/LOG-4329[LOG-4329]) - -* With this update, the LokiStack administrator has access to an official Loki dashboard to inspect the storage performance and the health of each component. (link:https://issues.redhat.com/browse/LOG-4327[LOG-4327]) - -[id="logging-release-notes-5-8-0-log-console"] -=== Log Console - -* With this update, you can enable the Logging Console Plugin when Elasticsearch is the default Log Store. (link:https://issues.redhat.com/browse/LOG-3856[LOG-3856]) - -* With this update, {product-title} application owners can receive notifications for application log-based alerts on the {product-title} web console *Developer* perspective for {product-title} version 4.14 and later. (link:https://issues.redhat.com/browse/LOG-3548[LOG-3548]) - -[id="logging-release-notes-5-8-0-known-issues"] -== Known Issues - -* Currently, Splunk log forwarding might not work after upgrading to version 5.8 of the {clo}. This issue is caused by transitioning from OpenSSL version 1.1.1 to version 3.0.7. In the newer OpenSSL version, there is a default behavior change, where connections to TLS 1.2 endpoints are rejected if they do not expose the link:https://datatracker.ietf.org/doc/html/rfc5746[RFC 5746] extension. -+ -As a workaround, enable TLS 1.3 support on the TLS terminating load balancer in front of the Splunk HEC (HTTP Event Collector) endpoint. Splunk is a third-party system and this should be configured from the Splunk end. - -* Currently, there is a flaw in handling multiplexed streams in the HTTP/2 protocol, where you can repeatedly make a request for a new multiplex stream and immediately send an `RST_STREAM` frame to cancel it. This created extra work for the server set up and tore down the streams, resulting in a denial of service due to server resource consumption. There is currently no workaround for this issue. 
(link:https://issues.redhat.com/browse/LOG-4609[LOG-4609]) - -* Currently, when using FluentD as the collector, the collector pod cannot start on the {product-title} IPv6-enabled cluster. The pod logs produce the `fluentd pod [error]: unexpected error error_class=SocketError error="getaddrinfo: Name or service not known` error. There is currently no workaround for this issue. (link:https://issues.redhat.com/browse/LOG-4706[LOG-4706]) - -* Currently, the log alert is not available on an IPv6-enabled cluster. There is currently no workaround for this issue. (link:https://issues.redhat.com/browse/LOG-4709[LOG-4709]) - -* Currently, `must-gather` cannot gather any logs on a FIPS-enabled cluster, because the required OpenSSL library is not available in the `cluster-logging-rhel9-operator`. There is currently no workaround for this issue. (link:https://issues.redhat.com/browse/LOG-4403[LOG-4403]) - -* Currently, when deploying the {logging} version 5.8 on a FIPS-enabled cluster, the collector pods cannot start and are stuck in `CrashLoopBackOff` status, while using FluentD as a collector. There is currently no workaround for this issue. (link:https://issues.redhat.com/browse/LOG-3933[LOG-3933]) - -[id="logging-release-notes-5-8-0-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-40217[CVE-2023-40217] diff --git a/modules/logging-release-notes-5-8-1.adoc b/modules/logging-release-notes-5-8-1.adoc deleted file mode 100644 index e04a3580f5ee..000000000000 --- a/modules/logging-release-notes-5-8-1.adoc +++ /dev/null @@ -1,127 +0,0 @@ -//module included in logging-5-8-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-8-1_{context}"] -= Logging 5.8.1 - -This release includes link:https://access.redhat.com/errata/RHSA-2023:7720[OpenShift Logging Bug Fix Release 5.8.1] and link:https://access.redhat.com/errata/RHBA-2023:7717[OpenShift Logging Bug Fix Release 5.8.1 Kibana]. - -[id="logging-release-notes-5-8-1-enhancements"] -== Enhancements - -[id="logging-release-notes-5-8-1-log-collection"] -=== Log Collection - -* With this update, while configuring Vector as a collector, you can add logic to the {clo} to use a token specified in the secret in place of the token associated with the service account. (link:https://issues.redhat.com/browse/LOG-4780[LOG-4780]) - -* With this update, the *BoltDB Shipper* Loki dashboards are now renamed to *Index* dashboards. (link:https://issues.redhat.com/browse/LOG-4828[LOG-4828]) - -[id="logging-release-notes-5-8-1-bug-fixes"] -== Bug fixes - -* Before this update, the `ClusterLogForwarder` created empty indices after enabling the parsing of JSON logs, even when the rollover conditions were not met. With this update, the `ClusterLogForwarder` skips the rollover when the `write-index` is empty. (link:https://issues.redhat.com/browse/LOG-4452[LOG-4452]) - -* Before this update, the Vector set the `default` log level incorrectly. With this update, the correct log level is set by improving the enhancement of regular expression, or `regexp`, for log level detection. (link:https://issues.redhat.com/browse/LOG-4480[LOG-4480]) - -* Before this update, during the process of creating index patterns, the default alias was missing from the initial index in each log output. As a result, Kibana users were unable to create index patterns by using {es-op}. This update adds the missing aliases to {es-op}, resolving the issue. Kibana users can now create index patterns that include the `{app,infra,audit}-000001` indexes. 
(link:https://issues.redhat.com/browse/LOG-4683[LOG-4683]) - -* Before this update, Fluentd collector pods were in a `CrashLoopBackOff` state due to binding of the Prometheus server on IPv6 clusters. With this update, the collectors work properly on IPv6 clusters. (link:https://issues.redhat.com/browse/LOG-4706[LOG-4706]) - -* Before this update, the {clo} would undergo numerous reconciliations whenever there was a change in the `ClusterLogForwarder`. With this update, the {clo} disregards the status changes in the collector daemonsets that triggered the reconciliations. (link:https://issues.redhat.com/browse/LOG-4741[LOG-4741]) - -* Before this update, the Vector log collector pods were stuck in the `CrashLoopBackOff` state on {ibm-power-title} machines. With this update, the Vector log collector pods start successfully on {ibm-power-title} architecture machines. (link:https://issues.redhat.com/browse/LOG-4768[LOG-4768]) - -* Before this update, forwarding with a legacy forwarder to an internal LokiStack would produce SSL certificate errors using Fluentd collector pods. With this update, the log collector service account is used by default for authentication, using the associated token and `ca.crt`. (link:https://issues.redhat.com/browse/LOG-4791[LOG-4791]) - -* Before this update, forwarding with a legacy forwarder to an internal LokiStack would produce SSL certificate errors using Vector collector pods. With this update, the log collector service account is used by default for authentication and also using the associated token and `ca.crt`. (link:https://issues.redhat.com/browse/LOG-4852[LOG-4852]) - -* Before this fix, IPv6 addresses would not be parsed correctly after evaluating a host or multiple hosts for placeholders. With this update, IPv6 addresses are correctly parsed. (link:https://issues.redhat.com/browse/LOG-4811[LOG-4811]) - -* Before this update, it was necessary to create a `ClusterRoleBinding` to collect audit permissions for HTTP receiver inputs. With this update, it is not necessary to create the `ClusterRoleBinding` because the endpoint already depends upon the cluster certificate authority. (link:https://issues.redhat.com/browse/LOG-4815[LOG-4815]) - -* Before this update, the {loki-op} did not mount a custom CA bundle to the ruler pods. As a result, during the process to evaluate alerting or recording rules, object storage access failed. With this update, the {loki-op} mounts the custom CA bundle to all ruler pods. The ruler pods can download logs from object storage to evaluate alerting or recording rules. (link:https://issues.redhat.com/browse/LOG-4836[LOG-4836]) - -* Before this update, while removing the `inputs.receiver` section in the `ClusterLogForwarder`, the HTTP input services and its associated secrets were not deleted. With this update, the HTTP input resources are deleted when not needed. (link:https://issues.redhat.com/browse/LOG-4612[LOG-4612]) - -* Before this update, the `ClusterLogForwarder` indicated validation errors in the status, but the outputs and the pipeline status did not accurately reflect the specific issues. With this update, the pipeline status displays the validation failure reasons correctly in case of misconfigured outputs, inputs, or filters. (link:https://issues.redhat.com/browse/LOG-4821[LOG-4821]) - -* Before this update, changing a `LogQL` query that used controls such as time range or severity changed the label matcher operator defining it like a regular expression. 
With this update, regular expression operators remain unchanged when updating the query. (link:https://issues.redhat.com/browse/LOG-4841[LOG-4841]) - -[id="logging-release-notes-5-8-1-CVEs"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2007-4559[CVE-2007-4559] -* link:https://access.redhat.com/security/cve/CVE-2021-3468[CVE-2021-3468] -* link:https://access.redhat.com/security/cve/CVE-2021-3502[CVE-2021-3502] -* link:https://access.redhat.com/security/cve/CVE-2021-3826[CVE-2021-3826] -* link:https://access.redhat.com/security/cve/CVE-2021-43618[CVE-2021-43618] -* link:https://access.redhat.com/security/cve/CVE-2022-3523[CVE-2022-3523] -* link:https://access.redhat.com/security/cve/CVE-2022-3565[CVE-2022-3565] -* link:https://access.redhat.com/security/cve/CVE-2022-3594[CVE-2022-3594] -* link:https://access.redhat.com/security/cve/CVE-2022-4285[CVE-2022-4285] -* link:https://access.redhat.com/security/cve/CVE-2022-38457[CVE-2022-38457] -* link:https://access.redhat.com/security/cve/CVE-2022-40133[CVE-2022-40133] -* link:https://access.redhat.com/security/cve/CVE-2022-40982[CVE-2022-40982] -* link:https://access.redhat.com/security/cve/CVE-2022-41862[CVE-2022-41862] -* link:https://access.redhat.com/security/cve/CVE-2022-42895[CVE-2022-42895] -* link:https://access.redhat.com/security/cve/CVE-2023-0597[CVE-2023-0597] -* link:https://access.redhat.com/security/cve/CVE-2023-1073[CVE-2023-1073] -* link:https://access.redhat.com/security/cve/CVE-2023-1074[CVE-2023-1074] -* link:https://access.redhat.com/security/cve/CVE-2023-1075[CVE-2023-1075] -* link:https://access.redhat.com/security/cve/CVE-2023-1076[CVE-2023-1076] -* link:https://access.redhat.com/security/cve/CVE-2023-1079[CVE-2023-1079] -* link:https://access.redhat.com/security/cve/CVE-2023-1206[CVE-2023-1206] -* link:https://access.redhat.com/security/cve/CVE-2023-1249[CVE-2023-1249] -* link:https://access.redhat.com/security/cve/CVE-2023-1252[CVE-2023-1252] -* link:https://access.redhat.com/security/cve/CVE-2023-1652[CVE-2023-1652] -* link:https://access.redhat.com/security/cve/CVE-2023-1855[CVE-2023-1855] -* link:https://access.redhat.com/security/cve/CVE-2023-1981[CVE-2023-1981] -* link:https://access.redhat.com/security/cve/CVE-2023-1989[CVE-2023-1989] -* link:https://access.redhat.com/security/cve/CVE-2023-2731[CVE-2023-2731] -* link:https://access.redhat.com/security/cve/CVE-2023-3138[CVE-2023-3138] -* link:https://access.redhat.com/security/cve/CVE-2023-3141[CVE-2023-3141] -* link:https://access.redhat.com/security/cve/CVE-2023-3161[CVE-2023-3161] -* link:https://access.redhat.com/security/cve/CVE-2023-3212[CVE-2023-3212] -* link:https://access.redhat.com/security/cve/CVE-2023-3268[CVE-2023-3268] -* link:https://access.redhat.com/security/cve/CVE-2023-3316[CVE-2023-3316] -* link:https://access.redhat.com/security/cve/CVE-2023-3358[CVE-2023-3358] -* link:https://access.redhat.com/security/cve/CVE-2023-3576[CVE-2023-3576] -* link:https://access.redhat.com/security/cve/CVE-2023-3609[CVE-2023-3609] -* link:https://access.redhat.com/security/cve/CVE-2023-3772[CVE-2023-3772] -* link:https://access.redhat.com/security/cve/CVE-2023-3773[CVE-2023-3773] -* link:https://access.redhat.com/security/cve/CVE-2023-4016[CVE-2023-4016] -* link:https://access.redhat.com/security/cve/CVE-2023-4128[CVE-2023-4128] -* link:https://access.redhat.com/security/cve/CVE-2023-4155[CVE-2023-4155] -* link:https://access.redhat.com/security/cve/CVE-2023-4194[CVE-2023-4194] -* 
link:https://access.redhat.com/security/cve/CVE-2023-4206[CVE-2023-4206] -* link:https://access.redhat.com/security/cve/CVE-2023-4207[CVE-2023-4207] -* link:https://access.redhat.com/security/cve/CVE-2023-4208[CVE-2023-4208] -* link:https://access.redhat.com/security/cve/CVE-2023-4273[CVE-2023-4273] -* link:https://access.redhat.com/security/cve/CVE-2023-4641[CVE-2023-4641] -* link:https://access.redhat.com/security/cve/CVE-2023-22745[CVE-2023-22745] -* link:https://access.redhat.com/security/cve/CVE-2023-26545[CVE-2023-26545] -* link:https://access.redhat.com/security/cve/CVE-2023-26965[CVE-2023-26965] -* link:https://access.redhat.com/security/cve/CVE-2023-26966[CVE-2023-26966] -* link:https://access.redhat.com/security/cve/CVE-2023-27522[CVE-2023-27522] -* link:https://access.redhat.com/security/cve/CVE-2023-29491[CVE-2023-29491] -* link:https://access.redhat.com/security/cve/CVE-2023-29499[CVE-2023-29499] -* link:https://access.redhat.com/security/cve/CVE-2023-30456[CVE-2023-30456] -* link:https://access.redhat.com/security/cve/CVE-2023-31486[CVE-2023-31486] -* link:https://access.redhat.com/security/cve/CVE-2023-32324[CVE-2023-32324] -* link:https://access.redhat.com/security/cve/CVE-2023-32573[CVE-2023-32573] -* link:https://access.redhat.com/security/cve/CVE-2023-32611[CVE-2023-32611] -* link:https://access.redhat.com/security/cve/CVE-2023-32665[CVE-2023-32665] -* link:https://access.redhat.com/security/cve/CVE-2023-33203[CVE-2023-33203] -* link:https://access.redhat.com/security/cve/CVE-2023-33285[CVE-2023-33285] -* link:https://access.redhat.com/security/cve/CVE-2023-33951[CVE-2023-33951] -* link:https://access.redhat.com/security/cve/CVE-2023-33952[CVE-2023-33952] -* link:https://access.redhat.com/security/cve/CVE-2023-34241[CVE-2023-34241] -* link:https://access.redhat.com/security/cve/CVE-2023-34410[CVE-2023-34410] -* link:https://access.redhat.com/security/cve/CVE-2023-35825[CVE-2023-35825] -* link:https://access.redhat.com/security/cve/CVE-2023-36054[CVE-2023-36054] -* link:https://access.redhat.com/security/cve/CVE-2023-37369[CVE-2023-37369] -* link:https://access.redhat.com/security/cve/CVE-2023-38197[CVE-2023-38197] -* link:https://access.redhat.com/security/cve/CVE-2023-38545[CVE-2023-38545] -* link:https://access.redhat.com/security/cve/CVE-2023-38546[CVE-2023-38546] -* link:https://access.redhat.com/security/cve/CVE-2023-39191[CVE-2023-39191] -* link:https://access.redhat.com/security/cve/CVE-2023-39975[CVE-2023-39975] -* link:https://access.redhat.com/security/cve/CVE-2023-44487[CVE-2023-44487] diff --git a/modules/logging-release-notes-5-8-2.adoc b/modules/logging-release-notes-5-8-2.adoc deleted file mode 100644 index f7225b7fa29d..000000000000 --- a/modules/logging-release-notes-5-8-2.adoc +++ /dev/null @@ -1,28 +0,0 @@ -//module included in logging-5-8-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-8-2"] -= Logging 5.8.2 - -This release includes link:https://access.redhat.com/errata/RHSA-2024:0271[OpenShift Logging Bug Fix Release 5.8.2]. - -[id="logging-release-notes-5-8-2-bug-fixes"] -== Bug fixes -* Before this update, the LokiStack ruler pods would not format the IPv6 pod IP in HTTP URLs used for cross pod communication, causing querying rules and alerts through the Prometheus-compatible API to fail. With this update, the LokiStack ruler pods encapsulate the IPv6 pod IP in square brackets, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-4890[LOG-4890]) - -* Before this update, the developer console logs did not account for the current namespace, resulting in query rejection for users without cluster-wide log access. With this update, namespace inclusion has been corrected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4947[LOG-4947]) - -* Before this update, the logging view plugin of the {product-title} web console did not allow for custom node placement and tolerations. With this update, defining custom node placements and tolerations has been added to the logging view plugin of the {product-title} web console. (link:https://issues.redhat.com/browse/LOG-4912[LOG-4912]) - -//// -* Before this update, in {product-title} Release Candidate 4.15, the Loki ruler was not able to send alerts to `Alertmanager` user workload monitoring due to a permissions issue. With this update, the Loki Operator RBAC permissions allow sending alerts to `Alertmanager`, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4951[LOG-4951]) -//// - -[id="logging-release-notes-5-8-2-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-44638[CVE-2022-44638] -* link:https://access.redhat.com/security/cve/CVE-2023-1192[CVE-2023-1192] -* link:https://access.redhat.com/security/cve/CVE-2023-5345[CVE-2023-5345] -* link:https://access.redhat.com/security/cve/CVE-2023-20569[CVE-2023-20569] -* link:https://access.redhat.com/security/cve/CVE-2023-26159[CVE-2023-26159] -* link:https://access.redhat.com/security/cve/CVE-2023-39615[CVE-2023-39615] -* link:https://access.redhat.com/security/cve/CVE-2023-45871[CVE-2023-45871] diff --git a/modules/logging-release-notes-5-8-3.adoc b/modules/logging-release-notes-5-8-3.adoc deleted file mode 100644 index 3c4a656d6263..000000000000 --- a/modules/logging-release-notes-5-8-3.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_release_notes/logging-5-8-release-notes.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-8-3_{context}"] -= Logging 5.8.3 -This release includes link:https://access.redhat.com/errata/RHBA-2024:0693[Logging Bug Fix 5.8.3] and -link:https://access.redhat.com/errata/RHSA-2024:0728[Logging Security Fix 5.8.3] - -[id="logging-release-notes-5-8-3-bug-fixes"] -== Bug fixes -* Before this update, when configured to read a custom S3 Certificate Authority, the Loki Operator would not automatically update the configuration when the name of the ConfigMap or the contents changed. With this update, the Loki Operator watches for changes to the ConfigMap and automatically updates the generated configuration. (link:https://issues.redhat.com/browse/LOG-4969[LOG-4969]) - -* Before this update, Loki outputs configured without a valid URL caused the collector pods to crash. With this update, outputs are subject to URL validation, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4822[LOG-4822]) - -* Before this update, the Cluster Logging Operator would generate collector configuration fields for outputs that did not specify a secret to use the service account bearer token. With this update, an output does not require authentication, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4962[LOG-4962]) - -* Before this update, the `tls.insecureSkipVerify` field of an output could not be set to a value of `true` without a secret defined. With this update, a secret is no longer required to set this value. (link:https://issues.redhat.com/browse/LOG-4963[LOG-4963])
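The following is a minimal sketch of an output that relies on this behavior. It assumes the `logging.openshift.io/v1` `ClusterLogForwarder` API from this release; the output name, type, and receiver URL are placeholders rather than values from the release note:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: example-http-output             # hypothetical output name
    type: http
    url: https://log-receiver.example.com # hypothetical receiver URL
    tls:
      insecureSkipVerify: true            # no secret is required to set this value
  pipelines:
  - name: example-pipeline
    inputRefs:
    - application
    outputRefs:
    - example-http-output
----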
- -* Before this update, output configurations allowed the combination of an insecure (HTTP) URL with TLS authentication. With this update, outputs configured for TLS authentication require a secure (HTTPS) URL. (link:https://issues.redhat.com/browse/LOG-4893[LOG-4893]) - -[id="logging-release-notes-5-8-3-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-35937[CVE-2021-35937] -* link:https://access.redhat.com/security/cve/CVE-2021-35938[CVE-2021-35938] -* link:https://access.redhat.com/security/cve/CVE-2021-35939[CVE-2021-35939] -* link:https://access.redhat.com/security/cve/CVE-2023-7104[CVE-2023-7104] -* link:https://access.redhat.com/security/cve/CVE-2023-27043[CVE-2023-27043] -* link:https://access.redhat.com/security/cve/CVE-2023-48795[CVE-2023-48795] -* link:https://access.redhat.com/security/cve/CVE-2023-51385[CVE-2023-51385] -* link:https://access.redhat.com/security/cve/CVE-2024-0553[CVE-2024-0553] diff --git a/modules/logging-release-notes-5-8-4.adoc b/modules/logging-release-notes-5-8-4.adoc deleted file mode 100644 index 18eaa974be9b..000000000000 --- a/modules/logging-release-notes-5-8-4.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_release_notes/logging-5-8-release-notes.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-8-4_{context}"] -= Logging 5.8.4 -This release includes link:https://access.redhat.com/errata/RHBA-2024:1065[OpenShift Logging Bug Fix Release 5.8.4]. - -[id="logging-release-notes-5-8-4-bug-fixes"] -== Bug fixes - -* Before this update, the developer console's logs did not account for the current namespace, resulting in query rejection for users without cluster-wide log access. With this update, all supported OCP versions ensure correct namespace inclusion. (link:https://issues.redhat.com/browse/LOG-4905[LOG-4905]) - -* Before this update, the Cluster Logging Operator deployed `ClusterRoles` supporting LokiStack deployments only when the default log output was LokiStack. With this update, the roles are split into two groups: read and write. The write roles are deployed based on the setting of the default log storage, just as all the roles were before. The read roles are deployed based on whether the logging console plugin is active. (link:https://issues.redhat.com/browse/LOG-4987[LOG-4987]) - -* Before this update, multiple `ClusterLogForwarders` defining the same input receiver name had their service endlessly reconciled because of changing `ownerReferences` on one service. With this update, each receiver input will have its own service named with the convention of `-`. (link:https://issues.redhat.com/browse/LOG-5009[LOG-5009]) - -* Before this update, the `ClusterLogForwarder` did not report errors when forwarding logs to CloudWatch without a secret. With this update, the following error message appears when forwarding logs to CloudWatch without a secret: `secret must be provided for cloudwatch output`. (link:https://issues.redhat.com/browse/LOG-5021[LOG-5021]) - -* Before this update, the `log_forwarder_input_info` included `application`, `infrastructure`, and `audit` input metric points. With this update, `http` is also added as a metric point. 
(link:https://issues.redhat.com/browse/LOG-5043[LOG-5043]) - -[id="logging-release-notes-5-8-4-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-35937[CVE-2021-35937] -* link:https://access.redhat.com/security/cve/CVE-2021-35938[CVE-2021-35938] -* link:https://access.redhat.com/security/cve/CVE-2021-35939[CVE-2021-35939] -* link:https://access.redhat.com/security/cve/CVE-2022-3545[CVE-2022-3545] -* link:https://access.redhat.com/security/cve/CVE-2022-24963[CVE-2022-24963] -* link:https://access.redhat.com/security/cve/CVE-2022-36402[CVE-2022-36402] -* link:https://access.redhat.com/security/cve/CVE-2022-41858[CVE-2022-41858] -* link:https://access.redhat.com/security/cve/CVE-2023-2166[CVE-2023-2166] -* link:https://access.redhat.com/security/cve/CVE-2023-2176[CVE-2023-2176] -* link:https://access.redhat.com/security/cve/CVE-2023-3777[CVE-2023-3777] -* link:https://access.redhat.com/security/cve/CVE-2023-3812[CVE-2023-3812] -* link:https://access.redhat.com/security/cve/CVE-2023-4015[CVE-2023-4015] -* link:https://access.redhat.com/security/cve/CVE-2023-4622[CVE-2023-4622] -* link:https://access.redhat.com/security/cve/CVE-2023-4623[CVE-2023-4623] -* link:https://access.redhat.com/security/cve/CVE-2023-5178[CVE-2023-5178] -* link:https://access.redhat.com/security/cve/CVE-2023-5363[CVE-2023-5363] -* link:https://access.redhat.com/security/cve/CVE-2023-5388[CVE-2023-5388] -* link:https://access.redhat.com/security/cve/CVE-2023-5633[CVE-2023-5633] -* link:https://access.redhat.com/security/cve/CVE-2023-6679[CVE-2023-6679] -* link:https://access.redhat.com/security/cve/CVE-2023-7104[CVE-2023-7104] -* link:https://access.redhat.com/security/cve/CVE-2023-27043[CVE-2023-27043] -* link:https://access.redhat.com/security/cve/CVE-2023-38409[CVE-2023-38409] -* link:https://access.redhat.com/security/cve/CVE-2023-40283[CVE-2023-40283] -* link:https://access.redhat.com/security/cve/CVE-2023-42753[CVE-2023-42753] -* link:https://access.redhat.com/security/cve/CVE-2023-43804[CVE-2023-43804] -* link:https://access.redhat.com/security/cve/CVE-2023-45803[CVE-2023-45803] -* link:https://access.redhat.com/security/cve/CVE-2023-46813[CVE-2023-46813] -* link:https://access.redhat.com/security/cve/CVE-2024-20918[CVE-2024-20918] -* link:https://access.redhat.com/security/cve/CVE-2024-20919[CVE-2024-20919] -* link:https://access.redhat.com/security/cve/CVE-2024-20921[CVE-2024-20921] -* link:https://access.redhat.com/security/cve/CVE-2024-20926[CVE-2024-20926] -* link:https://access.redhat.com/security/cve/CVE-2024-20945[CVE-2024-20945] -* link:https://access.redhat.com/security/cve/CVE-2024-20952[CVE-2024-20952] \ No newline at end of file diff --git a/modules/logging-release-notes-5-9-0.adoc b/modules/logging-release-notes-5-9-0.adoc deleted file mode 100644 index 796c218eb77a..000000000000 --- a/modules/logging-release-notes-5-9-0.adoc +++ /dev/null @@ -1,80 +0,0 @@ -//module included in logging-5-9-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-9-0_{context}"] -= Logging 5.9.0 - -This release includes link:https://access.redhat.com/errata/RHBA-2024:1591[OpenShift Logging Bug Fix Release 5.9.0] - -[id="logging-release-notes-5-9-0-removal-notice"] -== Removal notice - -The {logging-uc} 5.9 release does not contain an updated version of the {es-op}. Instances of {es-op} from prior {logging} releases, remain supported until the EOL of the {logging} release. As an alternative to using the {es-op} to manage the default log storage, you can use the {loki-op}. 
For more information on the {logging-uc} lifecycle dates, see link:https://access.redhat.com/support/policy/updates/openshift_operators#platform-agnostic[Platform Agnostic Operators]. - -[id="logging-release-notes-5-9-0-deprecation-notice"] -== Deprecation notice - -* In {logging-uc} 5.9, Fluentd and Kibana are deprecated and are planned to be removed in {logging-uc} 6.0, which is expected to be shipped alongside a future release of {product-title}. Red Hat will provide critical and above CVE bug fixes and support for these components during the current release lifecycle, but these components will no longer receive feature enhancements. The Vector-based collector provided by the {clo} and LokiStack provided by the {loki-op} are the preferred Operators for log collection and storage. We encourage all users to adopt the Vector and Loki log stack, as this is the stack that will be enhanced going forward. - -* In {logging-uc} 5.9, the `Fields` option for the Splunk output type was never implemented and is now deprecated. It will be removed in a future release. - -[id="logging-release-notes-5-9-0-enhancements"] -== Enhancements - -[id="logging-release-notes-5-9-0-log-collection"] -=== Log Collection -* This enhancement adds the ability to refine the process of log collection by using a workload's metadata to `drop` or `prune` logs based on their content. Additionally, it allows the collection of infrastructure logs, such as journal or container logs, and audit logs, such as `kube api` or `ovn` logs, to only collect individual sources. (link:https://issues.redhat.com/browse/LOG-2155[LOG-2155]) - -* This enhancement introduces a new type of remote log receiver, the syslog receiver. You can configure it to expose a port over a network, allowing external systems to send syslog logs using compatible tools such as rsyslog. (link:https://issues.redhat.com/browse/LOG-3527[LOG-3527]) - -* With this update, the `ClusterLogForwarder` API now supports log forwarding to Azure Monitor Logs, giving users better monitoring abilities. This feature helps users to maintain optimal system performance and streamline the log analysis processes in Azure Monitor, which speeds up issue resolution and improves operational efficiency. (link:https://issues.redhat.com/browse/LOG-4605[LOG-4605]) - -* This enhancement improves collector resource utilization by deploying collectors as a deployment with two replicas. This occurs when the only input source defined in the `ClusterLogForwarder` custom resource (CR) is a receiver input instead of using a daemon set on all nodes. Additionally, collectors deployed in this manner do not mount the host file system. To use this enhancement, you need to annotate the `ClusterLogForwarder` CR with the `logging.openshift.io/dev-preview-enable-collector-as-deployment` annotation. (link:https://issues.redhat.com/browse/LOG-4779[LOG-4779]) - -* This enhancement introduces the capability for custom tenant configuration across all supported outputs, facilitating the organization of log records in a logical manner. However, it does not permit custom tenant configuration for {logging} managed storage. (link:https://issues.redhat.com/browse/LOG-4843[LOG-4843]) - -* With this update, the `ClusterLogForwarder` CR that specifies an application input with one or more infrastructure namespaces like `default`, `openshift*`, or `kube*` now requires a service account with the `collect-infrastructure-logs` role. (link:https://issues.redhat.com/browse/LOG-4943[LOG-4943])
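The following is a minimal sketch of such a forwarder. It assumes the `logging.openshift.io/v1` API; the forwarder name, namespace, service account, and output are placeholders, and the service account is assumed to be bound to the `collect-infrastructure-logs` cluster role, for example through a `ClusterRoleBinding`:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: example-forwarder       # hypothetical name
  namespace: example-namespace  # hypothetical namespace
spec:
  serviceAccountName: example-sa # assumed to be bound to the collect-infrastructure-logs cluster role
  inputs:
  - name: apps-plus-infra-namespaces
    application:
      namespaces:
      - default
      - "openshift*"
  outputs:
  - name: example-loki
    type: loki
    url: https://loki.example.com # hypothetical receiver URL
  pipelines:
  - name: example-pipeline
    inputRefs:
    - apps-plus-infra-namespaces
    outputRefs:
    - example-loki
----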
- -* This enhancement introduces the capability for tuning some output settings, such as compression, retry duration, and maximum payloads, to match the characteristics of the receiver. Additionally, this feature includes a delivery mode to allow administrators to choose between throughput and log durability. For example, the `AtLeastOnce` option configures minimal disk buffering of collected logs so that the collector can deliver those logs after a restart. (link:https://issues.redhat.com/browse/LOG-5026[LOG-5026]) - -* This enhancement adds three new Prometheus alerts, warning users about the deprecation of Elasticsearch, Fluentd, and Kibana. (link:https://issues.redhat.com/browse/LOG-5055[LOG-5055]) - -[id="logging-release-notes-5-9-0-log-storage"] -=== Log Storage - -* This enhancement in LokiStack improves support for OTEL by using the new V13 object storage format and enabling automatic stream sharding by default. This also prepares the collector for future enhancements and configurations. (link:https://issues.redhat.com/browse/LOG-4538[LOG-4538]) - -* This enhancement introduces support for short-lived token workload identity federation with Azure and AWS log stores for STS-enabled {product-title} 4.14 and later clusters. Local storage requires the addition of a `CredentialMode: static` annotation under `spec.storage.secret` in the LokiStack CR. (link:https://issues.redhat.com/browse/LOG-4540[LOG-4540]) - -* With this update, the validation of the Azure storage secret is now extended to give early warning for certain error conditions. (link:https://issues.redhat.com/browse/LOG-4571[LOG-4571]) - -* With this update, Loki now adds upstream and downstream support for the {gcp-short} workload identity federation mechanism. This allows authenticated and authorized access to the corresponding object storage services. (link:https://issues.redhat.com/browse/LOG-4754[LOG-4754]) - -[id="logging-release-notes-5-9-0-bug-fixes"] -== Bug Fixes -* Before this update, the {logging} must-gather could not collect any logs on a FIPS-enabled cluster. With this update, a new `oc` client is available in `cluster-logging-rhel9-operator`, and must-gather works properly on FIPS clusters. (link:https://issues.redhat.com/browse/LOG-4403[LOG-4403]) - -* Before this update, the LokiStack ruler pods could not format the IPv6 pod IP in HTTP URLs used for cross-pod communication. This issue caused querying rules and alerts through the Prometheus-compatible API to fail. With this update, the LokiStack ruler pods encapsulate the IPv6 pod IP in square brackets, resolving the problem. Now, querying rules and alerts through the Prometheus-compatible API works just like in IPv4 environments. (link:https://issues.redhat.com/browse/LOG-4709[LOG-4709]) - -* Before this fix, the YAML content from the {logging} must-gather was exported in a single line, making it unreadable. With this update, the YAML white spaces are preserved, ensuring that the file is properly formatted. (link:https://issues.redhat.com/browse/LOG-4792[LOG-4792]) - -* Before this update, when the `ClusterLogForwarder` CR was enabled, the {clo} could run into a nil pointer exception when `ClusterLogging.Spec.Collection` was nil. With this update, the issue is now resolved in the {clo}. 
(link:https://issues.redhat.com/browse/LOG-5006[LOG-5006]) - -* Before this update, in specific corner cases, replacing the `ClusterLogForwarder` CR status field caused the `resourceVersion` to constantly update due to changing timestamps in `Status` conditions. This condition led to an infinite reconciliation loop. With this update, all status conditions synchronize, so that timestamps remain unchanged if conditions stay the same. (link:https://issues.redhat.com/browse/LOG-5007[LOG-5007]) - -* Before this update, the internal buffering behavior was set to `drop_newest` to address high memory consumption by the collector, which resulted in significant log loss. With this update, the behavior reverts to using the collector defaults. (link:https://issues.redhat.com/browse/LOG-5123[LOG-5123]) - -* Before this update, the {loki-op} `ServiceMonitor` in the `openshift-operators-redhat` namespace used static token and CA files for authentication, causing errors in the Prometheus Operator in the User Workload Monitoring spec on the `ServiceMonitor` configuration. With this update, the {loki-op} `ServiceMonitor` in the `openshift-operators-redhat` namespace now references a service account token secret by a `LocalReference` object. This approach allows the User Workload Monitoring spec in the Prometheus Operator to handle the {loki-op} `ServiceMonitor` successfully, enabling Prometheus to scrape the {loki-op} metrics. (link:https://issues.redhat.com/browse/LOG-5165[LOG-5165]) - -* Before this update, the configuration of the {loki-op} `ServiceMonitor` could match many Kubernetes services, resulting in the {loki-op} metrics being collected multiple times. With this update, the configuration of `ServiceMonitor` now only matches the dedicated metrics service. (link:https://issues.redhat.com/browse/LOG-5212[LOG-5212]) - -[id="logging-release-notes-5-9-0-known-issues"] -== Known Issues -None. - -[id="logging-release-notes-5-9-0-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-5363[CVE-2023-5363] -* link:https://access.redhat.com/security/cve/CVE-2023-5981[CVE-2023-5981] -* link:https://access.redhat.com/security/cve/CVE-2023-46218[CVE-2023-46218] -* link:https://access.redhat.com/security/cve/CVE-2024-0553[CVE-2024-0553] -* link:https://access.redhat.com/security/cve/CVE-2024-0567[CVE-2024-0567] diff --git a/modules/logging-release-notes-5-9-1.adoc b/modules/logging-release-notes-5-9-1.adoc deleted file mode 100644 index 63960e167e38..000000000000 --- a/modules/logging-release-notes-5-9-1.adoc +++ /dev/null @@ -1,36 +0,0 @@ -//module included in logging-5-9-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-9-1_{context}"] -= Logging 5.9.1 -This release includes link:https://access.redhat.com/errata/RHSA-2024:2096[OpenShift Logging Bug Fix Release 5.9.1] - -[id="logging-release-notes-5-9-1-enhancements"] -== Enhancements - -* Before this update, the {loki-op} configured Loki to use path-based style access for the Amazon Simple Storage Service (S3), which has been deprecated. With this update, the {loki-op} defaults to virtual-host style without users needing to change their configuration. (link:https://issues.redhat.com/browse/LOG-5401[LOG-5401]) - -* Before this update, the {loki-op} did not validate the Amazon Simple Storage Service (S3) endpoint used in the storage secret. With this update, the validation process ensures the S3 endpoint is a valid S3 URL, and the `LokiStack` status updates to indicate any invalid URLs. (link:https://issues.redhat.com/browse/LOG-5395[LOG-5395])
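The following is a minimal sketch of the kind of S3 storage secret whose `endpoint` value this validation covers. The secret name, namespace, bucket, region, and credential values are placeholders, and the key names are assumptions based on the usual {loki-op} S3 storage secret layout rather than values from the release note:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: logging-loki-s3           # hypothetical secret name referenced by the LokiStack CR
  namespace: openshift-logging
stringData:
  access_key_id: <access_key_id>              # placeholder credential
  access_key_secret: <access_key_secret>      # placeholder credential
  bucketnames: example-bucket                 # placeholder bucket name
  endpoint: https://s3.us-east-1.amazonaws.com # must be a valid S3 URL; invalid values surface in the LokiStack status
  region: us-east-1
----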
- -[id="logging-release-notes-5-9-1-bug-fixes"] -== Bug Fixes - -* Before this update, a bug in LogQL parsing left out some line filters from the query. With this update, the parsing now includes all the line filters while keeping the original query unchanged. (link:https://issues.redhat.com/browse/LOG-5268[LOG-5268]) - -* Before this update, a prune filter without a defined `pruneFilterSpec` would cause a segfault. With this update, there is a validation error if a prune filter does not have a defined `pruneFilterSpec`. (link:https://issues.redhat.com/browse/LOG-5322[LOG-5322]) - -* Before this update, a drop filter without a defined `dropTestsSpec` would cause a segfault. With this update, there is a validation error if a drop filter does not have a defined `dropTestsSpec`. (link:https://issues.redhat.com/browse/LOG-5323[LOG-5323]) - -* Before this update, the {loki-op} did not validate the Amazon Simple Storage Service (S3) endpoint URL format used in the storage secret. With this update, the S3 endpoint URL goes through a validation step that reflects on the status of the `LokiStack`. (link:https://issues.redhat.com/browse/LOG-5397[LOG-5397]) - -* Before this update, poorly formatted timestamp fields in audit log records led to `WARN` messages in {clo} logs. With this update, a remap transformation ensures that the timestamp field is properly formatted. (link:https://issues.redhat.com/browse/LOG-4672[LOG-4672]) - -* Before this update, the error message thrown while validating a `ClusterLogForwarder` resource name and namespace did not correspond to the correct error. With this update, the system checks if a `ClusterLogForwarder` resource with the same name exists in the same namespace. If it does not, the correct error is returned. (link:https://issues.redhat.com/browse/LOG-5062[LOG-5062]) - -* Before this update, the validation feature for output config required a TLS URL, even for services such as Amazon CloudWatch or {gcp-full} Logging where a URL is not needed by design. With this update, the validation logic for services without URLs is improved, and the error messages are more informative. (link:https://issues.redhat.com/browse/LOG-5307[LOG-5307]) - -* Before this update, defining an infrastructure input type did not exclude {logging} workloads from the collection. With this update, the collection excludes {logging} services to avoid feedback loops. (link:https://issues.redhat.com/browse/LOG-5309[LOG-5309]) - - -[id="logging-release-notes-5-9-1-CVEs"] -== CVEs -No CVEs. \ No newline at end of file diff --git a/modules/logging-release-notes-5-9-2.adoc b/modules/logging-release-notes-5-9-2.adoc deleted file mode 100644 index 928aa474a73b..000000000000 --- a/modules/logging-release-notes-5-9-2.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// module included in logging-5-9-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-9-2_{context}"] -= Logging 5.9.2 -This release includes link:https://access.redhat.com/errata/RHSA-2024:2933[OpenShift Logging Bug Fix Release 5.9.2] - -[id="logging-release-notes-5-9-2-bug-fixes"] -== Bug Fixes - -* Before this update, changes to the Logging Operator caused an error due to an incorrect configuration in the `ClusterLogForwarder` CR. As a result, upgrades to {logging} deleted the collector daemonset. With this update, the Logging Operator re-creates collector daemonsets except when a `Not authorized to collect` error occurs. 
(link:https://issues.redhat.com/browse/LOG-4910[LOG-4910]) - -* Before this update, the rotated infrastructure log files were sent to the application index in some scenarios due to an incorrect configuration in the Vector log collector. With this update, the Vector log collector configuration avoids collecting any rotated infrastructure log files. (link:https://issues.redhat.com/browse/LOG-5156[LOG-5156]) - -* Before this update, the Logging Operator did not monitor changes to the `grafana-dashboard-cluster-logging` config map. With this update, the Logging Operator monitors changes in the `ConfigMap` objects, ensuring the system stays synchronized and responds effectively to config map modifications. (link:https://issues.redhat.com/browse/LOG-5308[LOG-5308]) - -* Before this update, an issue in the metrics collection code of the Logging Operator caused it to report stale telemetry metrics. With this update, the Logging Operator does not report stale telemetry metrics. (link:https://issues.redhat.com/browse/LOG-5426[LOG-5426]) - -* Before this change, the Fluentd `out_http` plugin ignored the `no_proxy` environment variable. With this update, the Fluentd patches the `HTTP#start` method of ruby to honor the `no_proxy` environment variable. (link:https://issues.redhat.com/browse/LOG-5466[LOG-5466]) - -[id="logging-release-notes-5-9-2-CVEs"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2022-48554[CVE-2022-48554] -* link:https://access.redhat.com/security/cve/CVE-2023-2975[CVE-2023-2975] -* link:https://access.redhat.com/security/cve/CVE-2023-3446[CVE-2023-3446] -* link:https://access.redhat.com/security/cve/CVE-2023-3817[CVE-2023-3817] -* link:https://access.redhat.com/security/cve/CVE-2023-5678[CVE-2023-5678] -* link:https://access.redhat.com/security/cve/CVE-2023-6129[CVE-2023-6129] -* link:https://access.redhat.com/security/cve/CVE-2023-6237[CVE-2023-6237] -* link:https://access.redhat.com/security/cve/CVE-2023-7008[CVE-2023-7008] -* link:https://access.redhat.com/security/cve/CVE-2023-45288[CVE-2023-45288] -* link:https://access.redhat.com/security/cve/CVE-2024-0727[CVE-2024-0727] -* link:https://access.redhat.com/security/cve/CVE-2024-22365[CVE-2024-22365] -* link:https://access.redhat.com/security/cve/CVE-2024-25062[CVE-2024-25062] -* link:https://access.redhat.com/security/cve/CVE-2024-28834[CVE-2024-28834] -* link:https://access.redhat.com/security/cve/CVE-2024-28835[CVE-2024-28835] diff --git a/modules/logging-release-notes-5-9-3.adoc b/modules/logging-release-notes-5-9-3.adoc deleted file mode 100644 index e13553abca85..000000000000 --- a/modules/logging-release-notes-5-9-3.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// module included in logging-5-9-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-9-3_{context}"] -= Logging 5.9.3 -This release includes link:https://access.redhat.com/errata/RHBA-2024:3736[OpenShift Logging Bug Fix Release 5.9.3] - -[id="logging-release-notes-5-9-3-bug-fixes"] -== Bug Fixes - -* Before this update, there was a delay in restarting Ingesters when configuring `LokiStack`, because the {loki-op} sets the write-ahead log `replay_memory_ceiling` to zero bytes for the `1x.demo` size. With this update, the minimum value used for the `replay_memory_ceiling` has been increased to avoid delays. (link:https://issues.redhat.com/browse/LOG-5614[LOG-5614]) - -* Before this update, monitoring the Vector collector output buffer state was not possible. 
With this update, you can monitor and alert on the Vector collector output buffer size, which improves observability and helps keep the system running optimally. (link:https://issues.redhat.com/browse/LOG-5586[LOG-5586]) - -[id="logging-release-notes-5-9-3-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2024-2961[CVE-2024-2961] -* link:https://access.redhat.com/security/cve/CVE-2024-28182[CVE-2024-28182] -* link:https://access.redhat.com/security/cve/CVE-2024-33599[CVE-2024-33599] -* link:https://access.redhat.com/security/cve/CVE-2024-33600[CVE-2024-33600] -* link:https://access.redhat.com/security/cve/CVE-2024-33601[CVE-2024-33601] -* link:https://access.redhat.com/security/cve/CVE-2024-33602[CVE-2024-33602] \ No newline at end of file diff --git a/modules/logging-rn-5.7.0.adoc b/modules/logging-rn-5.7.0.adoc deleted file mode 100644 index d368e78c56b5..000000000000 --- a/modules/logging-rn-5.7.0.adoc +++ /dev/null @@ -1,25 +0,0 @@ -//module included in logging-5-7-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-7-0_{context}"] -= Logging 5.7.0 - -This release includes link:https://access.redhat.com/errata/RHBA-2023:2133[OpenShift Logging Bug Fix Release 5.7.0]. - -[id="logging-5-7-enhancements"] -== Enhancements -With this update, you can enable logging to detect multi-line exceptions and reassemble them into a single log entry. - -To enable logging to detect multi-line exceptions and reassemble them into a single log entry, ensure that the `ClusterLogForwarder` custom resource (CR) contains a `detectMultilineErrors` field with a value of `true`. - -[id="logging-5-7-known-issues"] -== Known Issues -None. - -[id="logging-5-7-0-bug-fixes"] -== Bug fixes -* Before this update, the `nodeSelector` attribute for the Gateway component of the LokiStack did not impact node scheduling. With this update, the `nodeSelector` attribute works as expected. (link:https://issues.redhat.com/browse/LOG-3713[LOG-3713]) - -[id="logging-5-7-0-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-1999[CVE-2023-1999] -* link:https://access.redhat.com/security/cve/CVE-2023-28617[CVE-2023-28617] diff --git a/modules/logging-rn-5.7.1.adoc b/modules/logging-rn-5.7.1.adoc deleted file mode 100644 index 2ce3a5752a24..000000000000 --- a/modules/logging-rn-5.7.1.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// logging-5-7-release-notes.adoc -// cluster-logging-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="logging-release-notes-5-7-1_{context}"] -= Logging 5.7.1 -This release includes link:https://access.redhat.com/errata/RHBA-2023:3197[OpenShift Logging Bug Fix Release 5.7.1]. - -[id="logging-5-7-1-bug-fixes_{context}"] -== Bug fixes -* Before this update, the presence of numerous noisy messages within the Cluster Logging Operator pod logs caused reduced log readability and increased difficulty in identifying important system events. With this update, the issue is resolved by significantly reducing the noisy messages within Cluster Logging Operator pod logs. (link:https://issues.redhat.com/browse/LOG-3482[LOG-3482]) - -* Before this update, the API server would reset the value for the `CollectorSpec.Type` field to `vector`, even when the custom resource used a different value. This update removes the default for the `CollectorSpec.Type` field to restore the previous behavior. (link:https://issues.redhat.com/browse/LOG-4086[LOG-4086])
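As a minimal sketch of the restored behavior, an administrator who wants a specific collector can set the field explicitly instead of relying on an API-server default. This assumes the `logging.openshift.io/v1` `ClusterLogging` API; most of the spec is omitted for brevity:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance
  namespace: openshift-logging
spec:
  managementState: Managed
  collection:
    type: fluentd   # set explicitly; the API server no longer resets this field to vector
----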
- -* Before this update, a time range could not be selected in the {product-title} web console by clicking and dragging over the logs histogram. With this update, clicking and dragging can be used to successfully select a time range. (link:https://issues.redhat.com/browse/LOG-4501[LOG-4501]) - -* Before this update, clicking the *Show Resources* link in the {product-title} web console did not produce any effect. With this update, the issue is resolved by fixing the functionality of the *Show Resources* link to toggle the display of resources for each log entry. (link:https://issues.redhat.com/browse/LOG-3218[LOG-3218]) - -[id="logging-5-7-1-CVEs_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-21930[CVE-2023-21930] -* link:https://access.redhat.com/security/cve/CVE-2023-21937[CVE-2023-21937] -* link:https://access.redhat.com/security/cve/CVE-2023-21938[CVE-2023-21938] -* link:https://access.redhat.com/security/cve/CVE-2023-21939[CVE-2023-21939] -* link:https://access.redhat.com/security/cve/CVE-2023-21954[CVE-2023-21954] -* link:https://access.redhat.com/security/cve/CVE-2023-21967[CVE-2023-21967] -* link:https://access.redhat.com/security/cve/CVE-2023-21968[CVE-2023-21968] -* link:https://access.redhat.com/security/cve/CVE-2023-28617[CVE-2023-28617] diff --git a/modules/logging-rn-5.7.2.adoc b/modules/logging-rn-5.7.2.adoc deleted file mode 100644 index 778ca6e34759..000000000000 --- a/modules/logging-rn-5.7.2.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-2_{context}"] -= Logging 5.7.2 -This release includes link:https://access.redhat.com/errata/RHSA-2023:3495[OpenShift Logging Bug Fix Release 5.7.2]. - -[id="openshift-logging-5-7-2-bug-fixes_{context}"] -== Bug fixes -* Before this update, it was not possible to delete the `openshift-logging` namespace directly due to the presence of a pending finalizer. With this update, the finalizer is no longer utilized, enabling direct deletion of the namespace. (link:https://issues.redhat.com/browse/LOG-3316[LOG-3316]) - -* Before this update, the `run.sh` script would display an incorrect `chunk_limit_size` value if it was changed according to the {product-title} documentation. However, when setting the `chunk_limit_size` via the environment variable `$BUFFER_SIZE_LIMIT`, the script would show the correct value. With this update, the `run.sh` script now consistently displays the correct `chunk_limit_size` value in both scenarios. (link:https://issues.redhat.com/browse/LOG-3330[LOG-3330]) - -* Before this update, the {product-title} web console's logging view plugin did not allow for custom node placement or tolerations. This update adds the ability to define node placement and tolerations for the logging view plugin. (link:https://issues.redhat.com/browse/LOG-3749[LOG-3749]) - -* Before this update, the Cluster Logging Operator encountered an Unsupported Media Type exception when trying to send logs to Datadog by using the Fluentd HTTP plugin. With this update, users can seamlessly assign the content type for log forwarding by configuring the HTTP `Content-Type` header. The value provided is automatically assigned to the `content_type` parameter within the plugin, ensuring successful log transmission. 
(link:https://issues.redhat.com/browse/LOG-3784[LOG-3784]) - -* Before this update, when the `detectMultilineErrors` field was set to `true` in the `ClusterLogForwarder` custom resource (CR), PHP multi-line errors were recorded as separate log entries, causing the stack trace to be split across multiple messages. With this update, multi-line error detection for PHP is enabled, ensuring that the entire stack trace is included in a single log message. (link:https://issues.redhat.com/browse/LOG-3878[LOG-3878]) - -* Before this update, `ClusterLogForwarder` pipelines containing a space in their name caused the Vector collector pods to continuously crash. With this update, all spaces, dashes (-), and dots (.) in pipeline names are replaced with underscores (_). (link:https://issues.redhat.com/browse/LOG-3945[LOG-3945]) - -* Before this update, the `log_forwarder_output` metric did not include the `http` parameter. This update adds the missing parameter to the metric. (link:https://issues.redhat.com/browse/LOG-3997[LOG-3997]) - -* Before this update, Fluentd did not identify some multi-line JavaScript client exceptions when they ended with a colon. With this update, the Fluentd buffer name is prefixed with an underscore, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4019[LOG-4019]) - -* Before this update, when configuring log forwarding to write to a Kafka output topic which matched a key in the payload, logs dropped due to an error. With this update, Fluentd's buffer name has been prefixed with an underscore, resolving the issue.(link:https://issues.redhat.com/browse/LOG-4027[LOG-4027]) - -* Before this update, the LokiStack gateway returned label values for namespaces without applying the access rights of a user. With this update, the LokiStack gateway applies permissions to label value requests, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4049[LOG-4049]) - -* Before this update, the Cluster Logging Operator API required a certificate to be provided by a secret when the `tls.insecureSkipVerify` option was set to `true`. With this update, the Cluster Logging Operator API no longer requires a certificate to be provided by a secret in such cases. The following configuration has been added to the Operator's CR: -+ -[source,yaml] ----- -tls.verify_certificate = false -tls.verify_hostname = false ----- -+ -(link:https://issues.redhat.com/browse/LOG-3445[LOG-3445]) - -* Before this update, the LokiStack route configuration caused queries running longer than 30 seconds to timeout. With this update, the LokiStack global and per-tenant `queryTimeout` settings affect the route timeout settings, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4052[LOG-4052]) - -* Before this update, a prior fix to remove defaulting of the `collection.type` resulted in the Operator no longer honoring the deprecated specs for resource, node selections, and tolerations. This update modifies the Operator behavior to always prefer the `collection.logs` spec over those of `collection`. This varies from previous behavior that allowed using both the preferred fields and deprecated fields but would ignore the deprecated fields when `collection.type` was populated. (link:https://issues.redhat.com/browse/LOG-4185[LOG-4185]) - -* Before this update, the Vector log collector did not generate TLS configuration for forwarding logs to multiple Kafka brokers if the broker URLs were not specified in the output. 
With this update, TLS configuration is generated appropriately for multiple brokers. (link:https://issues.redhat.com/browse/LOG-4163[LOG-4163]) - -* Before this update, the option to enable a passphrase for log forwarding to Kafka was unavailable. This limitation presented a security risk as it could potentially expose sensitive information. With this update, users now have a seamless option to enable a passphrase for log forwarding to Kafka. (link:https://issues.redhat.com/browse/LOG-3314[LOG-3314]) - -* Before this update, the Vector log collector did not honor the `tlsSecurityProfile` settings for outgoing TLS connections. After this update, Vector handles TLS connection settings appropriately. (link:https://issues.redhat.com/browse/LOG-4011[LOG-4011]) - -* Before this update, not all available output types were included in the `log_forwarder_output_info` metrics. With this update, metrics contain Splunk and {gcp-full} Logging data, which was previously missing. (link:https://issues.redhat.com/browse/LOG-4098[LOG-4098]) - -* Before this update, when `follow_inodes` was set to `true`, the Fluentd collector could crash on file rotation. With this update, the `follow_inodes` setting does not crash the collector. (link:https://issues.redhat.com/browse/LOG-4151[LOG-4151]) - -* Before this update, the Fluentd collector could incorrectly close files that should be watched because of how those files were tracked. With this update, the tracking parameters have been corrected. (link:https://issues.redhat.com/browse/LOG-4149[LOG-4149]) - -* Before this update, forwarding logs with the Vector collector and naming a pipeline in the `ClusterLogForwarder` instance `audit`, `application`, or `infrastructure` resulted in collector pods staying in the `CrashLoopBackOff` state with the following error in the collector log: -+ -[source,text] ----- -ERROR vector::cli: Configuration error. error=redefinition of table transforms.audit for key transforms.audit ----- -+ -After this update, pipeline names no longer clash with reserved input names, and pipelines can be named `audit`, `application`, or `infrastructure`. (link:https://issues.redhat.com/browse/LOG-4218[LOG-4218]) - -* Before this update, when forwarding logs to a syslog destination with the Vector collector and setting the `addLogSource` flag to `true`, the following extra empty fields were added to the forwarded messages: `namespace_name=`, `container_name=`, and `pod_name=`. With this update, these fields are no longer added to journal logs. (link:https://issues.redhat.com/browse/LOG-4219[LOG-4219]) - -* Before this update, when a `structuredTypeKey` was not found and a `structuredTypeName` was not specified, log messages were still parsed into a structured object. With this update, parsing of logs is as expected. 
(link:https://issues.redhat.com/browse/LOG-4220[LOG-4220]) - - -[id="openshift-logging-5-7-2-CVEs_{context}"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2021-26341[CVE-2021-26341] -* link:https://access.redhat.com/security/cve/CVE-2021-33655[CVE-2021-33655] -* link:https://access.redhat.com/security/cve/CVE-2021-33656[CVE-2021-33656] -* link:https://access.redhat.com/security/cve/CVE-2022-1462[CVE-2022-1462] -* link:https://access.redhat.com/security/cve/CVE-2022-1679[CVE-2022-1679] -* link:https://access.redhat.com/security/cve/CVE-2022-1789[CVE-2022-1789] -* link:https://access.redhat.com/security/cve/CVE-2022-2196[CVE-2022-2196] -* link:https://access.redhat.com/security/cve/CVE-2022-2663[CVE-2022-2663] -* link:https://access.redhat.com/security/cve/CVE-2022-3028[CVE-2022-3028] -* link:https://access.redhat.com/security/cve/CVE-2022-3239[CVE-2022-3239] -* link:https://access.redhat.com/security/cve/CVE-2022-3522[CVE-2022-3522] -* link:https://access.redhat.com/security/cve/CVE-2022-3524[CVE-2022-3524] -* link:https://access.redhat.com/security/cve/CVE-2022-3564[CVE-2022-3564] -* link:https://access.redhat.com/security/cve/CVE-2022-3566[CVE-2022-3566] -* link:https://access.redhat.com/security/cve/CVE-2022-3567[CVE-2022-3567] -* link:https://access.redhat.com/security/cve/CVE-2022-3619[CVE-2022-3619] -* link:https://access.redhat.com/security/cve/CVE-2022-3623[CVE-2022-3623] -* link:https://access.redhat.com/security/cve/CVE-2022-3625[CVE-2022-3625] -* link:https://access.redhat.com/security/cve/CVE-2022-3627[CVE-2022-3627] -* link:https://access.redhat.com/security/cve/CVE-2022-3628[CVE-2022-3628] -* link:https://access.redhat.com/security/cve/CVE-2022-3707[CVE-2022-3707] -* link:https://access.redhat.com/security/cve/CVE-2022-3970[CVE-2022-3970] -* link:https://access.redhat.com/security/cve/CVE-2022-4129[CVE-2022-4129] -* link:https://access.redhat.com/security/cve/CVE-2022-20141[CVE-2022-20141] -* link:https://access.redhat.com/security/cve/CVE-2022-25147[CVE-2022-25147] -* link:https://access.redhat.com/security/cve/CVE-2022-25265[CVE-2022-25265] -* link:https://access.redhat.com/security/cve/CVE-2022-30594[CVE-2022-30594] -* link:https://access.redhat.com/security/cve/CVE-2022-36227[CVE-2022-36227] -* link:https://access.redhat.com/security/cve/CVE-2022-39188[CVE-2022-39188] -* link:https://access.redhat.com/security/cve/CVE-2022-39189[CVE-2022-39189] -* link:https://access.redhat.com/security/cve/CVE-2022-41218[CVE-2022-41218] -* link:https://access.redhat.com/security/cve/CVE-2022-41674[CVE-2022-41674] -* link:https://access.redhat.com/security/cve/CVE-2022-42703[CVE-2022-42703] -* link:https://access.redhat.com/security/cve/CVE-2022-42720[CVE-2022-42720] -* link:https://access.redhat.com/security/cve/CVE-2022-42721[CVE-2022-42721] -* link:https://access.redhat.com/security/cve/CVE-2022-42722[CVE-2022-42722] -* link:https://access.redhat.com/security/cve/CVE-2022-43750[CVE-2022-43750] -* link:https://access.redhat.com/security/cve/CVE-2022-47929[CVE-2022-47929] -* link:https://access.redhat.com/security/cve/CVE-2023-0394[CVE-2023-0394] -* link:https://access.redhat.com/security/cve/CVE-2023-0461[CVE-2023-0461] -* link:https://access.redhat.com/security/cve/CVE-2023-1195[CVE-2023-1195] -* link:https://access.redhat.com/security/cve/CVE-2023-1582[CVE-2023-1582] -* link:https://access.redhat.com/security/cve/CVE-2023-2491[CVE-2023-2491] -* link:https://access.redhat.com/security/cve/CVE-2023-22490[CVE-2023-22490] -* 
link:https://access.redhat.com/security/cve/CVE-2023-23454[CVE-2023-23454] -* link:https://access.redhat.com/security/cve/CVE-2023-23946[CVE-2023-23946] -* link:https://access.redhat.com/security/cve/CVE-2023-25652[CVE-2023-25652] -* link:https://access.redhat.com/security/cve/CVE-2023-25815[CVE-2023-25815] -* link:https://access.redhat.com/security/cve/CVE-2023-27535[CVE-2023-27535] -* link:https://access.redhat.com/security/cve/CVE-2023-29007[CVE-2023-29007] diff --git a/modules/logging-rn-5.7.3.adoc b/modules/logging-rn-5.7.3.adoc deleted file mode 100644 index 423775cb8e81..000000000000 --- a/modules/logging-rn-5.7.3.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-3_{context}"] -= Logging 5.7.3 -This release includes link:https://access.redhat.com/errata/RHSA-2023:3998[OpenShift Logging Bug Fix Release 5.7.3]. - -[id="openshift-logging-5-7-3-bug-fixes_{context}"] -== Bug fixes -* Before this update, when viewing logs within the {product-title} web console, cached files caused the data to not refresh. With this update the bootstrap files are not cached, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4100[LOG-4100]) - -* Before this update, the {loki-op} reset errors in a way that made identifying configuration problems difficult to troubleshoot. With this update, errors persist until the configuration error is resolved. (link:https://issues.redhat.com/browse/LOG-4156[LOG-4156]) - -* Before this update, the LokiStack ruler did not restart after changes were made to the `RulerConfig` custom resource (CR). With this update, the {loki-op} restarts the ruler pods after the `RulerConfig` CR is updated. (link:https://issues.redhat.com/browse/LOG-4161[LOG-4161]) - -* Before this update, the vector collector terminated unexpectedly when input match label values contained a `/` character within the `ClusterLogForwarder`. This update resolves the issue by quoting the match label, enabling the collector to start and collect logs. (link:https://issues.redhat.com/browse/LOG-4176[LOG-4176]) - -* Before this update, the {loki-op} terminated unexpectedly when a `LokiStack` CR defined tenant limits, but not global limits. With this update, the {loki-op} can process `LokiStack` CRs without global limits, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4198[LOG-4198]) - -* Before this update, Fluentd did not send logs to an Elasticsearch cluster when the private key provided was passphrase-protected. With this update, Fluentd properly handles passphrase-protected private keys when establishing a connection with Elasticsearch. (link:https://issues.redhat.com/browse/LOG-4258[LOG-4258]) - -* Before this update, clusters with more than 8,000 namespaces caused Elasticsearch to reject queries because the list of namespaces was larger than the `http.max_header_size` setting. With this update, the default value for header size has been increased, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4277[LOG-4277]) - -* Before this update, label values containing a `/` character within the `ClusterLogForwarder` CR would cause the collector to terminate unexpectedly. -With this update, slashes are replaced with underscores, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4095[LOG-4095]) - -* Before this update, the Cluster Logging Operator terminated unexpectedly when set to an unmanaged state. 
With this update, a check ensures that the `ClusterLogging` resource is in the correct Management state before the reconciliation of the `ClusterLogForwarder` CR is initiated, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4177[LOG-4177]) - -* Before this update, when viewing logs within the {product-title} web console, selecting a time range by dragging over the histogram did not work on the aggregated logs view inside the pod detail. With this update, the time range can be selected by dragging on the histogram in this view. (link:https://issues.redhat.com/browse/LOG-4108[LOG-4108]) - -* Before this update, when viewing logs within the {product-title} web console, queries longer than 30 seconds timed out. With this update, the timeout value can be configured in the `logging-view-plugin` config map. (link:https://issues.redhat.com/browse/LOG-3498[LOG-3498]) - -* Before this update, when viewing logs within the {product-title} web console, clicking the *more data available* option loaded more log entries only the first time it was clicked. With this update, more entries are loaded with each click. (link:https://issues.redhat.com/browse/OU-188[OU-188]) - -* Before this update, when viewing logs within the {product-title} web console, clicking the *streaming* option would only display the *streaming logs* message without showing the actual logs. With this update, both the message and the log stream are displayed correctly. (link:https://issues.redhat.com/browse/OU-166[OU-166]) - -[id="openshift-logging-5-7-3-CVEs_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-24736[CVE-2020-24736] -* link:https://access.redhat.com/security/cve/CVE-2022-48281[CVE-2022-48281] -* link:https://access.redhat.com/security/cve/CVE-2023-1667[CVE-2023-1667] -* link:https://access.redhat.com/security/cve/CVE-2023-2283[CVE-2023-2283] -* link:https://access.redhat.com/security/cve/CVE-2023-24329[CVE-2023-24329] -* link:https://access.redhat.com/security/cve/CVE-2023-26115[CVE-2023-26115] -* link:https://access.redhat.com/security/cve/CVE-2023-26136[CVE-2023-26136] -* link:https://access.redhat.com/security/cve/CVE-2023-26604[CVE-2023-26604] -* link:https://access.redhat.com/security/cve/CVE-2023-28466[CVE-2023-28466] diff --git a/modules/logging-rn-5.7.4.adoc b/modules/logging-rn-5.7.4.adoc deleted file mode 100644 index 0e0a66e578c6..000000000000 --- a/modules/logging-rn-5.7.4.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-release-notes.adoc -// logging-5-7-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-4_{context}"] -= Logging 5.7.4 -This release includes link:https://access.redhat.com/errata/RHSA-2023:4341[OpenShift Logging Bug Fix Release 5.7.4]. - -[id="openshift-logging-5-7-4-bug-fixes_{context}"] -== Bug fixes -* Before this update, when forwarding logs to CloudWatch, a `namespaceUUID` value was not appended to the `logGroupName` field. With this update, the `namespaceUUID` value is included, so a `logGroupName` in CloudWatch appears as `logGroupName: vectorcw.b443fb9e-bd4c-4b6a-b9d3-c0097f9ed286`. (link:https://issues.redhat.com/browse/LOG-2701[LOG-2701]) - -* Before this update, when forwarding logs over HTTP to an off-cluster destination, the Vector collector was unable to authenticate to the cluster-wide HTTP proxy even though correct credentials were provided in the proxy URL. 
With this update, the Vector log collector can now authenticate to the cluster-wide HTTP proxy. (link:https://issues.redhat.com/browse/LOG-3381[LOG-3381]) - -* Before this update, the Operator would fail if the Fluentd collector was configured with Splunk as an output, due to this configuration being unsupported. With this update, configuration validation rejects unsupported outputs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4237[LOG-4237]) - -* Before this update, when the Vector collector was updated, an `enabled = true` value in the TLS configuration for AWS CloudWatch logs and the {gcp-short} Stackdriver caused a configuration error. With this update, the `enabled = true` value is removed for these outputs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4242[LOG-4242]) - -* Before this update, the Vector collector occasionally panicked with the following error message in its log: -`thread 'vector-worker' panicked at 'all branches are disabled and there is no else branch', src/kubernetes/reflector.rs:26:9`. With this update, the error has been resolved. (link:https://issues.redhat.com/browse/LOG-4275[LOG-4275]) - -* Before this update, an issue in the {loki-op} caused the `alert-manager` configuration for the application tenant to disappear if the Operator was configured with additional options for that tenant. With this update, the generated Loki configuration now contains both the custom and the auto-generated configuration. (link:https://issues.redhat.com/browse/LOG-4361[LOG-4361]) - -* Before this update, when multiple roles were used to authenticate using STS with AWS CloudWatch forwarding, a recent update caused the credentials to be non-unique. With this update, multiple combinations of STS roles and static credentials can once again be used to authenticate with AWS CloudWatch. (link:https://issues.redhat.com/browse/LOG-4368[LOG-4368]) - -* Before this update, Loki filtered label values for active streams but did not remove duplicates, making Grafana's Label Browser unusable. With this update, Loki filters out duplicate label values for active streams, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4389[LOG-4389]) - -* Before this update, pipelines with no `name` field specified in the `ClusterLogForwarder` custom resource (CR) stopped working after upgrading to OpenShift Logging 5.7. With this update, the error has been resolved. (link:https://issues.redhat.com/browse/LOG-4120[LOG-4120]) - -// Release notes text field empty. * (link:https://issues.redhat.com/browse/LOG-4302[LOG-4302]) -// Release notes text field empty. * (link:https://issues.redhat.com/browse/LOG-4015[LOG-4015]) -// Release notes text field empty. * (link:https://issues.redhat.com/browse/LOG-4372[LOG-4372]) - -[id="openshift-logging-5-7-4-CVEs_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-25883[CVE-2022-25883] -* link:https://access.redhat.com/security/cve/CVE-2023-22796[CVE-2023-22796] diff --git a/modules/logging-rn-5.7.6.adoc b/modules/logging-rn-5.7.6.adoc deleted file mode 100644 index 6281f6e1aaa6..000000000000 --- a/modules/logging-rn-5.7.6.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-release-notes.adoc -// logging-5-7-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-6_{context}"] -= Logging 5.7.6 -This release includes link:https://access.redhat.com/errata/RHSA-2023:4933[OpenShift Logging Bug Fix Release 5.7.6]. 
- -[id="openshift-logging-5-7-6-bug-fixes_{context}"] -== Bug fixes -* Before this update, the collector relied on the default configuration settings for reading the container log lines. As a result, the collector did not read the rotated files efficiently. With this update, there is an increase in the number of bytes read, which allows the collector to efficiently process rotated files. (link:https://issues.redhat.com/browse/LOG-4501[LOG-4501]) - -* Before this update, when users pasted a URL with predefined filters, some filters did not reflect. With this update, the UI reflects all the filters in the URL. (link:https://issues.redhat.com/browse/LOG-4459[LOG-4459]) - -* Before this update, forwarding to Loki using custom labels generated an error when switching from Fluentd to Vector. With this update, the Vector configuration sanitizes labels in the same way as Fluentd to ensure the collector starts and correctly processes labels. (link:https://issues.redhat.com/browse/LOG-4460[LOG-4460]) - -* Before this update, the Observability Logs console search field did not accept special characters that it should escape. With this update, it is escaping special characters properly in the query. (link:https://issues.redhat.com/browse/LOG-4456[LOG-4456]) - -* Before this update, the following warning message appeared while sending logs to Splunk: `Timestamp was not found.` With this update, the change overrides the name of the log field used to retrieve the Timestamp and sends it to Splunk without warning. (link:https://issues.redhat.com/browse/LOG-4413[LOG-4413]) - -* Before this update, the CPU and memory usage of Vector was increasing over time. With this update, the Vector configuration now contains the `expire_metrics_secs=60` setting to limit the lifetime of the metrics and cap the associated CPU usage and memory footprint. (link:https://issues.redhat.com/browse/LOG-4171[LOG-4171]) - -* Before this update, the LokiStack gateway cached authorized requests very broadly. As a result, this caused wrong authorization results. With this update, LokiStack gateway caches on a more fine-grained basis which resolves this issue. (link:https://issues.redhat.com/browse/LOG-4393[LOG-4393]) - -* Before this update, the Fluentd runtime image included builder tools which were unnecessary at runtime. With this update, the builder tools are removed, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4467[LOG-4467]) - -[id="openshift-logging-5-7-6-CVEs_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-3899[CVE-2023-3899] -* link:https://access.redhat.com/security/cve/CVE-2023-4456[CVE-2023-4456] -* link:https://access.redhat.com/security/cve/CVE-2023-32360[CVE-2023-32360] -* link:https://access.redhat.com/security/cve/CVE-2023-34969[CVE-2023-34969] \ No newline at end of file diff --git a/modules/logging-rn-5.7.7.adoc b/modules/logging-rn-5.7.7.adoc deleted file mode 100644 index e925c1984f5f..000000000000 --- a/modules/logging-rn-5.7.7.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-release-notes.adoc -// logging-5-7-release-notes.adoc -:_mod-docs-content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-7_{context}"] -= Logging 5.7.7 -This release includes link:https://access.redhat.com/errata/RHSA-2023:5530[OpenShift Logging Bug Fix Release 5.7.7]. 
- -[id="openshift-logging-5-7-7-bug-fixes_{context}"] -== Bug fixes -* Before this update, FluentD normalized the logs emitted by the EventRouter differently from Vector. With this update, the Vector produces log records in a consistent format. (link:https://issues.redhat.com/browse/LOG-4178[LOG-4178]) - -* Before this update, there was an error in the query used for the *FluentD Buffer Availability* graph in the metrics dashboard created by the Cluster Logging Operator as it showed the minimum buffer usage. With this update, the graph shows the maximum buffer usage and is now renamed to *FluentD Buffer Usage*. (link:https://issues.redhat.com/browse/LOG-4555[LOG-4555]) - -* Before this update, deploying a LokiStack on IPv6-only or dual-stack {product-title} clusters caused the LokiStack memberlist registration to fail. As a result, the distributor pods went into a crash loop. With this update, an administrator can enable IPv6 by setting the `lokistack.spec.hashRing.memberlist.enableIPv6:` value to `true`, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-4569[LOG-4569]) - -* Before this update, the log collector relied on the default configuration settings for reading the container log lines. As a result, the log collector did not read the rotated files efficiently. With this update, there is an increase in the number of bytes read, which allows the log collector to efficiently process rotated files. (link:https://issues.redhat.com/browse/LOG-4575[LOG-4575]) - -* Before this update, the unused metrics in the Event Router caused the container to fail due to excessive memory usage. With this update, there is reduction in the memory usage of the Event Router by removing the unused metrics. (link:https://issues.redhat.com/browse/LOG-4686[LOG-4686]) - -[id="openshift-logging-5-7-7-CVEs_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-0800[CVE-2023-0800] -* link:https://access.redhat.com/security/cve/CVE-2023-0801[CVE-2023-0801] -* link:https://access.redhat.com/security/cve/CVE-2023-0802[CVE-2023-0802] -* link:https://access.redhat.com/security/cve/CVE-2023-0803[CVE-2023-0803] -* link:https://access.redhat.com/security/cve/CVE-2023-0804[CVE-2023-0804] -* link:https://access.redhat.com/security/cve/CVE-2023-2002[CVE-2023-2002] -* link:https://access.redhat.com/security/cve/CVE-2023-3090[CVE-2023-3090] -* link:https://access.redhat.com/security/cve/CVE-2023-3390[CVE-2023-3390] -* link:https://access.redhat.com/security/cve/CVE-2023-3776[CVE-2023-3776] -* link:https://access.redhat.com/security/cve/CVE-2023-4004[CVE-2023-4004] -* link:https://access.redhat.com/security/cve/CVE-2023-4527[CVE-2023-4527] -* link:https://access.redhat.com/security/cve/CVE-2023-4806[CVE-2023-4806] -* link:https://access.redhat.com/security/cve/CVE-2023-4813[CVE-2023-4813] -* link:https://access.redhat.com/security/cve/CVE-2023-4863[CVE-2023-4863] -* link:https://access.redhat.com/security/cve/CVE-2023-4911[CVE-2023-4911] -* link:https://access.redhat.com/security/cve/CVE-2023-5129[CVE-2023-5129] -* link:https://access.redhat.com/security/cve/CVE-2023-20593[CVE-2023-20593] -* link:https://access.redhat.com/security/cve/CVE-2023-29491[CVE-2023-29491] -* link:https://access.redhat.com/security/cve/CVE-2023-30630[CVE-2023-30630] -* link:https://access.redhat.com/security/cve/CVE-2023-35001[CVE-2023-35001] -* link:https://access.redhat.com/security/cve/CVE-2023-35788[CVE-2023-35788] \ No newline at end of file diff --git a/modules/logging-set-input-rate-limit.adoc 
b/modules/logging-set-input-rate-limit.adoc deleted file mode 100644 index f4fd4268db37..000000000000 --- a/modules/logging-set-input-rate-limit.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/performance_reliability/logging-flow-control-mechanisms.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-set-input-rate-limit_{context}"] -= Configuring log forwarder input rate limits - -You can limit the rate of incoming logs that are collected by configuring the `ClusterLogForwarder` custom resource (CR). You can set input limits on a per-container or per-namespace basis. - -.Prerequisites - -* You have installed the {clo}. -* You have administrator permissions. - -.Procedure - -. Add a `maxRecordsPerSecond` limit value to the `ClusterLogForwarder` CR for a specified input. -+ -The following examples show how to configure input rate limits for different scenarios: -+ -.Example `ClusterLogForwarder` CR that sets a per-container limit for containers with certain labels -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: -# ... - inputs: - - name: <1> - application: - selector: - matchLabels: { example: label } <2> - containerLimit: - maxRecordsPerSecond: 0 <3> -# ... ----- -<1> The input name. -<2> A list of labels. If these labels match labels that are applied to a pod, the per-container limit specified in the `maxRecordsPerSecond` field is applied to those containers. -<3> Configures the rate limit. Setting the `maxRecordsPerSecond` field to `0` means that no logs are collected for the container. Setting the `maxRecordsPerSecond` field to some other value means that a maximum of that number of records per second are collected for the container. -+ -.Example `ClusterLogForwarder` CR that sets a per-container limit for containers in selected namespaces -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: -# ... - inputs: - - name: <1> - application: - namespaces: [ example-ns-1, example-ns-2 ] <2> - containerLimit: - maxRecordsPerSecond: 10 <3> - - name: - application: - namespaces: [ test ] - containerLimit: - maxRecordsPerSecond: 1000 -# ... ----- -<1> The input name. -<2> A list of namespaces. The per-container limit specified in the `maxRecordsPerSecond` field is applied to all containers in the namespaces listed. -<3> Configures the rate limit. Setting the `maxRecordsPerSecond` field to `10` means that a maximum of 10 records per second are collected for each container in the namespaces listed. - -. Apply the `ClusterLogForwarder` CR: -+ -.Example command -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/logging-set-output-rate-limit.adoc b/modules/logging-set-output-rate-limit.adoc deleted file mode 100644 index d983b9792e32..000000000000 --- a/modules/logging-set-output-rate-limit.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/performance_reliability/logging-flow-control-mechanisms.adoc - -:_mod-docs-content-type: PROCEDURE -[id="logging-set-output-rate-limit_{context}"] -= Configuring log forwarder output rate limits - -You can limit the rate of outbound logs to a specified output by configuring the `ClusterLogForwarder` custom resource (CR). - -.Prerequisites - -* You have installed the {clo}. -* You have administrator permissions. - -.Procedure - -. 
Add a `maxRecordsPerSecond` limit value to the `ClusterLogForwarder` CR for a specified output. -+ -The following example shows how to configure a per collector output rate limit for a Kafka broker output named `kafka-example`: -+ -.Example `ClusterLogForwarder` CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: -# ... -spec: -# ... - outputs: - - name: kafka-example <1> - type: kafka <2> - limit: - maxRecordsPerSecond: 1000000 <3> -# ... ----- -<1> The output name. -<2> The type of output. -<3> The log output rate limit. This value sets the maximum link:https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/[Quantity] of logs that can be sent to the Kafka broker per second. This value is not set by default. The default behavior is best effort, and records are dropped if the log forwarder cannot keep up. If this value is `0`, no logs are forwarded. - -. Apply the `ClusterLogForwarder` CR: -+ -.Example command -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/logging-vector-collector-alerts.adoc b/modules/logging-vector-collector-alerts.adoc deleted file mode 100644 index aeab74952b8c..000000000000 --- a/modules/logging-vector-collector-alerts.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_alerts/default-logging-alerts.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-vector-collector-alerts_{context}"] -= Vector collector alerts - -In logging 5.7 and later versions, the following alerts are generated by the Vector collector. You can view these alerts in the {product-title} web console. - -.Vector collector alerts -[cols="2,2,2,1",options="header"] -|=== -|Alert |Message |Description |Severity - -|`CollectorHighErrorRate` -|` of records have resulted in an error by vector .` -|The number of vector output errors is high, by default more than 10 in the previous 15 minutes. -|Warning - -|`CollectorNodeDown` -|`Prometheus could not scrape vector for more than 10m.` -|Vector is reporting that Prometheus could not scrape a specific Vector instance. -|Critical - -|`CollectorVeryHighErrorRate` -|` of records have resulted in an error by vector .` -|The number of Vector component errors are very high, by default more than 25 in the previous 15 minutes. -|Critical - -|`FluentdQueueLengthIncreasing` -|`In the last 1h, fluentd buffer queue length constantly increased more than 1. Current value is .` -|Fluentd is reporting that the queue size is increasing. 
-|Warning - -|=== diff --git a/modules/logging-vector-fluentd-feature-comparison.adoc b/modules/logging-vector-fluentd-feature-comparison.adoc deleted file mode 100644 index 090be1899d03..000000000000 --- a/modules/logging-vector-fluentd-feature-comparison.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: REFERENCE -[id="logging-vector-fluentd-feature-comparison_{context}"] -= Log collector features by type - -.Log Sources -[options="header"] -|=============================================================== -| Feature | Fluentd | Vector -| App container logs | ✓ | ✓ -| App-specific routing | ✓ | ✓ -| App-specific routing by namespace | ✓ | ✓ -| Infra container logs | ✓ | ✓ -| Infra journal logs | ✓ | ✓ -| Kube API audit logs | ✓ | ✓ -| OpenShift API audit logs | ✓ | ✓ -| Open Virtual Network (OVN) audit logs| ✓ | ✓ -|=============================================================== - -.Authorization and Authentication -[options="header"] -|================================================================= -| Feature | Fluentd | Vector -| Elasticsearch certificates | ✓ | ✓ -| Elasticsearch username / password | ✓ | ✓ -| Amazon Cloudwatch keys | ✓ | ✓ -| Amazon Cloudwatch STS | ✓ | ✓ -| Kafka certificates | ✓ | ✓ -| Kafka username / password | ✓ | ✓ -| Kafka SASL | ✓ | ✓ -| Loki bearer token | ✓ | ✓ -|================================================================= - -.Normalizations and Transformations -[options="header"] -|============================================================================ -| Feature | Fluentd | Vector -| Viaq data model - app | ✓ | ✓ -| Viaq data model - infra | ✓ | ✓ -| Viaq data model - infra(journal) | ✓ | ✓ -| Viaq data model - Linux audit | ✓ | ✓ -| Viaq data model - kube-apiserver audit | ✓ | ✓ -| Viaq data model - OpenShift API audit | ✓ | ✓ -| Viaq data model - OVN | ✓ | ✓ -| Loglevel Normalization | ✓ | ✓ -| JSON parsing | ✓ | ✓ -| Structured Index | ✓ | ✓ -| Multiline error detection | ✓ | ✓ -| Multicontainer / split indices | ✓ | ✓ -| Flatten labels | ✓ | ✓ -| CLF static labels | ✓ | ✓ -|============================================================================ - -.Tuning -[options="header"] -|========================================================== -| Feature | Fluentd | Vector -| Fluentd readlinelimit | ✓ | -| Fluentd buffer | ✓ | -| - chunklimitsize | ✓ | -| - totallimitsize | ✓ | -| - overflowaction | ✓ | -| - flushthreadcount | ✓ | -| - flushmode | ✓ | -| - flushinterval | ✓ | -| - retrywait | ✓ | -| - retrytype | ✓ | -| - retrymaxinterval | ✓ | -| - retrytimeout | ✓ | -|========================================================== - -.Visibility -[options="header"] -|===================================================== -| Feature | Fluentd | Vector -| Metrics | ✓ | ✓ -| Dashboard | ✓ | ✓ -| Alerts | ✓ | ✓ -|===================================================== - -.Miscellaneous -[options="header"] -|=========================================================== -| Feature | Fluentd | Vector -| Global proxy support | ✓ | ✓ -| x86 support | ✓ | ✓ -| ARM support | ✓ | ✓ -| {ibm-power-name} support | ✓ | ✓ -| {ibm-z-name} support | ✓ | ✓ -| IPv6 support | ✓ | ✓ -| Log event buffering | ✓ | -| Disconnected Cluster | ✓ | ✓ -|=========================================================== diff --git a/modules/loki-create-object-storage-secret-cli.adoc b/modules/loki-create-object-storage-secret-cli.adoc deleted file mode 
100644 index c2f1d3170b55..000000000000 --- a/modules/loki-create-object-storage-secret-cli.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * list assemblies - -:_mod-docs-content-type: PROCEDURE -[id="loki-create-object-storage-secret-cli_{context}"] -= Creating a secret for Loki object storage by using the CLI - -To configure Loki object storage, you must create a secret. You can do this by using the {oc-first}. - -.Prerequisites - -* You have administrator permissions. -* You installed the {loki-op}. -* You installed the {oc-first}. - -.Procedure - -* Create a secret in the directory that contains your certificate and key files by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic -n openshift-logging \ - --from-file=tls.key= - --from-file=tls.crt= - --from-file=ca-bundle.crt= - --from-literal=username= - --from-literal=password= ----- - -[NOTE] -==== -Use generic or opaque secrets for best results. -==== - -.Verification - -* Verify that a secret was created by running the following command: -+ -[source,terminal] ----- -$ oc get secrets ----- diff --git a/modules/loki-create-object-storage-secret-console.adoc b/modules/loki-create-object-storage-secret-console.adoc deleted file mode 100644 index 15a988c31907..000000000000 --- a/modules/loki-create-object-storage-secret-console.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * list assemblies - -:_mod-docs-content-type: PROCEDURE -[id="loki-create-object-storage-secret-console_{context}"] -= Creating a secret for Loki object storage by using the web console - -To configure Loki object storage, you must create a secret. You can create a secret by using the {product-title} web console. - -.Prerequisites - -* You have administrator permissions. -* You have access to the {product-title} web console. -* You installed the {loki-op}. - -.Procedure - -. Go to *Workloads* -> *Secrets* in the *Administrator* perspective of the {product-title} web console. - -. From the *Create* drop-down list, select *From YAML*. - -. Create a secret that uses the `access_key_id` and `access_key_secret` fields to specify your credentials and the `bucketnames`, `endpoint`, and `region` fields to define the object storage location. AWS is used in the following example: -+ -.Example `Secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: logging-loki-s3 - namespace: openshift-logging -stringData: - access_key_id: AKIAIOSFODNN7EXAMPLE - access_key_secret: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - bucketnames: s3-bucket-name - endpoint: https://s3.eu-central-1.amazonaws.com - region: eu-central-1 ----- diff --git a/modules/loki-rate-limit-errors.adoc b/modules/loki-rate-limit-errors.adoc deleted file mode 100644 index cf23ff428900..000000000000 --- a/modules/loki-rate-limit-errors.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module is included in the following assemblies: -// * logging/cluster-logging-loki.adoc -// * observability/logging/log_collection_forwarding/log-forwarding.adoc -// * observability/logging/troubleshooting/log-forwarding-troubleshooting.adoc - -:_mod-docs-content-type: PROCEDURE -[id="loki-rate-limit-errors_{context}"] -= Troubleshooting Loki rate limit errors - -If the Log Forwarder API forwards a large block of messages that exceeds the rate limit to Loki, Loki generates rate limit (`429`) errors. - -These errors can occur during normal operation. 
For example, when adding the {logging} to a cluster that already has some logs, rate limit errors might occur while the {logging} tries to ingest all of the existing log entries. In this case, if the rate of addition of new logs is less than the total rate limit, the historical data is eventually ingested, and the rate limit errors are resolved without requiring user intervention. - -In cases where the rate limit errors continue to occur, you can fix the issue by modifying the `LokiStack` custom resource (CR). - -[IMPORTANT] -==== -The `LokiStack` CR is not available on Grafana-hosted Loki. This topic does not apply to Grafana-hosted Loki servers. -==== - -.Conditions - -* The Log Forwarder API is configured to forward logs to Loki. - -* Your system sends a block of messages that is larger than 2 MB to Loki. For example: -+ -[source,text] ----- -"values":[["1630410392689800468","{\"kind\":\"Event\",\"apiVersion\":\ -....... -...... -...... -...... -\"received_at\":\"2021-08-31T11:46:32.800278+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-08-31T11:46:32.799692+00:00\",\"viaq_index_name\":\"audit-write\",\"viaq_msg_id\":\"MzFjYjJkZjItNjY0MC00YWU4LWIwMTEtNGNmM2E5ZmViMGU4\",\"log_type\":\"audit\"}"]]}]} ----- - -* After you enter `oc logs -n openshift-logging -l component=collector`, the collector logs in your cluster show a line containing one of the following error messages: -+ -[source,text] ----- -429 Too Many Requests Ingestion rate limit exceeded ----- -+ -.Example Vector error message -[source,text] ----- -2023-08-25T16:08:49.301780Z WARN sink{component_kind="sink" component_id=default_loki_infra component_type=loki component_name=default_loki_infra}: vector::sinks::util::retries: Retrying after error. error=Server responded with an error: 429 Too Many Requests internal_log_rate_limit=true ----- -+ -.Example Fluentd error message -[source,text] ----- -2023-08-30 14:52:15 +0000 [warn]: [default_loki_infra] failed to flush the buffer. retry_times=2 next_retry_time=2023-08-30 14:52:19 +0000 chunk="604251225bf5378ed1567231a1c03b8b" error_class=Fluent::Plugin::LokiOutput::LogPostError error="429 Too Many Requests Ingestion rate limit exceeded for user infrastructure (limit: 4194304 bytes/sec) while attempting to ingest '4082' lines totaling '7820025' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased\n" ----- -+ -The error is also visible on the receiving end. For example, in the LokiStack ingester pod: -+ -.Example Loki ingester error message -[source,text] ----- -level=warn ts=2023-08-30T14:57:34.155592243Z caller=grpc_logging.go:43 duration=1.434942ms method=/logproto.Pusher/Push err="rpc error: code = Code(429) desc = entry with timestamp 2023-08-30 14:57:32.012778399 +0000 UTC ignored, reason: 'Per stream rate limit exceeded (limit: 3MB/sec) while attempting to ingest for stream ----- - -.Procedure - -* Update the `ingestionBurstSize` and `ingestionRate` fields in the `LokiStack` CR: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - limits: - global: - ingestion: - ingestionBurstSize: 16 # <1> - ingestionRate: 8 # <2> -# ... ----- -<1> The `ingestionBurstSize` field defines the maximum local rate-limited sample size per distributor replica in MB. This value is a hard limit. Set this value to at least the maximum logs size expected in a single push request. 
Single requests that are larger than the `ingestionBurstSize` value are not permitted. -<2> The `ingestionRate` field is a soft limit on the maximum amount of ingested samples per second in MB. Rate limit errors occur if the rate of logs exceeds the limit, but the collector retries sending the logs. As long as the total average is lower than the limit, the system recovers and errors are resolved without user intervention. diff --git a/modules/loki-rbac-rules-permissions.adoc b/modules/loki-rbac-rules-permissions.adoc deleted file mode 100644 index 0b527c6572dd..000000000000 --- a/modules/loki-rbac-rules-permissions.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_alerts/custom-logging-alerts.adoc - -:_mod-docs-content-type: REFERENCE -[id="loki-rbac-rules-permissions_{context}"] -= Authorizing LokiStack rules RBAC permissions - -Administrators can allow users to create and manage their own alerting and recording rules by binding cluster roles to usernames. -Cluster roles are defined as `ClusterRole` objects that contain necessary role-based access control (RBAC) permissions for users. - -In logging 5.8 and later, the following cluster roles for alerting and recording rules are available for LokiStack: - -[options="header"] -|=== -|Rule name |Description - -|`alertingrules.loki.grafana.com-v1-admin` -|Users with this role have administrative-level access to manage alerting rules. This cluster role grants permissions to create, read, update, delete, list, and watch `AlertingRule` resources within the `loki.grafana.com/v1` API group. - -|`alertingrules.loki.grafana.com-v1-crdview` -|Users with this role can view the definitions of Custom Resource Definitions (CRDs) related to `AlertingRule` resources within the `loki.grafana.com/v1` API group, but do not have permissions for modifying or managing these resources. - -|`alertingrules.loki.grafana.com-v1-edit` -|Users with this role have permission to create, update, and delete `AlertingRule` resources. - -|`alertingrules.loki.grafana.com-v1-view` -|Users with this role can read `AlertingRule` resources within the `loki.grafana.com/v1` API group. They can inspect configurations, labels, and annotations for existing alerting rules but cannot make any modifications to them. - -|`recordingrules.loki.grafana.com-v1-admin` -|Users with this role have administrative-level access to manage recording rules. This cluster role grants permissions to create, read, update, delete, list, and watch `RecordingRule` resources within the `loki.grafana.com/v1` API group. - -|`recordingrules.loki.grafana.com-v1-crdview` -|Users with this role can view the definitions of Custom Resource Definitions (CRDs) related to `RecordingRule` resources within the `loki.grafana.com/v1` API group, but do not have permissions for modifying or managing these resources. - -|`recordingrules.loki.grafana.com-v1-edit` -|Users with this role have permission to create, update, and delete `RecordingRule` resources. - -|`recordingrules.loki.grafana.com-v1-view` -|Users with this role can read `RecordingRule` resources within the `loki.grafana.com/v1` API group. They can inspect configurations, labels, and annotations for existing alerting rules but cannot make any modifications to them. - -|=== - -[id="loki-rbac-rules-permissions-examples"] -== Examples - -To apply cluster roles for a user, you must bind an existing cluster role to a specific username. 
- -Cluster roles can be cluster or namespace scoped, depending on which type of role binding you use. -When a `RoleBinding` object is used, as when using the `oc adm policy add-role-to-user` command, the cluster role only applies to the specified namespace. -When a `ClusterRoleBinding` object is used, as when using the `oc adm policy add-cluster-role-to-user` command, the cluster role applies to all namespaces in the cluster. - -The following example command gives the specified user create, read, update and delete (CRUD) permissions for alerting rules in a specific namespace in the cluster: - -.Example cluster role binding command for alerting rule CRUD permissions in a specific namespace -[source,terminal] ----- -$ oc adm policy add-role-to-user alertingrules.loki.grafana.com-v1-admin -n ----- - -The following command gives the specified user administrator permissions for alerting rules in all namespaces: - -.Example cluster role binding command for administrator permissions -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user alertingrules.loki.grafana.com-v1-admin ----- diff --git a/modules/redeploying-fluentd-pods.adoc b/modules/redeploying-fluentd-pods.adoc deleted file mode 100644 index 2b22053c5219..000000000000 --- a/modules/redeploying-fluentd-pods.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/troubleshooting/log-forwarding-troubleshooting.adoc - -:_mod-docs-content-type: PROCEDURE -[id="redeploying-fluentd-pods_{context}"] -= Redeploying Fluentd pods - -When you create a `ClusterLogForwarder` custom resource (CR), if the {clo} does not redeploy the Fluentd pods automatically, you can delete the Fluentd pods to force them to redeploy. - -.Prerequisites - -* You have created a `ClusterLogForwarder` custom resource (CR) object. - -.Procedure - -* Delete the Fluentd pods to force them to redeploy by running the following command: -+ -[source,terminal] ----- -$ oc delete pod --selector logging-infra=collector ----- diff --git a/modules/ref_cluster-logging-elasticsearch-cluster-status.adoc b/modules/ref_cluster-logging-elasticsearch-cluster-status.adoc deleted file mode 100644 index bd05f19b6172..000000000000 --- a/modules/ref_cluster-logging-elasticsearch-cluster-status.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_module-type: REFERENCE - -[id="ref_cluster-logging-elasticsearch-cluster-status_{context}"] -= Elasticsearch cluster status - -[role="_abstract"] -A dashboard in the *Observe* section of the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] -displays the status of the Elasticsearch cluster. - -To get the status of the OpenShift Elasticsearch cluster, visit the dashboard in the *Observe* section of the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] -at -`/monitoring/dashboards/grafana-dashboard-cluster-logging`. - -.Elasticsearch status fields - -`eo_elasticsearch_cr_cluster_management_state`:: Shows whether the Elasticsearch cluster is in a managed or unmanaged state. 
For example: -+ -[source,terminal] ----- -eo_elasticsearch_cr_cluster_management_state{state="managed"} 1 -eo_elasticsearch_cr_cluster_management_state{state="unmanaged"} 0 ----- - -`eo_elasticsearch_cr_restart_total`:: Shows the number of times the Elasticsearch nodes have restarted for certificate restarts, rolling restarts, or scheduled restarts. For example: -+ -[source,terminal] ----- -eo_elasticsearch_cr_restart_total{reason="cert_restart"} 1 -eo_elasticsearch_cr_restart_total{reason="rolling_restart"} 1 -eo_elasticsearch_cr_restart_total{reason="scheduled_restart"} 3 ----- - -`es_index_namespaces_total`:: Shows the total number of Elasticsearch index namespaces. For example: -+ -[source,terminal] ----- -Total number of Namespaces. -es_index_namespaces_total 5 ----- - -`es_index_document_count`:: Shows the number of records for each namespace. For example: -+ -[source,terminal] ----- -es_index_document_count{namespace="namespace_1"} 25 -es_index_document_count{namespace="namespace_2"} 10 -es_index_document_count{namespace="namespace_3"} 5 ----- - -.The "Secret Elasticsearch fields are either missing or empty" message - -If Elasticsearch is missing the `admin-cert`, `admin-key`, `logging-es.crt`, or `logging-es.key` files, the dashboard shows a status message similar to the following example: - -[source,terminal] ----- -message": "Secret \"elasticsearch\" fields are either missing or empty: [admin-cert, admin-key, logging-es.crt, logging-es.key]", -"reason": "Missing Required Secrets", ----- diff --git a/modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc b/modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc deleted file mode 100644 index 3f52f70ba95d..000000000000 --- a/modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc +++ /dev/null @@ -1,185 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/log-forwarding.adoc - -:_mod-docs-content-type: PROCEDURE - -[id="rosa-cluster-logging-collector-log-forward-sts-cloudwatch_{context}"] -= Forwarding logs to Amazon CloudWatch from STS enabled clusters - -For clusters with AWS Security Token Service (STS) enabled, you must create the AWS IAM roles and policies that enable log forwarding, and a `ClusterLogForwarder` custom resource (CR) with an output for CloudWatch. - -.Prerequisites - -* {logging-title-uc}: 5.5 and later - -.Procedure -. Prepare the AWS account: -.. Create an IAM policy JSON file with the following content: -+ -[source,json] ----- -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:DescribeLogGroups", - "logs:DescribeLogStreams", - "logs:PutLogEvents", - "logs:PutRetentionPolicy" - ], - "Resource": "arn:aws:logs:*:*:*" - } - ] -} ----- -+ -.. Create an IAM trust JSON file with the following content: -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam:::oidc-provider/" <1> - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - ":sub": "system:serviceaccount:openshift-logging:logcollector" <2> - } - } - } - ] -} ----- -+ --- -<1> Specify your AWS account ID and the OpenShift OIDC provider endpoint. 
Obtain the endpoint by running the following command: -+ -[source,terminal] ----- -$ rosa describe cluster \ - -c $(oc get clusterversion -o jsonpath='{.items[].spec.clusterID}{"\n"}') \ - -o yaml | awk '/oidc_endpoint_url/ {print $2}' | cut -d '/' -f 3,4 ----- -+ -<2> Specify the OpenShift OIDC endpoint again. --- - -.. Create the IAM role: -+ -[source,terminal] ----- -$ aws iam create-role \ - --role-name "-RosaCloudWatch" \ - --assume-role-policy-document file://.json \ - --query Role.Arn \ - --output text ----- -+ -Save the output. You will use it in the next steps. -+ -.. Create the IAM policy: -+ -[source,terminal] ----- -$ aws iam create-policy \ ---policy-name "RosaCloudWatch" \ ---policy-document file:///.json \ ---query Policy.Arn \ ---output text ----- -+ -Save the output. You will use it in the next steps. - -.. Attach the IAM policy to the IAM role: -+ -[source,terminal] ----- -$ aws iam attach-role-policy \ - --role-name "-RosaCloudWatch" \ - --policy-arn <1> ----- -+ -<1> Replace `policy_ARN` with the output you saved while creating the policy. - -. Create a `Secret` YAML file for the {clo}: -+ --- -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: cloudwatch-credentials - namespace: openshift-logging -stringData: - credentials: |- - [default] - sts_regional_endpoints = regional - role_arn: <1> - web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token ----- -<1> Replace `role_ARN` with the output you saved while creating the role. --- - -. Create the secret: -+ -[source,terminal] ----- -$ oc apply -f cloudwatch-credentials.yaml ----- - -. Create or edit a `ClusterLogForwarder` custom resource: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: <3> - outputs: - - name: cw <4> - type: cloudwatch <5> - cloudwatch: - groupBy: logType <6> - groupPrefix: <7> - region: us-east-2 <8> - secret: - name: <9> - pipelines: - - name: to-cloudwatch <10> - inputRefs: <11> - - infrastructure - - audit - - application - outputRefs: - - cw <12> ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the `cloudwatch` type. -<6> Optional: Specify how to group the logs: -+ -* `logType` creates log groups for each log type -* `namespaceName` creates a log group for each application namespace. Infrastructure and audit logs are unaffected, remaining grouped by `logType`. -* `namespaceUUID` creates a new log group for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. -<7> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. -<8> Specify the AWS region. -<9> Specify the name of the secret you created previously. -<10> Optional: Specify a name for the pipeline. -<11> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. -<12> Specify the name of the output to use when forwarding logs with this pipeline.
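The deleted module above ends once the `ClusterLogForwarder` CR is applied. As an editor-added, illustrative verification sketch (not part of the original module), you can check from a workstation with the AWS CLI that CloudWatch log groups are being created. The `<group_prefix>` placeholder is an assumption standing in for your cluster's `infrastructureName` or the `groupPrefix` value you configured, and the region must match the `region` field in the CR:

[source,terminal]
----
# Illustrative check only: list CloudWatch log groups created by the forwarder.
# <group_prefix> is a hypothetical placeholder for your cluster's prefix.
$ aws logs describe-log-groups \
  --region us-east-2 \
  --log-group-name-prefix <group_prefix> \
  --query 'logGroups[].logGroupName' \
  --output text
----

If log groups such as `<group_prefix>.audit` do not appear after a few minutes, reviewing the collector pod logs in the `openshift-logging` namespace is a reasonable next step.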
diff --git a/modules/supported-log-outputs.adoc b/modules/supported-log-outputs.adoc deleted file mode 100644 index 8161720aceb7..000000000000 --- a/modules/supported-log-outputs.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * observability/logging/log_collection_forwarding/logging-output-types.adoc - -:_mod-docs-content-type: REFERENCE -[id="supported-log-outputs_{context}"] -= Supported log forwarding outputs - -Outputs can be any of the following types: - -.Supported log output types -[cols="5",options="header"] -|=== -|Output type -|Protocol -|Tested with -|Logging versions -|Supported collector type - -|Elasticsearch v6 -|HTTP 1.1 -|6.8.1, 6.8.23 -|5.6+ -|Fluentd, Vector - -|Elasticsearch v7 -|HTTP 1.1 -|7.12.2, 7.17.7, 7.10.1 -|5.6+ -|Fluentd, Vector - -|Elasticsearch v8 -|HTTP 1.1 -|8.4.3, 8.6.1 -|5.6+ -|Fluentd ^[1]^, Vector - -|Fluent Forward -|Fluentd forward v1 -|Fluentd 1.14.6, Logstash 7.10.1, Fluentd 1.14.5 -|5.4+ -|Fluentd - -|{gcp-full} Logging -|REST over HTTPS -|Latest -|5.7+ -|Vector - -|HTTP -|HTTP 1.1 -|Fluentd 1.14.6, Vector 0.21 -|5.7+ -|Fluentd, Vector - -|Kafka -|Kafka 0.11 -|Kafka 2.4.1, 2.7.0, 3.3.1 -|5.4+ -|Fluentd, Vector - -|Loki -|REST over HTTP and HTTPS -|2.3.0, 2.5.0, 2.7, 2.2.1 -|5.4+ -|Fluentd, Vector - -|Splunk -|HEC -|8.2.9, 9.0.0 -|5.7+ -|Vector - -|Syslog -|RFC3164, RFC5424 -|Rsyslog 8.37.0-9.el7, rsyslog-8.39.0 -|5.4+ -|Fluentd, Vector ^[2]^ - -|Amazon CloudWatch -|REST over HTTPS -|Latest -|5.4+ -|Fluentd, Vector -|=== -[.small] --- -1. Fluentd does not support Elasticsearch 8 in the {logging} version 5.6.2. -2. Vector supports Syslog in the {logging} version 5.7 and higher. --- diff --git a/observability/logging/about-logging.adoc b/observability/logging/about-logging.adoc index 09fa38f00a1a..9614cc51187e 100644 --- a/observability/logging/about-logging.adoc +++ b/observability/logging/about-logging.adoc @@ -7,7 +7,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] -As a cluster administrator, you can deploy {logging} on your {product-title} cluster, and use it to collect and aggregate node system audit logs, application container logs, and infrastructure logs. +As a cluster administrator, you can deploy {logging} on an {product-title} cluster, and use it to collect and aggregate node system audit logs, application container logs, and infrastructure logs. 
You can use {logging} to perform the following tasks: diff --git a/observability/logging/api_reference/_attributes b/observability/logging/api_reference/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/api_reference/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/api_reference/images b/observability/logging/api_reference/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/api_reference/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/api_reference/logging-5-6-reference.adoc b/observability/logging/api_reference/logging-5-6-reference.adoc deleted file mode 100644 index b28f99945c7c..000000000000 --- a/observability/logging/api_reference/logging-5-6-reference.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="logging-5-6-reference"] -= 5.6 Logging API reference -include::_attributes/common-attributes.adoc[] -:context: logging-5-6-reference - -toc::[] - -include::modules/logging-5.6-api-ref.adoc[leveloffset=+1] diff --git a/observability/logging/api_reference/logging-5-7-reference.adoc b/observability/logging/api_reference/logging-5-7-reference.adoc deleted file mode 100644 index 9f1b05a9d000..000000000000 --- a/observability/logging/api_reference/logging-5-7-reference.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="logging-5-7-reference"] -= 5.7 Logging API reference -include::_attributes/common-attributes.adoc[] -:context: logging-5-7-reference - -toc::[] - -Many factors, including hosted cluster workload and worker node count, affect how many hosted clusters can fit within a certain number of control-plane nodes. Use this sizing guide to help with hosted cluster capacity planning. This guidance assumes a highly available {hcp} topology. The load-based sizing examples were measured on a bare-metal cluster. Cloud-based instances might have different limiting factors, such as memory size. - -You can override the following resource utilization sizing measurements and disable the metric service monitoring. - -See the following highly available {hcp} requirements, which were tested with {product-title} version 4.12.9 and later: - -* 78 pods -* Three 8 GiB PVs for etcd -* Minimum vCPU: approximately 5.5 cores -* Minimum memory: approximately 19 GiB - -// [role="_additional-resources"] -// .Additional resources - -// * For more information about disabling the metric service monitoring, see xref:../../hosted_control_planes/hcp-prepare/hcp-override-resource-util.adoc#hcp-override-resource-util[Overriding resource utilization measurements]. -// * For more information about highly available {hcp} topology, see xref:../../hosted_control_planes/hcp-prepare/hcp-distribute-workloads.adoc#hcp-distribute-workloads[Distributing hosted cluster workloads]. - -include::modules/hcp-pod-limits.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information about supported identity providers, see xref:../../nodes/nodes/nodes-nodes-managing-max-pods.adoc#nodes-nodes-managing-max-pods-proc_nodes-nodes-managing-max-pods[Configuring the maximum number of pods per node] in _Managing the maximum number of pods per node_. 
- -include::modules/hcp-resource-limit.adoc[leveloffset=+1] -include::modules/hcp-load-based-limit.adoc[leveloffset=+1] -include::modules/hcp-sizing-calculation.adoc[leveloffset=+1] \ No newline at end of file diff --git a/observability/logging/api_reference/logging-5-8-reference.adoc b/observability/logging/api_reference/logging-5-8-reference.adoc deleted file mode 100644 index 8fb45f49a6c9..000000000000 --- a/observability/logging/api_reference/logging-5-8-reference.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="logging-5-8-reference"] -= 5.8 Logging API reference -include::_attributes/common-attributes.adoc[] -:context: logging-5-8-reference - -toc::[] diff --git a/observability/logging/api_reference/modules b/observability/logging/api_reference/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/api_reference/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/api_reference/snippets b/observability/logging/api_reference/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/api_reference/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/cluster-logging-deploying.adoc b/observability/logging/cluster-logging-deploying.adoc deleted file mode 100644 index e2c7bc5f92ec..000000000000 --- a/observability/logging/cluster-logging-deploying.adoc +++ /dev/null @@ -1,54 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-deploying -[id="cluster-logging-deploying"] -= Installing Logging -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{Product-title} Operators use custom resources (CR) to manage applications and their components. High-level configuration and settings are provided by the user within a CR. The Operator translates high-level directives into low-level actions, based on best practices embedded within the Operator’s logic. A custom resource definition (CRD) defines a CR and lists all the configurations available to users of the Operator. Installing an Operator creates the CRDs, which are then used to generate CRs. - -[IMPORTANT] -==== -You must install the {clo} *after* the log store Operator. -==== - -You deploy {logging} by installing the {loki-op} or {es-op} to manage your log store, followed by the {clo} to manage the components of logging. You can use either the {product-title} web console or the {product-title} CLI to install or configure {logging}. - -include::snippets/logging-elastic-dep-snip.adoc[leveloffset=+1] - -[TIP] -==== -You can alternatively apply all example objects. -==== - -ifdef::openshift-origin[] -[id="prerequisites_cluster-logging-deploying"] -== Prerequisites -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red{nbsp}Hat Operators_. 
-endif::[] - -include::modules/logging-es-deploy-console.adoc[leveloffset=+1] - -include::modules/logging-es-deploy-cli.adoc[leveloffset=+1] - --- -include::snippets/logging-retention-period-snip.adoc[leveloffset=+1] --- - -include::modules/logging-loki-cli-install.adoc[leveloffset=+1] - -include::modules/logging-loki-gui-install.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc#ovn-k-network-policy[About OVN-Kubernetes network policy] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.html[About the OVN-Kubernetes default Container Network Interface (CNI) network provider] -endif::[] diff --git a/observability/logging/cluster-logging-exported-fields.adoc b/observability/logging/cluster-logging-exported-fields.adoc deleted file mode 100644 index 62eb8b902338..000000000000 --- a/observability/logging/cluster-logging-exported-fields.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cluster-logging-exported-fields"] -= Log Record Fields -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: cluster-logging-exported-fields - -toc::[] - -The following fields can be present in log records exported by the {logging}. Although log records are typically formatted as JSON objects, the same data model can be applied to other encodings. - -To search these fields from Elasticsearch and Kibana, use the full dotted field name when searching. For example, with an Elasticsearch */_search URL*, to look for a Kubernetes pod name, use `/_search/q=kubernetes.pod_name:name-of-my-pod`. - -// The logging system can parse JSON-formatted log entries to external systems. These log entries are formatted as a fluentd message with extra fields such as `kubernetes`. The fields exported by the logging system and available for searching from Elasticsearch and Kibana are documented at the end of this document. - -include::modules/cluster-logging-exported-fields-top-level-fields.adoc[leveloffset=0] - -include::modules/cluster-logging-exported-fields-kubernetes.adoc[leveloffset=0] - -// add modules/cluster-logging-exported-fields-openshift when available diff --git a/observability/logging/cluster-logging-support.adoc b/observability/logging/cluster-logging-support.adoc deleted file mode 100644 index 47ea921ec0bd..000000000000 --- a/observability/logging/cluster-logging-support.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cluster-logging-support"] -include::_attributes/common-attributes.adoc[] -= Support -:context: cluster-logging-support - -toc::[] - -include::snippets/logging-supported-config-snip.adoc[] -include::snippets/logging-compatibility-snip.adoc[] - -{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. You can use it to forward logs to various supported systems. 
- -{logging-uc} is not: - -* A high scale log collection system -* Security Information and Event Monitoring (SIEM) compliant -* A "bring your own" (BYO) log collector configuration -* Historical or long term log retention or storage -* A guaranteed log sink -* Secure storage - audit logs are not stored by default - -[id="cluster-logging-support-CRDs_{context}"] -== Supported API custom resource definitions - -The following table describes the supported {logging-uc} APIs. - -.Loki API support states -[cols="3",options="header"] -|=== -|Custom resource definition (CRD) -|`ApiVersion` -|Support state - -|`LokiStack` -|`lokistack.loki.grafana.com/v1` -|Supported from 5.5 - -|`RulerConfig` -|`rulerconfig.loki.grafana/v1` -|Supported from 5.7 - -|`AlertingRule` -|`alertingrule.loki.grafana/v1` -|Supported from 5.7 - -|`RecordingRule` -|`recordingrule.loki.grafana/v1` -|Supported from 5.7 - -|`LogFileMetricExporter` -|`LogFileMetricExporter.logging.openshift.io/v1alpha1` -|Supported from 5.8 - -|`ClusterLogForwarder` -|`clusterlogforwarder.logging.openshift.io/v1` -|Supported from 4.5 -|=== - -include::modules/cluster-logging-maintenance-support-list.adoc[leveloffset=+1] -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="cluster-logging-support-must-gather_{context}"] -== Collecting logging data for Red{nbsp}Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red{nbsp}Hat Support. - -You can use the xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. -For prompt support, supply diagnostic information for both {product-title} and {logging}. - -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+2] -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+2] diff --git a/observability/logging/cluster-logging-uninstall.adoc b/observability/logging/cluster-logging-uninstall.adoc deleted file mode 100644 index 1f53a2c7eaa0..000000000000 --- a/observability/logging/cluster-logging-uninstall.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-uninstall -[id="cluster-logging-uninstall"] -= Uninstalling Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can remove {logging} from your {product-title} cluster by removing installed Operators and related custom resources (CRs). 
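The uninstall assembly above summarizes the flow, removing the installed Operators and the related custom resources, and defers the details to the included modules that follow. As an editor-added sketch only, assuming the legacy `instance` CR names in the `openshift-logging` namespace that are used elsewhere in this diff, removing the custom resources before uninstalling the Operators can look like this:

[source,terminal]
----
# Editor-added sketch: remove the logging CRs before uninstalling the Operators.
# Assumes the legacy "instance" CR names in the openshift-logging namespace.
$ oc delete clusterlogforwarder instance -n openshift-logging
$ oc delete clusterlogging instance -n openshift-logging
----

The included modules remain the authoritative steps, including reclaiming persistent volume claims and removing the Operators themselves.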
- -include::modules/uninstall-cluster-logging-operator.adoc[leveloffset=+1] -include::modules/uninstall-logging-delete-pvcs.adoc[leveloffset=+1] -include::modules/uninstall-loki-operator.adoc[leveloffset=+1] -include::modules/uninstall-es-operator.adoc[leveloffset=+1] - -//Generic deleting operators from a cluster using CLI -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../storage/understanding-persistent-storage.adoc#reclaim-manual_understanding-persistent-storage[Reclaiming a persistent volume manually] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.html#reclaim-manual_understanding-persistent-storage[Reclaiming a persistent volume manually] -endif::[] diff --git a/observability/logging/cluster-logging-upgrading.adoc b/observability/logging/cluster-logging-upgrading.adoc deleted file mode 100644 index 3f01a00730b3..000000000000 --- a/observability/logging/cluster-logging-upgrading.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-upgrading -include::_attributes/common-attributes.adoc[] -[id="cluster-logging-upgrading"] -= Updating Logging - -toc::[] - -There are two types of {logging} updates: minor release updates (5.y.z) and major release updates (5.y). - -[id="cluster-logging-upgrading-minor"] -== Minor release updates - -If you installed the {logging} Operators using the *Automatic* update approval option, your Operators receive minor version updates automatically. You do not need to complete any manual update steps. - -If you installed the {logging} Operators using the *Manual* update approval option, you must manually approve minor version updates. For more information, see xref:../../operators/admin/olm-upgrading-operators.adoc#olm-approving-pending-upgrade_olm-upgrading-operators[Manually approving a pending Operator update]. - -[id="cluster-logging-upgrading-major"] -== Major release updates - -For major version updates you must complete some manual steps. - -For major release version compatibility and support information, see link:https://access.redhat.com/support/policy/updates/openshift_operators#platform-agnostic[OpenShift Operator Life Cycles]. - -include::modules/logging-operator-upgrading-all-ns.adoc[leveloffset=+1] - -include::modules/logging-upgrading-clo.adoc[leveloffset=+1] - -include::modules/logging-upgrading-loki.adoc[leveloffset=+1] - -include::modules/logging-upgrading-loki-schema.adoc[leveloffset=+1] - -include::modules/cluster-logging-upgrading-elasticsearch.adoc[leveloffset=+1] diff --git a/observability/logging/cluster-logging.adoc b/observability/logging/cluster-logging.adoc deleted file mode 100644 index 49e3049e6fde..000000000000 --- a/observability/logging/cluster-logging.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="cluster-logging"] -= About Logging -:context: cluster-logging - -toc::[] - -As a cluster administrator, you can deploy {logging} on an {product-title} cluster, and use it to collect and aggregate node system audit logs, application container logs, and infrastructure logs. You can forward logs to your chosen log outputs, including on-cluster, Red{nbsp}Hat managed log storage. 
You can also visualize your log data in the {product-title} web console, or the Kibana web console, depending on your deployed log storage solution. - -include::snippets/logging-kibana-dep-snip.adoc[] - -{product-title} cluster administrators can deploy {logging} by using Operators. For information, see xref:../../observability/logging/cluster-logging-deploying.adoc#cluster-logging-deploying[Installing logging]. - -The Operators are responsible for deploying, upgrading, and maintaining {logging}. After the Operators are installed, you can create a `ClusterLogging` custom resource (CR) to schedule {logging} pods and other resources necessary to support {logging}. You can also create a `ClusterLogForwarder` CR to specify which logs are collected, how they are transformed, and where they are forwarded to. - -[NOTE] -==== -Because the internal {product-title} Elasticsearch log store does not provide secure storage for audit logs, audit logs are not stored in the internal Elasticsearch instance by default. If you want to send the audit logs to the default internal Elasticsearch log store, for example to view the audit logs in Kibana, you must use the Log Forwarding API as described in xref:../../observability/logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Forward audit logs to the log store]. -==== - -include::modules/logging-architecture-overview.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../observability/logging/log_visualization/log-visualization-ocp-console.adoc#log-visualization-ocp-console[Log visualization with the web console] - -include::modules/cluster-logging-about.adoc[leveloffset=+1] - -ifdef::openshift-rosa,openshift-dedicated[] -include::modules/cluster-logging-cloudwatch.adoc[leveloffset=+1] -For information, see xref:../../observability/logging/log_collection_forwarding/log-forwarding.adoc#about-log-collection_log-forwarding[About log collection and forwarding]. -endif::[] - -include::modules/cluster-logging-json-logging-about.adoc[leveloffset=+2] - -include::modules/cluster-logging-collecting-storing-kubernetes-events.adoc[leveloffset=+2] - -For information, see xref:../../observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[Collecting and storing Kubernetes events]. - -include::modules/cluster-logging-troubleshoot-logging.adoc[leveloffset=+2] - -include::modules/cluster-logging-export-fields.adoc[leveloffset=+2] - -For information, see xref:../../observability/logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields[Log record fields]. - -include::modules/cluster-logging-eventrouter-about.adoc[leveloffset=+2] - -For information, see xref:../../observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[Collecting and storing Kubernetes events]. 
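The `About Logging` assembly above mentions creating a `ClusterLogForwarder` CR to control which logs are collected and where they are forwarded, but the examples live in other modules. The following editor-added sketch is modeled on the CR examples elsewhere in this diff (the `logging.openshift.io/v1` API and the legacy `instance` naming); the `loki-external` output name and its URL are assumptions used only for illustration:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance              # legacy implementations require this name
  namespace: openshift-logging
spec:
  outputs:
  - name: loki-external       # assumed output name for illustration
    type: loki
    url: https://loki.example.com:3100   # assumed external Loki endpoint
  pipelines:
  - name: application-logs
    inputRefs:
    - application
    outputRefs:
    - loki-external
----

A corresponding `ClusterLogging` CR schedules the collector and the other logging components, as shown in the sample CR later in this section.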
diff --git a/observability/logging/config/_attributes b/observability/logging/config/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/config/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/config/cluster-logging-configuring.adoc b/observability/logging/config/cluster-logging-configuring.adoc deleted file mode 100644 index a19b38c7f7af..000000000000 --- a/observability/logging/config/cluster-logging-configuring.adoc +++ /dev/null @@ -1,81 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-configuring -[id="cluster-logging-configuring"] -= Configuring OpenShift Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -{logging-title-uc} is configurable using a `ClusterLogging` custom resource (CR) deployed -in the `openshift-logging` project. - -The {clo} watches for changes to the `ClusterLogging` CR, -creates any missing logging components, and adjusts the logging environment accordingly. - -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource definition (CRD), which defines a complete {logging} environment and includes all the components of the logging stack to collect, store, and visualize logs. - -.Sample `ClusterLogging` custom resource (CR) -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - creationTimestamp: '2019-03-20T18:07:02Z' - generation: 1 - name: instance - namespace: openshift-logging -spec: - collection: - logs: - fluentd: - resources: null - type: fluentd - logStore: - elasticsearch: - nodeCount: 3 - redundancyPolicy: SingleRedundancy - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - storage: {} - type: elasticsearch - managementState: Managed - visualization: - kibana: - proxy: - resources: null - replicas: 1 - resources: null - type: kibana ----- -You can configure the following for {logging}: - -* You can overwrite the image for each {logging} component by modifying the appropriate -environment variable in the `cluster-logging-operator` Deployment. - -* You can specify nodes for the logging components by using node selectors. - -//// -* You can specify the Log collectors to deploy to each node in a cluster, either Fluentd or Rsyslog. - -[IMPORTANT] -==== -The Rsyslog log collector is currently a Technology Preview feature. -==== -//// - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -// modules/cluster-logging-configuring-image-about.adoc[leveloffset=+1] - -[IMPORTANT] -==== -The logging routes are managed by the {clo} and cannot be modified by the user. -==== diff --git a/observability/logging/config/cluster-logging-memory.adoc b/observability/logging/config/cluster-logging-memory.adoc deleted file mode 100644 index b563f23c4b25..000000000000 --- a/observability/logging/config/cluster-logging-memory.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-memory -[id="cluster-logging-memory"] -= Configuring CPU and memory limits for {logging} components -include::_attributes/common-attributes.adoc[] - -toc::[] - - -You can configure both the CPU and memory limits for each of the {logging} components as needed.
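For reference, the following sketch shows what such limits can look like in the `ClusterLogging` custom resource (CR). The stanza layout mirrors the sample CR shown earlier in this assembly; the specific CPU and memory values are illustrative assumptions only, not tuning recommendations.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance
  namespace: openshift-logging
spec:
  managementState: Managed
  collection:
    logs:
      type: fluentd
      fluentd:
        resources: # illustrative values; size these for your own workload
          limits:
            cpu: 500m
            memory: 1Gi
          requests:
            cpu: 500m
            memory: 1Gi
  visualization:
    type: kibana
    kibana:
      replicas: 1
      resources: # illustrative values; size these for your own workload
        limits:
          memory: 1Gi
        requests:
          cpu: 500m
          memory: 1Gi
----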
- - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+1] diff --git a/observability/logging/config/cluster-logging-systemd.adoc b/observability/logging/config/cluster-logging-systemd.adoc deleted file mode 100644 index d04520fa6022..000000000000 --- a/observability/logging/config/cluster-logging-systemd.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-systemd -[id="cluster-logging-systemd"] -= Configuring systemd-journald and Fluentd -include::_attributes/common-attributes.adoc[] - -toc::[] - -Because Fluentd reads from the journal, and the journal default settings are very low, journal entries can be lost because the journal cannot keep up with the logging rate from system services. - -We recommend setting `RateLimitIntervalSec=30s` and `RateLimitBurst=10000` (or even higher if necessary) to prevent the journal from losing entries. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-systemd-scaling.adoc[leveloffset=+1] diff --git a/observability/logging/config/images b/observability/logging/config/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/config/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/config/modules b/observability/logging/config/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/observability/logging/config/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/observability/logging/config/snippets b/observability/logging/config/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/config/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/dedicated-cluster-deploying.adoc b/observability/logging/dedicated-cluster-deploying.adoc deleted file mode 100644 index 7ead3ad9a37e..000000000000 --- a/observability/logging/dedicated-cluster-deploying.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: dedicated-cluster-deploying -[id="dedicated-cluster-deploying"] -= Installing the Red Hat OpenShift Logging Operator and OpenShift Elasticsearch Operator -include::_attributes/common-attributes.adoc[] - -toc::[] - -include::modules/dedicated-cluster-install-deploy.adoc[leveloffset=+1] diff --git a/observability/logging/dedicated-cluster-logging.adoc b/observability/logging/dedicated-cluster-logging.adoc deleted file mode 100644 index 4fb9aa183de7..000000000000 --- a/observability/logging/dedicated-cluster-logging.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: dedicated-cluster-logging -[id="dedicated-cluster-logging"] -= Configuring {logging} -include::_attributes/common-attributes.adoc[] - -toc::[] - -As a cluster administrator, you can deploy the {logging} to aggregate logs for a range of services. - -{product-title} clusters can perform logging tasks using the OpenShift Elasticsearch Operator. 
- -The {logging} is configurable using a `ClusterLogging` custom resource (CR) -deployed in the `openshift-logging` namespace. - -The Red Hat OpenShift Logging Operator watches for changes to the `ClusterLogging` CR, creates -any missing logging components, and adjusts the logging environment accordingly. - -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource -definition (CRD), which defines a complete OpenShift Logging environment and -includes all the components of the logging stack to collect, store, and visualize -logs. - -The `retentionPolicy` parameter in the `ClusterLogging` custom resource (CR) defines how long the internal Elasticsearch log store retains logs. - -.Sample `ClusterLogging` custom resource (CR) -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200Gi" - redundancyPolicy: "SingleRedundancy" - nodeSelector: - node-role.kubernetes.io/worker: "" - resources: - limits: - memory: 16G - requests: - memory: 16G - visualization: - type: "kibana" - kibana: - replicas: 1 - nodeSelector: - node-role.kubernetes.io/worker: "" - collection: - logs: - type: "fluentd" - fluentd: {} - nodeSelector: - node-role.kubernetes.io/worker: "" ----- diff --git a/observability/logging/log_collection_forwarding/_attributes b/observability/logging/log_collection_forwarding/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/log_collection_forwarding/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/log_collection_forwarding/cluster-logging-collector.adoc b/observability/logging/log_collection_forwarding/cluster-logging-collector.adoc deleted file mode 100644 index 638aeddc2a63..000000000000 --- a/observability/logging/log_collection_forwarding/cluster-logging-collector.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-collector -[id="cluster-logging-collector"] -= Configuring the logging collector -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. -All supported modifications to the log collector can be performed through the `spec.collection` stanza in the `ClusterLogging` custom resource (CR). - -include::modules/configuring-logging-collector.adoc[leveloffset=+1] - -include::modules/creating-logfilesmetricexporter.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-limits.adoc[leveloffset=+1] - -[id="cluster-logging-collector-input-receivers"] -== Configuring input receivers - -The {clo} deploys a service for each configured input receiver so that clients can write to the collector. This service exposes the port specified for the input receiver. -The service name is generated based on the following: - -* For multi log forwarder `ClusterLogForwarder` CR deployments, the service name is in the format `<ClusterLogForwarder_CR_name>-<input_name>`. For example, `example-http-receiver`. -* For legacy `ClusterLogForwarder` CR deployments, meaning those named `instance` and located in the `openshift-logging` namespace, the service name is in the format `collector-<input_name>`.
For example, `collector-http-receiver`. - -include::modules/log-collector-http-server.adoc[leveloffset=+2] -//include::modules/log-collector-rsyslog-server.adoc[leveloffset=+2] -// uncomment for 5.9 release - -[role="_additional-resources"] -.Additional resources -* xref:../../../observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc#logging-audit-filtering_configuring-log-forwarding[Overview of API audit filter] - -include::modules/cluster-logging-collector-tuning.adoc[leveloffset=+1] diff --git a/observability/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.adoc b/observability/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.adoc deleted file mode 100644 index 6cccc45412de..000000000000 --- a/observability/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-enabling-json-logging -[id="cluster-logging-enabling-json-logging"] -= Enabling JSON log forwarding -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can configure the Log Forwarding API to parse JSON strings into a structured object. - -include::modules/cluster-logging-json-log-forwarding.adoc[leveloffset=+1] -include::modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc[leveloffset=+1] -include::modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc[leveloffset=+1] -include::modules/cluster-logging-forwarding-separate-indices.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../observability/logging/log_collection_forwarding/log-forwarding.adoc#log-forwarding[About log forwarding] diff --git a/observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc b/observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc deleted file mode 100644 index 29074d707f53..000000000000 --- a/observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-eventrouter -[id="cluster-logging-eventrouter"] -= Collecting and storing Kubernetes events -include::_attributes/common-attributes.adoc[] - -toc::[] - -The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by the {logging}. You must manually deploy the Event Router. - -The Event Router collects events from all projects and writes them to `STDOUT`. The collector then forwards those events to the store defined in the `ClusterLogForwarder` custom resource (CR). - -[IMPORTANT] -==== -The Event Router adds additional load to Fluentd and can impact the number of other log messages that can be processed. 
==== - -include::modules/cluster-logging-eventrouter-deploy.adoc[leveloffset=+1] diff --git a/observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc b/observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc deleted file mode 100644 index 4547ddf00adf..000000000000 --- a/observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="configuring-log-forwarding"] -= Configuring log forwarding -:context: configuring-log-forwarding - -toc::[] - -include::snippets/audit-logs-default.adoc[] - -include::modules/cluster-logging-collector-log-forwarding-about.adoc[leveloffset=+1] - -include::modules/logging-create-clf.adoc[leveloffset=+1] - -include::modules/logging-delivery-tuning.adoc[leveloffset=+1] - -include::modules/logging-multiline-except.adoc[leveloffset=+1] - -ifndef::openshift-rosa[] -include::modules/cluster-logging-collector-log-forward-gcp.adoc[leveloffset=+1] -endif::openshift-rosa[] - -include::modules/logging-forward-splunk.adoc[leveloffset=+1] - -include::modules/logging-http-forward.adoc[leveloffset=+1] - -include::modules/logging-forwarding-azure.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-project.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc[leveloffset=+1] - -include::modules/logging-audit-log-filtering.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../../networking/network_security/logging-network-security.adoc#logging-network-security[Logging for egress firewall and network policy rules] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/logging-network-security.html#logging-network-security[Logging for egress firewall and network policy rules] -endif::[] - -include::modules/cluster-logging-collector-log-forward-loki.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki server] - -include::modules/cluster-logging-collector-log-forward-es.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-fluentd.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-kafka.adoc[leveloffset=+1] - -// Cloudwatch docs -include::modules/cluster-logging-collector-log-forward-cloudwatch.adoc[leveloffset=+1] -include::modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc[leveloffset=+1] - -ifdef::openshift-rosa[] -include::modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+1] -endif::[] - -ifdef::openshift-enterprise,openshift-origin,openshift-dedicated[] -include::modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+1] -endif::[] - -[role="_additional-resources"] -.Additional resources -* link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS STS API Reference] -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -*
xref:../../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[Cloud Credential Operator (CCO)] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] diff --git a/observability/logging/log_collection_forwarding/images b/observability/logging/log_collection_forwarding/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/log_collection_forwarding/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/log_collection_forwarding/log-forwarding.adoc b/observability/logging/log_collection_forwarding/log-forwarding.adoc deleted file mode 100644 index fa9bfdee0c9a..000000000000 --- a/observability/logging/log_collection_forwarding/log-forwarding.adoc +++ /dev/null @@ -1,49 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="log-forwarding"] -= About log collection and forwarding -:context: log-forwarding - -toc::[] - -The {clo} deploys a collector based on the `ClusterLogForwarder` resource specification. There are two collector options supported by this Operator: the legacy Fluentd collector and the Vector collector. - -include::snippets/logging-fluentd-dep-snip.adoc[] - -include::modules/about-log-collection.adoc[leveloffset=+1] - -include::modules/logging-vector-fluentd-feature-comparison.adoc[leveloffset=+2] - -include::modules/log-forwarding-collector-outputs.adoc[leveloffset=+2] - -[id="log-forwarding-about-clf"] -== Log forwarding - -Administrators can create `ClusterLogForwarder` resources that specify which logs are collected, how they are transformed, and where they are forwarded to. - -`ClusterLogForwarder` resources can be used to forward container, infrastructure, and audit logs to specific endpoints within or outside of a cluster. Transport Layer Security (TLS) is supported so that log forwarders can be configured to send logs securely. - -Administrators can also authorize RBAC permissions that define which service accounts and users can access and forward which types of logs. - -include::modules/log-forwarding-implementations.adoc[leveloffset=+2] - -[id="log-forwarding-enabling-multi-clf-feature"] -=== Enabling the multi log forwarder feature for a cluster - -To use the multi log forwarder feature, you must create a service account and cluster role bindings for that service account. You can then reference the service account in the `ClusterLogForwarder` resource to control access permissions. - -[IMPORTANT] -==== -To support multi log forwarding in namespaces other than the `openshift-logging` namespace, you must update the {clo} to watch all namespaces. This functionality is supported by default in new {clo} version 5.8 installations.
==== - -include::modules/log-collection-rbac-permissions.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources -ifdef::openshift-enterprise[] -* xref:../../../authentication/using-rbac.adoc#using-rbac[Using RBAC to define and apply permissions] -* xref:../../../authentication/using-service-accounts-in-applications.adoc#using-service-accounts-in-applications[Using service accounts in applications] -endif::[] -* link:https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization Kubernetes documentation] diff --git a/observability/logging/log_collection_forwarding/logging-output-types.adoc b/observability/logging/log_collection_forwarding/logging-output-types.adoc deleted file mode 100644 index 612a08d39867..000000000000 --- a/observability/logging/log_collection_forwarding/logging-output-types.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="logging-output-types"] -= Log output types -:context: logging-output-types - -toc::[] - -Outputs define the destinations to which a log forwarder sends logs. You can configure multiple types of outputs in the `ClusterLogForwarder` custom resource (CR) to send logs to servers that support different protocols. - -include::modules/supported-log-outputs.adoc[leveloffset=+1] - -[id="logging-output-types-descriptions"] -== Output type descriptions - -`default`:: The on-cluster, Red{nbsp}Hat managed log store. You are not required to configure the default output. -+ -[NOTE] -==== -If you configure a `default` output, you receive an error message, because the `default` output name is reserved for referencing the on-cluster, Red{nbsp}Hat managed log store. -==== -`loki`:: Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. -`kafka`:: A Kafka broker. The `kafka` output can use a TCP or TLS connection. -`elasticsearch`:: An external Elasticsearch instance. The `elasticsearch` output can use a TLS connection. -`fluentdForward`:: An external log aggregation solution that supports Fluentd. This option uses the Fluentd `forward` protocol. The `fluentdForward` output can use a TCP or TLS connection and supports shared-key authentication by providing a `shared_key` field in a secret. Shared-key authentication can be used with or without TLS. -+ -[IMPORTANT] -==== -The `fluentdForward` output is only supported if you are using the Fluentd collector. It is not supported if you are using the Vector collector. If you are using the Vector collector, you can forward logs to Fluentd by using the `http` output. -==== -`syslog`:: An external log aggregation solution that supports the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocols. The `syslog` output can use a UDP, TCP, or TLS connection. -`cloudwatch`:: Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). -`cloudlogging`:: {gcp-full} Logging, a monitoring and log storage service hosted by {gcp-first}.
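To make the relationship between output types and the `ClusterLogForwarder` CR concrete, the following sketch defines a `loki` output and a `kafka` output and forwards application logs to both. The output names, URLs, and Kafka topic are hypothetical placeholders for your own endpoints.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: external-loki # hypothetical output name
    type: loki
    url: https://loki.example.com:3100
  - name: external-kafka # hypothetical output name
    type: kafka
    url: tls://kafka.example.com:9093/app-topic
  pipelines:
  - name: forward-app-logs
    inputRefs:
    - application
    outputRefs:
    - external-loki
    - external-kafka
----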
diff --git a/observability/logging/log_collection_forwarding/modules b/observability/logging/log_collection_forwarding/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/log_collection_forwarding/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/log_collection_forwarding/snippets b/observability/logging/log_collection_forwarding/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/log_collection_forwarding/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/log_storage/_attributes b/observability/logging/log_storage/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/log_storage/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/log_storage/about-log-storage.adoc b/observability/logging/log_storage/about-log-storage.adoc deleted file mode 100644 index e906087a714f..000000000000 --- a/observability/logging/log_storage/about-log-storage.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="about-log-storage"] -= About log storage -:context: about-log-storage - -toc::[] - -You can use an internal Loki or Elasticsearch log store on your cluster for storing logs, or you can use a xref:../../../observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc#logging-create-clf_configuring-log-forwarding[`ClusterLogForwarder` custom resource (CR)] to forward logs to an external store. - -[id="log-storage-overview-types"] -== Log storage types - -include::snippets/logging-loki-statement-snip.adoc[] - -include::modules/cluster-logging-about-es-logstore.adoc[leveloffset=+2] - -[id="log-storage-overview-querying"] -== Querying log stores - -You can query Loki by using the link:https://grafana.com/docs/loki/latest/logql/[LogQL log query language]. - -[role="_additional-resources"] -[id="additional-resources_log-storage-overview"] -== Additional resources -* link:https://grafana.com/docs/loki/latest/get-started/components/[Loki components documentation] -* link:https://loki-operator.dev/docs/object_storage.md/[Loki Object Storage documentation] diff --git a/observability/logging/log_storage/cluster-logging-loki.adoc b/observability/logging/log_storage/cluster-logging-loki.adoc deleted file mode 100644 index 043db2797c21..000000000000 --- a/observability/logging/log_storage/cluster-logging-loki.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-loki -[id="cluster-logging-loki"] -= Configuring the LokiStack log store -include::_attributes/common-attributes.adoc[] - -toc::[] - -In {logging} documentation, _LokiStack_ refers to the {logging} supported combination of Loki and web proxy with {product-title} authentication integration. LokiStack's proxy uses {product-title} authentication to enforce multi-tenancy. _Loki_ refers to the log store as either the individual component or an external store. 
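For orientation, a minimal `LokiStack` CR of this kind resembles the following sketch. The size, schema version, object storage secret name, and storage class name are assumptions that you must adapt to your environment and installed {loki-op} version.

[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
  name: logging-loki
  namespace: openshift-logging
spec:
  size: 1x.small # assumed deployment size
  storage:
    schemas:
    - version: v13 # assumed schema version
      effectiveDate: "2022-06-01"
    secret:
      name: logging-loki-s3 # assumed object storage secret
      type: s3
  storageClassName: gp3-csi # assumed storage class
  tenants:
    mode: openshift-logging
----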
- -include::modules/logging-creating-new-group-cluster-admin-user-role.adoc[leveloffset=+1] - -include::modules/logging-loki-restart-hardening.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#pod-disruption-budgets[Pod disruption budgets Kubernetes documentation] - -include::modules/logging-loki-reliability-hardening.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podantiaffinity-v1-core[`PodAntiAffinity` v1 core Kubernetes documentation] -* link:https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity[Assigning Pods to Nodes Kubernetes documentation] -* xref:../../../nodes/scheduling/nodes-scheduler-pod-affinity.adoc#nodes-scheduler-pod-affinity[Placing pods relative to other pods using affinity and anti-affinity rules] - -include::modules/logging-loki-zone-aware-rep.adoc[leveloffset=+1] - -include::modules/logging-loki-zone-fail-recovery.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* link:https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/#spread-constraint-definition[Topology spread constraints Kubernetes documentation] -* link:https://kubernetes.io/docs/setup/best-practices/multiple-zones/#storage-access-for-zones[Kubernetes storage documentation]. - -ifdef::openshift-enterprise[] -* xref:../../../nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints.adoc#nodes-scheduler-pod-topology-spread-constraints-configuring[Controlling pod placement by using pod topology spread constraints] -endif::[] - -include::modules/logging-loki-log-access.adoc[leveloffset=+1,tag=!NetObservMode] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise[] -* xref:../../../authentication/using-rbac.adoc#using-rbac[Using RBAC to define and apply permissions] -endif::[] - -include::modules/logging-loki-retention.adoc[leveloffset=+1] -include::modules/loki-rate-limit-errors.adoc[leveloffset=+1] -include::modules/logging-loki-memberlist-ip.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_cluster-logging-loki"] -== Additional resources -* link:https://grafana.com/docs/loki/latest/get-started/components/[Loki components documentation] -* link:https://grafana.com/docs/loki/latest/logql/[Loki Query Language (LogQL) documentation] -* link:https://loki-operator.dev/docs/howto_connect_grafana.md/[Grafana Dashboard documentation] -* link:https://loki-operator.dev/docs/object_storage.md/[Loki Object Storage documentation] -* link:https://loki-operator.dev/docs/api.md/#loki-grafana-com-v1-IngestionLimitSpec[{loki-op} `IngestionLimitSpec` documentation] -* link:https://grafana.com/docs/loki/latest/operations/storage/schema/#changing-the-schema[Loki Storage Schema documentation] diff --git a/observability/logging/log_storage/images b/observability/logging/log_storage/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/log_storage/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/log_storage/installing-log-storage.adoc b/observability/logging/log_storage/installing-log-storage.adoc deleted file mode 100644 index 9dfdf2129e5c..000000000000 --- a/observability/logging/log_storage/installing-log-storage.adoc +++ /dev/null @@ -1,77 
+0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="installing-log-storage"] -= Installing log storage -:context: installing-log-storage - -toc::[] - -You can use the {oc-first} or the {product-title} web console to deploy a log store on your {product-title} cluster. - -include::snippets/logging-elastic-dep-snip.adoc[] - -[id="installing-log-storage-loki"] -== Deploying a Loki log store - -You can use the {loki-op} to deploy an internal Loki log store on your {product-title} cluster. -After you install the {loki-op}, you must configure Loki object storage by creating a secret, and then create a `LokiStack` custom resource (CR). - -include::modules/loki-deployment-sizing.adoc[leveloffset=+2] - -// Loki console install -include::modules/logging-loki-gui-install.adoc[leveloffset=+2] -include::modules/loki-create-object-storage-secret-console.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../../observability/logging/log_storage/installing-log-storage.adoc#logging-loki-storage_installing-log-storage[Loki object storage] - -ifdef::openshift-enterprise[] -[id="installing-log-storage-loki-sts"] -== Deploying a Loki log store on a cluster that uses short-term credentials - -For some storage providers, you can use the CCO utility (`ccoctl`) during installation to implement short-term credentials. These credentials are created and managed outside the {product-title} cluster. For more information, see xref:../../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components]. - -[NOTE] -==== -Short-term credential authentication must be configured during a new installation of {loki-op} on a cluster that uses this credentials strategy. You cannot configure an existing cluster that uses a different credentials strategy to use this feature. -==== -endif::[] - -include::modules/logging-identity-federation.adoc[leveloffset=+2] - -include::modules/logging-create-loki-cr-console.adoc[leveloffset=+2,tag=!pre-5.9] - -// Loki CLI install -include::modules/logging-loki-cli-install.adoc[leveloffset=+2] -include::modules/loki-create-object-storage-secret-cli.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../../observability/logging/log_storage/installing-log-storage.adoc#logging-loki-storage_installing-log-storage[Loki object storage] - -include::modules/logging-create-loki-cr-cli.adoc[leveloffset=+2,tag=!pre-5.9] - -// Loki object storage -include::modules/logging-loki-storage.adoc[leveloffset=+1] - -// create object storage -include::modules/logging-loki-storage-aws.adoc[leveloffset=+2] -include::modules/logging-loki-storage-azure.adoc[leveloffset=+2] -include::modules/logging-loki-storage-gcp.adoc[leveloffset=+2] -include::modules/logging-loki-storage-minio.adoc[leveloffset=+2] -include::modules/logging-loki-storage-odf.adoc[leveloffset=+2] -include::modules/logging-loki-storage-swift.adoc[leveloffset=+2] - -[id="installing-log-storage-es"] -== Deploying an Elasticsearch log store - -You can use the {es-op} to deploy an internal Elasticsearch log store on your {product-title} cluster.
- -include::snippets/logging-elastic-dep-snip.adoc[] -include::modules/logging-es-storage-considerations.adoc[leveloffset=+2] -include::modules/logging-install-es-operator.adoc[leveloffset=+2] -include::modules/cluster-logging-deploy-es-cli.adoc[leveloffset=+2] - -// configuring log store in the clusterlogging CR -include::modules/configuring-log-storage-cr.adoc[leveloffset=+1] diff --git a/observability/logging/log_storage/logging-config-es-store.adoc b/observability/logging/log_storage/logging-config-es-store.adoc deleted file mode 100644 index 4dad37a0f212..000000000000 --- a/observability/logging/log_storage/logging-config-es-store.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="logging-config-es-store"] -= Configuring the Elasticsearch log store -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: logging-config-es-store - -toc::[] - -You can use Elasticsearch 6 to store and organize log data. - -You can make modifications to your log store, including: - -* Storage for your Elasticsearch cluster -* Shard replication across data nodes in the cluster, from full replication to no replication -* External access to Elasticsearch data - -include::modules/configuring-log-storage-cr.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-audit.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../observability/logging/log_collection_forwarding/log-forwarding.adoc#log-forwarding[About log collection and forwarding] - -include::modules/cluster-logging-elasticsearch-retention.adoc[leveloffset=+1] - -include::modules/cluster-logging-logstore-limits.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-ha.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-scaledown.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-storage.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc[leveloffset=+1] - -include::modules/cluster-logging-manual-rollout-rolling.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-exposing.adoc[leveloffset=+1] - -include::modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc[leveloffset=+1] diff --git a/observability/logging/log_storage/modules b/observability/logging/log_storage/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/log_storage/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/log_storage/snippets b/observability/logging/log_storage/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/log_storage/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/log_visualization/_attributes b/observability/logging/log_visualization/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/log_visualization/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/log_visualization/cluster-logging-dashboards.adoc b/observability/logging/log_visualization/cluster-logging-dashboards.adoc deleted file mode 100644 index a504a420e16d..000000000000 --- a/observability/logging/log_visualization/cluster-logging-dashboards.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_mod-docs-content-type: 
ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="cluster-logging-dashboards"] -= Viewing cluster dashboards -:context: cluster-logging-dashboards - -toc::[] - -The *Logging/Elasticsearch Nodes* and *OpenShift Logging* dashboards in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] -contain in-depth details about your Elasticsearch instance and the individual Elasticsearch nodes that you can use to prevent and diagnose problems. - -The *OpenShift Logging* dashboard contains charts that show details about your Elasticsearch instance at a cluster level, including cluster resources, garbage collection, shards in the cluster, and Fluentd statistics. - -The *Logging/Elasticsearch Nodes* dashboard contains charts that show details about your Elasticsearch instance, many at node level, including details on indexing, shards, resources, and so forth. - -include::modules/cluster-logging-dashboards-access.adoc[leveloffset=+1] - -For information on the dashboard charts, see xref:../../../observability/logging/log_visualization/cluster-logging-dashboards.adoc#cluster-logging-dashboards-logging_cluster-logging-dashboards[About the OpenShift Logging dashboard] and xref:../../../observability/logging/log_visualization/cluster-logging-dashboards.adoc#cluster-logging-dashboards-es_cluster-logging-dashboards[About the Logging/Elasticsearch Nodes dashboard]. - -include::modules/cluster-logging-dashboards-logging.adoc[leveloffset=+1] -include::modules/cluster-logging-dashboards-es.adoc[leveloffset=+1] diff --git a/observability/logging/log_visualization/images b/observability/logging/log_visualization/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/log_visualization/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/log_visualization/log-visualization-ocp-console.adoc b/observability/logging/log_visualization/log-visualization-ocp-console.adoc deleted file mode 100644 index b826e724ed3c..000000000000 --- a/observability/logging/log_visualization/log-visualization-ocp-console.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="log-visualization-ocp-console"] -= Log visualization with the web console -:context: log-visualization-ocp-console - -toc::[] - -You can use the {product-title} web console to visualize log data by configuring the {log-plug}. Options for configuration are available during installation of {logging} on the web console. - -If you have already installed {logging} and want to configure the plugin, use one of the following procedures.
- -include::modules/enabling-log-console-plugin.adoc[leveloffset=+1] -include::modules/logging-plugin-es-loki.adoc[leveloffset=+1] diff --git a/observability/logging/log_visualization/log-visualization.adoc b/observability/logging/log_visualization/log-visualization.adoc deleted file mode 100644 index 22513bbbe56c..000000000000 --- a/observability/logging/log_visualization/log-visualization.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="log-visualization"] -= About log visualization -:context: log-visualization - -toc::[] - -You can visualize your log data in the {product-title} web console, or the Kibana web console, depending on your deployed log storage solution. The Kibana console can be used with Elasticsearch log stores, and the {product-title} web console can be used with the Elasticsearch log store or the LokiStack. - -include::snippets/logging-kibana-dep-snip.adoc[] - -include::modules/configuring-log-visualizer.adoc[leveloffset=+1] - -[id="log-visualization-resource-logs"] -== Viewing logs for a resource - -Resource logs are a default feature that provides limited log viewing capability. You can view the logs for various resources, such as builds, deployments, and pods, by using the {oc-first} and the web console. - -[TIP] -==== -To enhance your log retrieving and viewing experience, install the {logging}. The {logging} aggregates all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs, into a dedicated log store. You can then query, discover, and visualize your log data through the Kibana console or the {product-title} web console. Resource logs do not access the {logging} log store. -==== - -include::modules/viewing-resource-logs-cli-console.adoc[leveloffset=+2] diff --git a/observability/logging/log_visualization/logging-kibana.adoc b/observability/logging/log_visualization/logging-kibana.adoc deleted file mode 100644 index 27aaf807cabd..000000000000 --- a/observability/logging/log_visualization/logging-kibana.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="logging-kibana"] -= Log visualization with Kibana -:context: logging-kibana - -toc::[] - -If you are using the Elasticsearch log store, you can use the Kibana console to visualize collected log data. - -Using Kibana, you can do the following with your data: - -* Search and browse the data using the *Discover* tab. -* Chart and map the data using the *Visualize* tab. -* Create and view custom dashboards using the *Dashboard* tab. - -Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information about using the interface, see the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Kibana documentation]. - -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the xref:../../../observability/logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Log Forwarding API] to configure a pipeline that uses the `default` output for audit logs.
==== - -include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+1] -include::modules/cluster-logging-visualizer-kibana.adoc[leveloffset=+1] - -[id="logging-kibana-configuring"] -== Configuring Kibana - -You can configure the Kibana console by modifying the `ClusterLogging` custom resource (CR). - -include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+2] -include::modules/cluster-logging-kibana-scaling.adoc[leveloffset=+2] diff --git a/observability/logging/log_visualization/modules b/observability/logging/log_visualization/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/log_visualization/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/log_visualization/snippets b/observability/logging/log_visualization/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/log_visualization/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/logging-6.0/6x-cluster-logging-collector-6.0.adoc b/observability/logging/logging-6.0/6x-cluster-logging-collector-6.0.adoc deleted file mode 100644 index 3b9b49affb74..000000000000 --- a/observability/logging/logging-6.0/6x-cluster-logging-collector-6.0.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-collector-6-1 -[id="cluster-logging-collector-6-1"] -= Configuring the logging collector -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. -All supported modifications to the log collector are performed through the `spec.collection` stanza in the `ClusterLogForwarder` custom resource (CR). - -include::modules/log6x-creating-logfilesmetricexporter.adoc[leveloffset=+1] - -include::modules/log6x-cluster-logging-collector-limits.adoc[leveloffset=+1] - -[id="cluster-logging-collector-input-receivers_{context}"] -== Configuring input receivers - -The {clo} deploys a service for each configured input receiver so that clients can write to the collector. This service exposes the port specified for the input receiver. For log forwarder `ClusterLogForwarder` CR deployments, the service name is in the `<ClusterLogForwarder_CR_name>-<input_name>` format.
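As an illustration of this naming scheme, assuming a `ClusterLogForwarder` CR named `collector` that defines a receiver input named `http-receiver` (both names are hypothetical), you can inspect the generated service as follows:

[source,terminal]
----
$ oc get service collector-http-receiver -n openshift-logging
----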
- -include::modules/log6x-log-collector-http-server.adoc[leveloffset=+2] -include::modules/log6x-log-collector-syslog-server.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.0/_attributes b/observability/logging/logging-6.0/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/observability/logging/logging-6.0/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/observability/logging/logging-6.0/images b/observability/logging/logging-6.0/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/observability/logging/logging-6.0/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/observability/logging/logging-6.0/log60-cluster-logging-support.adoc b/observability/logging/logging-6.0/log60-cluster-logging-support.adoc deleted file mode 100644 index 82301f9832fa..000000000000 --- a/observability/logging/logging-6.0/log60-cluster-logging-support.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log60-cluster-logging-support"] -= Support -include::_attributes/common-attributes.adoc[] -:context: log60-cluster-logging-support - -toc::[] - -include::snippets/logging-supported-config-snip.adoc[] -include::snippets/logging-compatibility-snip.adoc[] -include::snippets/log6x-loki-statement-snip.adoc[] - -{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. It is intended to be used for forwarding logs to various supported systems. - -{logging-uc} is not: - -* A high-scale log collection system -* Security Information and Event Management (SIEM) compliant -* A "bring your own" (BYO) log collector configuration -* Historical or long-term log retention or storage -* A guaranteed log sink -* Secure storage - audit logs are not stored by default - -[id="cluster-logging-support-CRDs_{context}"] -== Supported API custom resource definitions - -The following table describes the supported {logging-uc} APIs. - -include::snippets/log6x-api-support-states-snip.adoc[] - -include::modules/cluster-logging-maintenance-support-list-6x.adoc[leveloffset=+1] -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="support-exception-for-coo-logging-ui-plugin_{context}"] -== Support exception for the Logging UI Plugin - -Until the approaching General Availability (GA) release of the Cluster Observability Operator (COO), which is currently in link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview] (TP), Red{nbsp}Hat provides support to customers who are using Logging 6.0 or later with the COO for its Logging UI Plugin on {product-title} 4.14 or later. This support exception is temporary as the COO includes several independent features, some of which are still TP features, but the Logging UI Plugin is ready for GA. - -[id="cluster-logging-support-must-gather_{context}"] -== Collecting {logging} data for Red Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red{nbsp}Hat Support. - -You can use the xref:../../../support/gathering-cluster-data.adoc#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. -For prompt support, supply diagnostic information for both {product-title} and {logging}.
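For example, you can run the must-gather tool with an image that matches your installed {clo} by using a command of the following form; the image reference is a placeholder, not a specific supported image:

[source,terminal]
----
$ oc adm must-gather --image=<cluster_logging_operator_must_gather_image>
----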
- -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+2] -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.0/log6x-about.adoc b/observability/logging/logging-6.0/log6x-about.adoc deleted file mode 100644 index a4680dd2b47d..000000000000 --- a/observability/logging/logging-6.0/log6x-about.adoc +++ /dev/null @@ -1,175 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-about"] -= Logging 6.0 -:context: logging-6x - -toc::[] - -The `ClusterLogForwarder` custom resource (CR) is the central configuration point for log collection and forwarding. - -== Inputs and Outputs - -Inputs specify the sources of logs to be forwarded. Logging provides the following built-in input types that select logs from different parts of your cluster: - -* `application` -* `receiver` -* `infrastructure` -* `audit` - -You can also define custom inputs based on namespaces or pod labels to fine-tune log selection. - -Outputs define the destinations where logs are sent. Each output type has its own set of configuration options, allowing you to customize the behavior and authentication settings. - - -== Receiver Input Type -The receiver input type enables the Logging system to accept logs from external sources. It supports two formats for receiving logs: `http` and `syslog`. - -The `ReceiverSpec` field defines the configuration for a receiver input. - -== Pipelines and Filters - -Pipelines determine the flow of logs from inputs to outputs. A pipeline consists of one or more input refs, output refs, and optional filter refs. You can use filters to transform or drop log messages within a pipeline. The order of filters matters, as they are applied sequentially, and earlier filters can prevent log messages from reaching later stages. - -== Operator Behavior - -The Cluster Logging Operator manages the deployment and configuration of the collector based on the `managementState` field: - -- When set to `Managed` (default), the Operator actively manages the logging resources to match the configuration defined in the spec. - -- When set to `Unmanaged`, the Operator does not take any action, allowing you to manually manage the logging components. - -== Validation -Logging includes extensive validation rules and default values to ensure a smooth and error-free configuration experience. The `ClusterLogForwarder` resource enforces validation checks on required fields, dependencies between fields, and the format of input values. Default values are provided for certain fields, reducing the need for explicit configuration in common scenarios. - -== Quick Start - -.Prerequisites -* You have access to an {product-title} cluster with `cluster-admin` permissions. -* You installed the {oc-first}. -* You have access to a supported object store. For example, AWS S3, {gcp-full} Storage, {azure-short}, Swift, Minio, or {rh-storage}. - -.Procedure - -. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from the software catalog. - -. Create a secret to access an existing object storage bucket: -+ -.Example command for AWS -[source,terminal,subs="+quotes"] ----- -$ oc create secret generic logging-loki-s3 \ - --from-literal=bucketnames="<bucket_name>" \ - --from-literal=endpoint="<aws_bucket_endpoint>" \ - --from-literal=access_key_id="<aws_access_key_id>" \ - --from-literal=access_key_secret="<aws_access_key_secret>" \ - --from-literal=region="<aws_region_of_your_bucket>" \ - -n openshift-logging ----- - -.
Create a `LokiStack` custom resource (CR) in the `openshift-logging` namespace: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - managementState: Managed - size: 1x.extra-small - storage: - schemas: - - effectiveDate: '2022-06-01' - version: v13 - secret: - name: logging-loki-s3 - type: s3 - storageClassName: gp3-csi - tenants: - mode: openshift-logging ----- - -. Create a service account for the collector: -+ -[source,shell] ----- -$ oc create sa collector -n openshift-logging ----- - -. Bind the `ClusterRole` to the service account: -+ -[source,shell] ----- -$ oc adm policy add-cluster-role-to-user logging-collector-logs-writer -z collector -n openshift-logging ----- - -. Create a `UIPlugin` to enable the Log section in the Observe tab: -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1alpha1 -kind: UIPlugin -metadata: - name: logging -spec: - type: Logging - logging: - lokiStack: - name: logging-loki ----- - -. Add additional roles to the collector service account: -+ -[source,shell] ----- -$ oc adm policy add-cluster-role-to-user collect-application-logs -z collector -n openshift-logging ----- -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-audit-logs -z collector -n openshift-logging ----- -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs -z collector -n openshift-logging ----- - -. Create a `ClusterLogForwarder` CR to configure log forwarding: -+ -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: collector - namespace: openshift-logging -spec: - serviceAccount: - name: collector - outputs: - - name: default-lokistack - type: lokiStack - lokiStack: - target: - name: logging-loki - namespace: openshift-logging - authentication: - token: - from: serviceAccount - tls: - ca: - key: service-ca.crt - configMapName: openshift-service-ca.crt - pipelines: - - name: default-logstore - inputRefs: - - application - - infrastructure - outputRefs: - - default-lokistack ----- - -.Verification -* Verify that logs are visible in the *Log* section of the *Observe* tab in the {product-title} web console. diff --git a/observability/logging/logging-6.0/log6x-clf.adoc b/observability/logging/logging-6.0/log6x-clf.adoc deleted file mode 100644 index da6c54f99eb6..000000000000 --- a/observability/logging/logging-6.0/log6x-clf.adoc +++ /dev/null @@ -1,117 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-clf"] -= Configuring log forwarding -:context: logging-6x - -toc::[] - -The `ClusterLogForwarder` (CLF) allows users to configure forwarding of logs to various destinations. It provides a flexible way to select log messages from different sources, send them through a pipeline that can transform or filter them, and forward them to one or more outputs. - -.Key Functions of the ClusterLogForwarder -* Selects log messages using inputs -* Forwards logs to external destinations using outputs -* Filters, transforms, and drops log messages using filters -* Defines log forwarding pipelines connecting inputs, filters and outputs - -// need to verify if this is relevant still. 
-//include::modules/log6x-config-roles.adoc[leveloffset=+1] - -include::modules/log6x-collection-setup.adoc[leveloffset=+1] - -// OBSDOCS-1104 -== Modifying log level in collector - -To modify the log level in the collector, you can set the `observability.openshift.io/log-level` annotation to `trace`, `debug`, `info`, `warn`, `error`, or `off`. - -.Example log level annotation -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: collector - annotations: - observability.openshift.io/log-level: debug -# ... ----- - -== Managing the Operator - -The `ClusterLogForwarder` resource has a `managementState` field that controls whether the operator actively manages its resources or leaves them Unmanaged: - -Managed:: (default) The operator will drive the logging resources to match the desired state in the CLF spec. - -Unmanaged:: The operator will not take any action related to the logging components. - -This allows administrators to temporarily pause log forwarding by setting `managementState` to `Unmanaged`. - -== Structure of the ClusterLogForwarder - -The CLF has a `spec` section that contains the following key components: - -Inputs:: Select log messages to be forwarded. Built-in input types `application`, `infrastructure`, and `audit` forward logs from different parts of the cluster. You can also define custom inputs. - -Outputs:: Define destinations to forward logs to. Each output has a unique name and type-specific configuration. - -Pipelines:: Define the path logs take from inputs, through filters, to outputs. Pipelines have a unique name and consist of a list of input, output, and filter names. - -Filters:: Transform or drop log messages in the pipeline. Users can define filters that match certain log fields and drop or modify the messages. Filters are applied in the order specified in the pipeline. - -=== Inputs - -Inputs are configured in an array under `spec.inputs`. There are three built-in input types: - -application:: Selects logs from all application containers, excluding those in infrastructure namespaces. - -infrastructure:: Selects logs from nodes and from infrastructure components running in the following namespaces: -** `default` -** `kube` -** `openshift` -** Containing the `kube-` or `openshift-` prefix - -audit:: Selects logs from the OpenShift API server audit logs, Kubernetes API server audit logs, OVN audit logs, and node audit logs from auditd. - -Users can define custom inputs of type `application` that select logs from specific namespaces or by using pod labels. - -=== Outputs - -Outputs are configured in an array under `spec.outputs`. Each output must have a unique name and a type. Supported types are: - -azureMonitor:: Forwards logs to Azure Monitor. -cloudwatch:: Forwards logs to AWS CloudWatch. -//elasticsearch:: Forwards logs to an external Elasticsearch instance. -googleCloudLogging:: Forwards logs to {gcp-full} Logging. -http:: Forwards logs to a generic HTTP endpoint. -kafka:: Forwards logs to a Kafka broker. -loki:: Forwards logs to a Loki logging backend. -lokistack:: Forwards logs to the logging supported combination of Loki and web proxy with {product-title} authentication integration. LokiStack's proxy uses {product-title} authentication to enforce multi-tenancy. -otlp:: Forwards logs using the OpenTelemetry Protocol. -splunk:: Forwards logs to Splunk. -syslog:: Forwards logs to an external syslog server. - -Each output type has its own configuration fields.
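As a sketch of how a type-specific block is defined, the following `http` output example assumes that the configuration is nested under a field named after the output type, following the same convention as the `lokiStack` output used elsewhere in the Logging 6.0 documentation; the output name and endpoint URL are hypothetical:

[source,yaml]
----
outputs:
- name: my-http-output # hypothetical output name
  type: http
  http:
    url: https://log-receiver.example.com/logs # hypothetical endpoint
----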
- -=== Pipelines - -Pipelines are configured in an array under `spec.pipelines`. Each pipeline must have a unique name and consists of: - -inputRefs:: Names of inputs whose logs should be forwarded to this pipeline. -outputRefs:: Names of outputs to send logs to. -filterRefs:: (optional) Names of filters to apply. - -The order of filterRefs matters, as they are applied sequentially. Earlier filters can drop messages that will not be processed by later filters. - -=== Filters - -Filters are configured in an array under `spec.filters`. They can match incoming log messages based on the value of structured fields and modify or drop them. - -Administrators can configure the following types of filters: - -include::modules/log6x-multiline-except.adoc[leveloffset=+2] -include::modules/log6x-content-filter-drop-records.adoc[leveloffset=+2] -include::modules/log6x-audit-log-filtering.adoc[leveloffset=+2] -include::modules/log6x-input-spec-filter-labels-expressions.adoc[leveloffset=+2] -include::modules/log6x-content-filter-prune-records.adoc[leveloffset=+2] -include::modules/log6x-input-spec-filter-audit-infrastructure.adoc[leveloffset=+1] -include::modules/log6x-input-spec-filter-namespace-container.adoc[leveloffset=+1] diff --git a/observability/logging/logging-6.0/log6x-loki.adoc b/observability/logging/logging-6.0/log6x-loki.adoc deleted file mode 100644 index 097c0f994e13..000000000000 --- a/observability/logging/logging-6.0/log6x-loki.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[leveloffset=+1] -[id="log6x-loki"] -= Storing logs with LokiStack -:context: logging-6x - -toc::[] - -You can configure a `LokiStack` CR to store application, audit, and infrastructure-related logs. - -[id="prerequisites_{context}"] -== Prerequisites - -* You have installed the {loki-op} by using the CLI or web console. -* You have a `serviceAccount` in the same namespace in which you create the `ClusterLogForwarder`. -* The `serviceAccount` is assigned `collect-audit-logs`, `collect-application-logs`, and `collect-infrastructure-logs` cluster roles. 
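If you prefer declarative manifests to the `oc adm policy` commands shown earlier, you can grant the cluster roles listed in these prerequisites with standard `ClusterRoleBinding` resources. The following sketch assumes a service account named `collector` in the `openshift-logging` namespace and covers one of the three roles; create an equivalent binding for each remaining role.

.Example ClusterRoleBinding for the collector service account
[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: collector-collect-application-logs
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: collect-application-logs # repeat for collect-infrastructure-logs and collect-audit-logs
subjects:
- kind: ServiceAccount
  name: collector # assumed service account name
  namespace: openshift-logging # assumed namespace
----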
- -=== Core Setup and Configuration -*Role-based access controls, basic monitoring, and pod placement to deploy Loki.* - -include::modules/log6x-loki-sizing.adoc[leveloffset=+1] -include::modules/log6x-loki-rbac-rules-perms.adoc[leveloffset=+1] -include::modules/log6x-enabling-loki-alerts.adoc[leveloffset=+1] -include::modules/log6x-loki-memberlist-ip.adoc[leveloffset=+1] -include::modules/log6x-loki-retention.adoc[leveloffset=+1] -include::modules/log6x-loki-pod-placement.adoc[leveloffset=+1] - -=== Enhanced Reliability and Performance -*Configurations to ensure Loki’s reliability and efficiency in production.* - -include::modules/log6x-identity-federation.adoc[leveloffset=+1] -include::modules/log6x-loki-reliability-hardening.adoc[leveloffset=+1] -include::modules/log6x-loki-restart-hardening.adoc[leveloffset=+1] - -=== Advanced Deployment and Scalability -*Specialized configurations for high availability, scalability, and error handling.* - -include::modules/log6x-loki-zone-aware-rep.adoc[leveloffset=+1] -include::modules/log6x-loki-zone-fail-recovery.adoc[leveloffset=+1] -include::modules/log6x-loki-rate-limit-errors.adoc[leveloffset=+1] diff --git a/observability/logging/logging-6.0/log6x-meta-contributing.adoc b/observability/logging/logging-6.0/log6x-meta-contributing.adoc deleted file mode 100644 index 53067d40cf66..000000000000 --- a/observability/logging/logging-6.0/log6x-meta-contributing.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-meta-contributing"] -= Contributing to logging documentation -:context: logging-6x - -[IMPORTANT] -==== -Do not include this file in the topic map. This is a guide meant for contributors, and is not intended to be published. -==== - -Logging consists of the Red Hat OpenShift Logging Operator (also known as the Cluster Logging Operator), and an accompanying log store Operator. Either the Loki Operator (current/future), or Elasticsearch (deprecated). Either vector (current/future) or fluentd (deprecated) handles log collection and aggregation. Operators use custom resources (CR) to manage applications and their components. High-level configuration and settings are provided by the user within a CR. The Operator translates high-level directives into low-level actions, based on best practices embedded within the Operator’s logic. A custom resource definition (CRD) defines a CR and lists all the configurations available to users of the Operator. Installing an Operator creates the CRDs, which are then used to generate CRs. - -== Operator CRs: -* `Red Hat OpenShift Logging Operator` -** (Deprecated) `ClusterLogging` (CL) - Deploys the collector and forwarder which currently are both implemented by a daemonset running on each node. -** `ClusterLogForwarder` (CLF) - Generates collector configuration to forward logs per user configuration. -* `Loki Operator`: -** `LokiStack` - Controls the Loki cluster as log store and the web proxy with {product-title} authentication integration to enforce multi-tenancy. -** `AlertingRule` - Alerting rules allow you to define alert conditions based on LogQL expressions. -** `RecordingRule` - Recording rules allow you to precompute frequently needed or computationally expensive expressions and save their result as a new set of time series. -** `RulerConfig` - The ruler API endpoints require to configure a backend object storage to store the recording rules and alerts. 
-* (Deprecated) `OpenShift Elasticsearch Operator` [Note: These CRs are generated and managed by the `ClusterLogging` Operator, manual changes cannot be made without being overwritten by the Operator.] -** `ElasticSearch` - Configure and deploy an Elasticsearch instance as the default log store. -** `Kibana` - Configure and deploy Kibana instance to search, query and view logs. - -== Underlying configuration(s): -* 5.0 - 5.4 -** Elasticsearch/Fluentd -* 5.5 - 5.9: [Note: features vary by version.] -** Elasticsearch/Fluentd -** Elasticsearch/Vector -** Loki/Fluentd -** Loki/Vector -* 6.0 -** Loki/Vector - -== Naming Conventions: -[May not be inclusive of all relevant modules.] -* 5.0 - 5.4 -** cluster-logging- -* 5.5 - 5.9 -** logging- -** loki-logging- -* 6.0 -** log6x- diff --git a/observability/logging/logging-6.0/log6x-release-notes.adoc b/observability/logging/logging-6.0/log6x-release-notes.adoc deleted file mode 100644 index e6046de999bc..000000000000 --- a/observability/logging/logging-6.0/log6x-release-notes.adoc +++ /dev/null @@ -1,81 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-release-notes"] -= Logging 6.0.0 -:context: logging-6x - -toc::[] - -This release includes link:https://access.redhat.com/errata/RHBA-2024:6693[{logging-uc} {for} Bug Fix Release 6.0.0] - -include::snippets/logging-compatibility-snip.adoc[] - -.Upstream component versions -[options="header"] -|=== - -| {logging} Version 6+| Component Version - -| Operator | `eventrouter` | `logfilemetricexporter` | `loki` | `lokistack-gateway` | `opa-openshift` | `vector` - -|6.0 | 0.4 | 1.1 | 3.1.0 | 0.1 | 0.1 | 0.37.1 - -|=== - -[id="log6x-release-notes-6-0-0-removal-notice"] -== Removal notice - -* With this release, {logging} no longer supports the `ClusterLogging.logging.openshift.io` and `ClusterLogForwarder.logging.openshift.io` custom resources. Refer to the product documentation for details on the replacement features. (link:https://issues.redhat.com/browse/LOG-5803[LOG-5803]) - -* With this release, {logging} no longer manages or deploys log storage (such as Elasticsearch), visualization (such as Kibana), or Fluentd-based log collectors. (link:https://issues.redhat.com/browse/LOG-5368[LOG-5368]) - -[NOTE] -==== -In order to continue to use Elasticsearch and Kibana managed by the elasticsearch-operator, the administrator must modify those object's ownerRefs before deleting the ClusterLogging resource. -==== - -[id="log6x-release-notes-6-0-0-enhancements"] -== New features and enhancements - -* This feature introduces a new architecture for {logging} {for} by shifting component responsibilities to their relevant Operators, such as for storage, visualization, and collection. It introduces the `ClusterLogForwarder.observability.openshift.io` API for log collection and forwarding. Support for the `ClusterLogging.logging.openshift.io` and `ClusterLogForwarder.logging.openshift.io` APIs, along with the Red Hat managed Elastic stack (Elasticsearch and Kibana), is removed. Users are encouraged to migrate to the Red Hat `LokiStack` for log storage. Existing managed Elasticsearch deployments can be used for a limited time. Automated migration for log collection is not provided, so administrators need to create a new ClusterLogForwarder.observability.openshift.io specification to replace their previous custom resources. Refer to the official product documentation for more details. 
(link:https://issues.redhat.com/browse/LOG-3493[LOG-3493]) - -* With this release, the responsibility for deploying the {logging} view plugin shifts from the {clo} to the {coo-first}. For new log storage installations that need visualization, the {coo-full} and the associated `UIPlugin` resource must be deployed. For more information, see xref:log6x-visual.adoc#log6x-visual[Visualization for logging]. (link:https://issues.redhat.com/browse/LOG-5461[LOG-5461]) -+ --- -include::snippets/logging-support-exception-for-cluster-observability-operator-due-to-logging-ui-plugin.adoc[] --- - -* This enhancement sets default requests and limits for Vector collector deployments' memory and CPU usage based on Vector documentation recommendations. (link:https://issues.redhat.com/browse/LOG-4745[LOG-4745]) - -* This enhancement updates Vector to align with the upstream version v0.37.1. (link:https://issues.redhat.com/browse/LOG-5296[LOG-5296]) - -* This enhancement introduces an alert that triggers when log collectors buffer logs to a node's file system and use over 15% of the available space, indicating potential back pressure issues. (link:https://issues.redhat.com/browse/LOG-5381[LOG-5381]) - -* This enhancement updates the selectors for all components to use common Kubernetes labels. (link:https://issues.redhat.com/browse/LOG-5906[LOG-5906]) - -* This enhancement changes the collector configuration to deploy as a ConfigMap instead of a secret, allowing users to view and edit the configuration when the ClusterLogForwarder is set to Unmanaged. (link:https://issues.redhat.com/browse/LOG-5599[LOG-5599]) - -* This enhancement adds the ability to configure the Vector collector log level using an annotation on the ClusterLogForwarder, with options including trace, debug, info, warn, error, or off. (link:https://issues.redhat.com/browse/LOG-5372[LOG-5372]) - -* This enhancement adds validation to reject configurations where Amazon CloudWatch outputs use multiple AWS roles, preventing incorrect log routing. (link:https://issues.redhat.com/browse/LOG-5640[LOG-5640]) -* This enhancement removes the Log Bytes Collected and Log Bytes Sent graphs from the metrics dashboard. (link:https://issues.redhat.com/browse/LOG-5964[LOG-5964]) - -* This enhancement updates the must-gather functionality to only capture information for inspecting Logging 6.0 components, including Vector deployments from ClusterLogForwarder.observability.openshift.io resources and the Red Hat managed LokiStack. (link:https://issues.redhat.com/browse/LOG-5949[LOG-5949]) - - -* This enhancement improves Azure storage secret validation by providing early warnings for specific error conditions. (link:https://issues.redhat.com/browse/LOG-4571[LOG-4571]) - -[id="log6x-release-notes-6-0-0-technology-preview-features"] -== Technology Preview features - -* This release introduces a Technology Preview feature for log forwarding using OpenTelemetry. A new output type,` OTLP`, allows sending JSON-encoded log records using the OpenTelemetry data model and resource semantic conventions. (link:https://issues.redhat.com/browse/LOG-4225[LOG-4225]) - -[id="log6x-release-notes-6-0-0-bug-fixes"] -== Bug fixes - -* Before this update, the `CollectorHighErrorRate` and `CollectorVeryHighErrorRate` alerts were still present. With this update, both alerts are removed in the {logging} 6.0 release but might return in a future release. 
(link:https://issues.redhat.com/browse/LOG-3432[LOG-3432]) - -[id="log6x-release-notes-6-0-0-CVEs"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2024-34397[CVE-2024-34397] diff --git a/observability/logging/logging-6.0/log6x-upgrading-to-6.adoc b/observability/logging/logging-6.0/log6x-upgrading-to-6.adoc deleted file mode 100644 index c403df7bc536..000000000000 --- a/observability/logging/logging-6.0/log6x-upgrading-to-6.adoc +++ /dev/null @@ -1,477 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-upgrading-to-6"] -= Upgrading to Logging 6.0 -:context: log6x - -toc::[] - -Logging v6.0 is a significant upgrade from previous releases, achieving several longstanding goals of Cluster Logging: - -* Introduction of distinct operators to manage logging components (e.g., collectors, storage, visualization). -* Removal of support for managed log storage and visualization based on Elastic products (i.e., Elasticsearch, Kibana). -* Deprecation of the Fluentd log collector implementation. -* Removal of support for `ClusterLogging.logging.openshift.io` and `ClusterLogForwarder.logging.openshift.io` resources. - -[NOTE] -==== -The *cluster-logging-operator* does not provide an automated upgrade process. -==== - -Given the various configurations for log collection, forwarding, and storage, no automated upgrade is provided by the *cluster-logging-operator*. This documentation assists administrators in converting existing `ClusterLogging.logging.openshift.io` and `ClusterLogForwarder.logging.openshift.io` specifications to the new API. Examples of migrated `ClusterLogForwarder.observability.openshift.io` resources for common use cases are included. - -include::modules/log6x-oc-explain.adoc[leveloffset=+1] - -== Log Storage - -The only managed log storage solution available in this release is a Lokistack, managed by the Loki Operator. This solution, previously available as the preferred alternative to the managed Elasticsearch offering, remains unchanged in its deployment process. - -[IMPORTANT] -==== -To continue using an existing Red Hat managed Elasticsearch or Kibana deployment provided by the Elasticsearch Operator, remove the owner references from the Elasticsearch resource named `elasticsearch`, and the Kibana resource named `kibana` in the `openshift-logging` namespace before removing the `ClusterLogging` resource named `instance` in the same namespace. -==== - - -. Temporarily set `ClusterLogging` resource to the `Unmanaged` state by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging patch clusterlogging/instance -p '{"spec":{"managementState": "Unmanaged"}}' --type=merge ----- - -. Remove the `ownerReferences` parameter from the `Elasticsearch` resource by running the following command: -+ -The following command ensures that `ClusterLogging` no longer owns the `Elasticsearch` resource. Updates to the `ClusterLogging` resource's `logStore` field will no longer affect the `Elasticsearch` resource. -+ -[source,terminal] ----- -$ oc -n openshift-logging patch elasticsearch/elasticsearch -p '{"metadata":{"ownerReferences": []}}' --type=merge ----- - -. Remove the `ownerReferences` parameter from the `Kibana` resource. -+ -The following command ensures that Cluster Logging no longer owns the `Kibana` resource. Updates to the `ClusterLogging` resource's `visualization` field will no longer affect the `Kibana` resource. 
-+ -[source,terminal] ----- -$ oc -n openshift-logging patch kibana/kibana -p '{"metadata":{"ownerReferences": []}}' --type=merge ----- - -. Set the `ClusterLogging` resource to the `Managed` state by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-logging patch clusterlogging/instance -p '{"spec":{"managementState": "Managed"}}' --type=merge ----- - -== Log Visualization -[subs="+quotes"] -The OpenShift console UI plugin for log visualization has been moved to the *cluster-observability-operator* from the *cluster-logging-operator*. -// Pending support statement. - - - -== Log Collection and Forwarding -// Can't link to github, need to figure a workaround. - -Log collection and forwarding configurations are now specified under the new link:https://github.com/openshift/cluster-logging-operator/blob/master/docs/reference/operator/api_observability_v1.adoc[API], part of the `observability.openshift.io` API group. The following sections highlight the differences from the old API resources. - -[NOTE] -==== -Vector is the only supported collector implementation. -==== - -== Management, Resource Allocation, and Workload Scheduling - -Configuration for management state (e.g., Managed, Unmanaged), resource requests and limits, tolerations, and node selection is now part of the new *ClusterLogForwarder* API. - -.Previous Configuration -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -spec: - managementState: "Managed" - collection: - resources: - limits: {} - requests: {} - nodeSelector: {} - tolerations: {} ----- - -.Current Configuration -[source,yaml] ----- -apiVersion: "observability.openshift.io/v1" -kind: ClusterLogForwarder -spec: - managementState: Managed - collector: - resources: - limits: {} - requests: {} - nodeSelector: {} - tolerations: {} ----- - -== Input Specifications - -The input specification is an optional part of the *ClusterLogForwarder* specification. Administrators can continue to use the predefined values of *application*, *infrastructure*, and *audit* to collect these sources. - -=== Application Inputs - -Namespace and container inclusions and exclusions have been consolidated into a single field. - -.5.9 Application Input with Namespace and Container Includes and Excludes -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -spec: - inputs: - - name: application-logs - type: application - application: - namespaces: - - foo - - bar - includes: - - namespace: my-important - container: main - excludes: - - container: too-verbose ----- - -.6.0 Application Input with Namespace and Container Includes and Excludes -[source,yaml] ----- -apiVersion: "observability.openshift.io/v1" -kind: ClusterLogForwarder -spec: - inputs: - - name: application-logs - type: application - application: - includes: - - namespace: foo - - namespace: bar - - namespace: my-important - container: main - excludes: - - container: too-verbose ----- - -[NOTE] -==== -*application*, *infrastructure*, and *audit* are reserved words and cannot be used as names when defining an input. -==== - -=== Input Receivers - -Changes to input receivers include: - -* Explicit configuration of the type at the receiver level. -* Port settings moved to the receiver level. 
- -.5.9 Input Receivers -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -spec: - inputs: - - name: an-http - receiver: - http: - port: 8443 - format: kubeAPIAudit - - name: a-syslog - receiver: - type: syslog - syslog: - port: 9442 ----- - -.6.0 Input Receivers -[source,yaml] ----- -apiVersion: "observability.openshift.io/v1" -kind: ClusterLogForwarder -spec: - inputs: - - name: an-http - type: receiver - receiver: - type: http - port: 8443 - http: - format: kubeAPIAudit - - name: a-syslog - type: receiver - receiver: - type: syslog - port: 9442 ----- - -== Output Specifications - -High-level changes to output specifications include: - -* URL settings moved to each output type specification. -* Tuning parameters moved to each output type specification. -* Separation of TLS configuration from authentication. -* Explicit configuration of keys and secret/configmap for TLS and authentication. - -== Secrets and TLS Configuration - -Secrets and TLS configurations are now separated into authentication and TLS configuration for each output. They must be explicitly defined in the specification rather than relying on administrators to define secrets with recognized keys. Upgrading TLS and authorization configurations requires administrators to understand previously recognized keys to continue using existing secrets. Examples in the following sections provide details on how to configure *ClusterLogForwarder* secrets to forward to existing Red Hat managed log storage solutions. - -== Red Hat Managed Elasticsearch - -.v5.9 Forwarding to Red Hat Managed Elasticsearch -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - logStore: - type: elasticsearch ----- - -.v6.0 Forwarding to Red Hat Managed Elasticsearch -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - serviceAccount: - name: - managementState: Managed - outputs: - - name: audit-elasticsearch - type: elasticsearch - elasticsearch: - url: https://elasticsearch:9200 - version: 6 - index: audit-write - tls: - ca: - key: ca-bundle.crt - secretName: collector - certificate: - key: tls.crt - secretName: collector - key: - key: tls.key - secretName: collector - - name: app-elasticsearch - type: elasticsearch - elasticsearch: - url: https://elasticsearch:9200 - version: 6 - index: app-write - tls: - ca: - key: ca-bundle.crt - secretName: collector - certificate: - key: tls.crt - secretName: collector - key: - key: tls.key - secretName: collector - - name: infra-elasticsearch - type: elasticsearch - elasticsearch: - url: https://elasticsearch:9200 - version: 6 - index: infra-write - tls: - ca: - key: ca-bundle.crt - secretName: collector - certificate: - key: tls.crt - secretName: collector - key: - key: tls.key - secretName: collector - pipelines: - - name: app - inputRefs: - - application - outputRefs: - - app-elasticsearch - - name: audit - inputRefs: - - audit - outputRefs: - - audit-elasticsearch - - name: infra - inputRefs: - - infrastructure - outputRefs: - - infra-elasticsearch ----- - -== Red Hat Managed LokiStack - -.v5.9 Forwarding to Red Hat Managed LokiStack -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - logStore: - type: lokistack - lokistack: - name: lokistack-dev ----- - -.v6.0 Forwarding to Red Hat 
Managed LokiStack -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - serviceAccount: - name: - outputs: - - name: default-lokistack - type: lokiStack - lokiStack: - target: - name: lokistack-dev - namespace: openshift-logging - authentication: - token: - from: serviceAccount - tls: - ca: - key: service-ca.crt - configMapName: openshift-service-ca.crt - pipelines: - - outputRefs: - - default-lokistack - - inputRefs: - - application - - infrastructure ----- - -== Filters and Pipeline Configuration - -Pipeline configurations now define only the routing of input sources to their output destinations, with any required transformations configured separately as filters. All attributes of pipelines from previous releases have been converted to filters in this release. Individual filters are defined in the `filters` specification and referenced by a pipeline. - -.5.9 Filters -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -spec: - pipelines: - - name: application-logs - parse: json - labels: - foo: bar - detectMultilineErrors: true ----- - -.6.0 Filter Configuration -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -spec: - filters: - - name: detectexception - type: detectMultilineException - - name: parse-json - type: parse - - name: labels - type: openshiftLabels - openshiftLabels: - foo: bar - pipelines: - - name: application-logs - filterRefs: - - detectexception - - labels - - parse-json ----- - -== Validation and Status - -Most validations are enforced when a resource is created or updated, providing immediate feedback. This is a departure from previous releases, where validation occurred post-creation and required inspecting the resource status. Some validation still occurs post-creation for cases where it is not possible to validate at creation or update time. - -Instances of the `ClusterLogForwarder.observability.openshift.io` must satisfy the following conditions before the operator will deploy the log collector: Authorized, Valid, Ready. 
An example of these conditions is: - -.6.0 Status Conditions -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -status: - conditions: - - lastTransitionTime: "2024-09-13T03:28:44Z" - message: 'permitted to collect log types: [application]' - reason: ClusterRolesExist - status: "True" - type: observability.openshift.io/Authorized - - lastTransitionTime: "2024-09-13T12:16:45Z" - message: "" - reason: ValidationSuccess - status: "True" - type: observability.openshift.io/Valid - - lastTransitionTime: "2024-09-13T12:16:45Z" - message: "" - reason: ReconciliationComplete - status: "True" - type: Ready - filterConditions: - - lastTransitionTime: "2024-09-13T13:02:59Z" - message: filter "detectexception" is valid - reason: ValidationSuccess - status: "True" - type: observability.openshift.io/ValidFilter-detectexception - - lastTransitionTime: "2024-09-13T13:02:59Z" - message: filter "parse-json" is valid - reason: ValidationSuccess - status: "True" - type: observability.openshift.io/ValidFilter-parse-json - inputConditions: - - lastTransitionTime: "2024-09-13T12:23:03Z" - message: input "application1" is valid - reason: ValidationSuccess - status: "True" - type: observability.openshift.io/ValidInput-application1 - outputConditions: - - lastTransitionTime: "2024-09-13T13:02:59Z" - message: output "default-lokistack-application1" is valid - reason: ValidationSuccess - status: "True" - type: observability.openshift.io/ValidOutput-default-lokistack-application1 - pipelineConditions: - - lastTransitionTime: "2024-09-13T03:28:44Z" - message: pipeline "default-before" is valid - reason: ValidationSuccess - status: "True" - type: observability.openshift.io/ValidPipeline-default-before ----- - -[NOTE] -==== -Conditions that are satisfied and applicable have a "status" value of "True". Conditions with a status other than "True" provide a reason and a message explaining the issue. -==== diff --git a/observability/logging/logging-6.0/log6x-visual.adoc b/observability/logging/logging-6.0/log6x-visual.adoc deleted file mode 100644 index f24f28c7fe0a..000000000000 --- a/observability/logging/logging-6.0/log6x-visual.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log6x-visual"] -= Visualization for logging -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: logging-6x - -toc::[] - -ifndef::openshift-rosa,openshift-rosa-hcp[] -Visualization for logging is provided by deploying the link:https://docs.redhat.com/en/documentation/red_hat_openshift_cluster_observability_operator/1-latest/html/ui_plugins_for_red_hat_openshift_cluster_observability_operator/logging-ui-plugin#coo-logging-ui-plugin-install_logging-ui-plugin[Logging UI Plugin] of the link:https://docs.redhat.com/en/documentation/red_hat_openshift_cluster_observability_operator/1-latest/html/about_red_hat_openshift_cluster_observability_operator/cluster-observability-operator-overview-1[Cluster Observability Operator], which requires Operator installation. -endif::openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-rosa,openshift-rosa-hcp[] -Visualization for logging is provided by deploying the Logging UI Plugin of the Cluster Observability Operator, which requires Operator installation. 
-endif::openshift-rosa,openshift-rosa-hcp[] - -include::snippets/logging-support-exception-for-cluster-observability-operator-due-to-logging-ui-plugin.adoc[] diff --git a/observability/logging/logging-6.0/modules b/observability/logging/logging-6.0/modules deleted file mode 120000 index 7e8b50bee77a..000000000000 --- a/observability/logging/logging-6.0/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules/ \ No newline at end of file diff --git a/observability/logging/logging-6.0/snippets b/observability/logging/logging-6.0/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/observability/logging/logging-6.0/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/observability/logging/logging-6.1/6x-cluster-logging-collector-6.1.adoc b/observability/logging/logging-6.1/6x-cluster-logging-collector-6.1.adoc deleted file mode 100644 index 4fdb50a4d4a3..000000000000 --- a/observability/logging/logging-6.1/6x-cluster-logging-collector-6.1.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-collector-6-0 -[id="cluster-logging-collector-6-0"] -= Configuring the logging collector -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. -All supported modifications to the log collector are performed through the `spec.collection` stanza in the `ClusterLogForwarder` custom resource (CR). - -include::modules/log6x-creating-logfilesmetricexporter.adoc[leveloffset=+1] - -include::modules/log6x-cluster-logging-collector-limits.adoc[leveloffset=+1] - -[id="cluster-logging-collector-input-receivers_{context}"] -== Configuring input receivers - -The {clo} deploys a service for each configured input receiver so that clients can write to the collector. This service exposes the port specified for the input receiver. For log forwarder `ClusterLogForwarder` CR deployments, the service name is in the `-` format.
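For example, an HTTP receiver input that accepts Kubernetes API audit-format payloads can be declared as follows. This sketch follows the receiver format shown elsewhere in this document; the input name and port are illustrative.

.Example HTTP receiver input
[source,yaml]
----
apiVersion: observability.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: collector
  namespace: openshift-logging
spec:
  serviceAccount:
    name: collector
  inputs:
  - name: an-http # illustrative input name; the generated service exposes the port below
    type: receiver
    receiver:
      type: http
      port: 8443
      http:
        format: kubeAPIAudit
# ...
----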
- -include::modules/log6x-log-collector-http-server.adoc[leveloffset=+2] -include::modules/log6x-log-collector-syslog-server.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.1/_attributes b/observability/logging/logging-6.1/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/observability/logging/logging-6.1/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/observability/logging/logging-6.1/images b/observability/logging/logging-6.1/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/observability/logging/logging-6.1/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/observability/logging/logging-6.1/log61-cluster-logging-support.adoc b/observability/logging/logging-6.1/log61-cluster-logging-support.adoc deleted file mode 100644 index b9436cded0ae..000000000000 --- a/observability/logging/logging-6.1/log61-cluster-logging-support.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log61-cluster-logging-support"] -= Support -include::_attributes/common-attributes.adoc[] -:context: log61-cluster-logging-support - -toc::[] - -include::snippets/logging-supported-config-snip.adoc[] -include::snippets/logging-compatibility-snip.adoc[] -include::snippets/log6x-loki-statement-snip.adoc[] - -{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. It is intended to be used for forwarding logs to various supported systems. - -{logging-uc} is not: - -* A high scale log collection system -* Security Information and Event Monitoring (SIEM) compliant -* A "bring your own" (BYO) log collector configuration -* Historical or long term log retention or storage -* A guaranteed log sink -* Secure storage - audit logs are not stored by default - -[id="cluster-logging-support-CRDs_{context}"] -== Supported API custom resource definitions - -The following table describes the supported {logging-uc} APIs. - -include::snippets/log6x-api-support-states-snip.adoc[] - -include::modules/cluster-logging-maintenance-support-list-6x.adoc[leveloffset=+1] -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="support-exception-for-coo-logging-ui-plugin_{context}"] -== Support exception for the Logging UI Plugin - -Until the approaching General Availability (GA) release of the Cluster Observability Operator (COO), which is currently in link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview] (TP), Red{nbsp}Hat provides support to customers who are using Logging 6.0 or later with the COO for its Logging UI Plugin on {product-title} 4.14 or later. This support exception is temporary as the COO includes several independent features, some of which are still TP features, but the Logging UI Plugin is ready for GA. - -[id="cluster-logging-support-must-gather_{context}"] -== Collecting {logging} data for Red Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red{nbsp}Hat Support. - -You can use the xref:../../../support/gathering-cluster-data.adoc#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. -For prompt support, supply diagnostic information for both {product-title} and {logging}. 
- -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+2] -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.1/log6x-about-6.1.adoc b/observability/logging/logging-6.1/log6x-about-6.1.adoc deleted file mode 100644 index 184eb35b0053..000000000000 --- a/observability/logging/logging-6.1/log6x-about-6.1.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-about-6-1"] -= Logging 6.1 -:context: logging-6x-6.1 - -toc::[] - -The `ClusterLogForwarder` custom resource (CR) is the central configuration point for log collection and forwarding. - -[id="inputs-and-outputs_6-1_{context}"] -== Inputs and outputs - -Inputs specify the sources of logs to be forwarded. Logging provides the following built-in input types that select logs from different parts of your cluster: - -* `application` -* `receiver` -* `infrastructure` -* `audit` - -You can also define custom inputs based on namespaces or pod labels to fine-tune log selection. - -Outputs define the destinations where logs are sent. Each output type has its own set of configuration options, allowing you to customize the behavior and authentication settings. - -[id="receiver-input-type_6-1_{context}"] -== Receiver input type -The receiver input type enables the Logging system to accept logs from external sources. It supports two formats for receiving logs: `http` and `syslog`. - -The `ReceiverSpec` field defines the configuration for a receiver input. - -[id="pipelines-and-filters_6-1_{context}"] -== Pipelines and filters - -Pipelines determine the flow of logs from inputs to outputs. A pipeline consists of one or more input refs, output refs, and optional filter refs. You can use filters to transform or drop log messages within a pipeline. The order of filters matters, as they are applied sequentially, and earlier filters can prevent log messages from reaching later stages. - -[id="operator-behavior_6-1_{context}"] -== Operator behavior - -The Cluster Logging Operator manages the deployment and configuration of the collector based on the `managementState` field of the `ClusterLogForwarder` resource: - -- When set to `Managed` (default), the Operator actively manages the logging resources to match the configuration defined in the spec. -- When set to `Unmanaged`, the Operator does not take any action, allowing you to manually manage the logging components. - -[id="validation_6-1_{context}"] -== Validation -Logging includes extensive validation rules and default values to ensure a smooth and error-free configuration experience. The `ClusterLogForwarder` resource enforces validation checks on required fields, dependencies between fields, and the format of input values. Default values are provided for certain fields, reducing the need for explicit configuration in common scenarios. - -[id="quick-start_6-1_{context}"] -== Quick start - -OpenShift Logging supports two data models: - -* ViaQ (General Availability) -* OpenTelemetry (Technology Preview) - -You can select either of these data models based on your requirement by configuring the `lokiStack.dataModel` field in the `ClusterLogForwarder`. ViaQ is the default data model when forwarding logs to LokiStack. - -[NOTE] -==== -In future releases of OpenShift Logging, the default data model will change from ViaQ to OpenTelemetry. 
-==== - -include::modules/log6x-quickstart-viaq.adoc[leveloffset=+2] - -include::modules/log6x-quickstart-opentelemetry.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.1/log6x-clf-6.1.adoc b/observability/logging/logging-6.1/log6x-clf-6.1.adoc deleted file mode 100644 index 062258d86765..000000000000 --- a/observability/logging/logging-6.1/log6x-clf-6.1.adoc +++ /dev/null @@ -1,122 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-clf-6-1"] -= Configuring log forwarding -:context: logging-6x-6.1 - -toc::[] - -The `ClusterLogForwarder` (CLF) allows users to configure forwarding of logs to various destinations. It provides a flexible way to select log messages from different sources, send them through a pipeline that can transform or filter them, and forward them to one or more outputs. - -.Key Functions of the ClusterLogForwarder -* Selects log messages using inputs -* Forwards logs to external destinations using outputs -* Filters, transforms, and drops log messages using filters -* Defines log forwarding pipelines connecting inputs, filters and outputs - -include::modules/log6x-collection-setup.adoc[leveloffset=+1] - -[id="modifying-log-level_6-1_{context}"] -== Modifying log level in collector - -To modify the log level in the collector, you can set the `observability.openshift.io/log-level` annotation to `trace`, `debug`, `info`, `warn`, `error`, and `off`. - -.Example log level annotation -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: collector - annotations: - observability.openshift.io/log-level: debug -# ... ----- - -[id="managing-the-operator_6-1_{context}"] -== Managing the Operator - -The `ClusterLogForwarder` resource has a `managementState` field that controls whether the operator actively manages its resources or leaves them Unmanaged: - -Managed:: (default) The operator will drive the logging resources to match the desired state in the CLF spec. - -Unmanaged:: The operator will not take any action related to the logging components. - -This allows administrators to temporarily pause log forwarding by setting `managementState` to `Unmanaged`. - -[id="clf-structure_6-1_{context}"] -== Structure of the ClusterLogForwarder - -The CLF has a `spec` section that contains the following key components: - -Inputs:: Select log messages to be forwarded. Built-in input types `application`, `infrastructure` and `audit` forward logs from different parts of the cluster. You can also define custom inputs. - -Outputs:: Define destinations to forward logs to. Each output has a unique name and type-specific configuration. - -Pipelines:: Define the path logs take from inputs, through filters, to outputs. Pipelines have a unique name and consist of a list of input, output and filter names. - -Filters:: Transform or drop log messages in the pipeline. Users can define filters that match certain log fields and drop or modify the messages. Filters are applied in the order specified in the pipeline. - -[id="clf-inputs_6-1_{context}"] -=== Inputs - -Inputs are configured in an array under `spec.inputs`. There are three built-in input types: - -application:: Selects logs from all application containers, excluding those in infrastructure namespaces. 
- -infrastructure:: Selects logs from nodes and from infrastructure components running in the following namespaces: -** `default` -** `kube` -** `openshift` -** Containing the `kube-` or `openshift-` prefix - -audit:: Selects logs from the OpenShift API server audit logs, Kubernetes API server audit logs, ovn audit logs, and node audit logs from auditd. - -Users can define custom inputs of type `application` that select logs from specific namespaces or using pod labels. - -[id="clf-outputs_6-1_{context}"] -=== Outputs - -Outputs are configured in an array under `spec.outputs`. Each output must have a unique name and a type. Supported types are: - -azureMonitor:: Forwards logs to Azure Monitor. -cloudwatch:: Forwards logs to AWS CloudWatch. -//elasticsearch:: Forwards logs to an external Elasticsearch instance. -googleCloudLogging:: Forwards logs to {gcp-full} Logging. -http:: Forwards logs to a generic HTTP endpoint. -kafka:: Forwards logs to a Kafka broker. -loki:: Forwards logs to a Loki logging backend. -lokistack:: Forwards logs to the logging supported combination of Loki and web proxy with {Product-Title} authentication integration. LokiStack's proxy uses {Product-Title} authentication to enforce multi-tenancy -otlp:: Forwards logs using the OpenTelemetry Protocol. -splunk:: Forwards logs to Splunk. -syslog:: Forwards logs to an external syslog server. - -Each output type has its own configuration fields. - -include::modules/log6x-configuring-otlp-output.adoc[leveloffset=+1] - -[id="clf-pipelines_6-1_{context}"] -=== Pipelines - -Pipelines are configured in an array under `spec.pipelines`. Each pipeline must have a unique name and consists of: - -inputRefs:: Names of inputs whose logs should be forwarded to this pipeline. -outputRefs:: Names of outputs to send logs to. -filterRefs:: (optional) Names of filters to apply. - -The order of filterRefs matters, as they are applied sequentially. Earlier filters can drop messages that will not be processed by later filters. - -[id="clf-filters_6-1_{context}"] -=== Filters - -Filters are configured in an array under `spec.filters`. They can match incoming log messages based on the value of structured fields and modify or drop them. - -Administrators can configure the following types of filters: - -include::modules/log6x-multiline-except.adoc[leveloffset=+2] -include::modules/log6x-content-filter-drop-records.adoc[leveloffset=+2] -include::modules/log6x-audit-log-filtering.adoc[leveloffset=+2] -include::modules/log6x-input-spec-filter-labels-expressions.adoc[leveloffset=+2] -include::modules/log6x-content-filter-prune-records.adoc[leveloffset=+2] -include::modules/log6x-input-spec-filter-audit-infrastructure.adoc[leveloffset=+1] -include::modules/log6x-input-spec-filter-namespace-container.adoc[leveloffset=+1] diff --git a/observability/logging/logging-6.1/log6x-configuring-lokistack-otlp-6.1.adoc b/observability/logging/logging-6.1/log6x-configuring-lokistack-otlp-6.1.adoc deleted file mode 100644 index 602dc6e7efdb..000000000000 --- a/observability/logging/logging-6.1/log6x-configuring-lokistack-otlp-6.1.adoc +++ /dev/null @@ -1,146 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log6x-configuring-lokistack-otlp-6-1"] -= OTLP data ingestion in Loki -include::_attributes/common-attributes.adoc[] -:context: log6x-configuring-lokistack-otlp-6-1 - -toc::[] - -You can use an API endpoint by using the OpenTelemetry Protocol (OTLP) with Logging 6.1. 
As OTLP is a standardized format not specifically designed for Loki, OTLP requires an additional Loki configuration to map data format of OpenTelemetry to data model of Loki. OTLP lacks concepts such as _stream labels_ or _structured metadata_. Instead, OTLP provides metadata about log entries as *attributes*, grouped into the following three categories: - -* Resource -* Scope -* Log - -You can set metadata for multiple entries simultaneously or individually as needed. - -include::modules/log6x-configuring-lokistack-otlp-data-ingestion.adoc[leveloffset=+1] - -[id="attribute-mapping_{context}"] -== Attribute mapping - -When you set the {loki-op} to the `openshift-logging` mode, {loki-op} automatically applies a default set of attribute mappings. These mappings align specific OTLP attributes with stream labels and structured metadata of Loki. - -For typical setups, these default mappings are sufficient. However, you might need to customize attribute mapping in the following cases: - -* Using a custom collector: If your setup includes a custom collector that generates additional attributes, consider customizing the mapping to ensure these attributes are retained in Loki. -* Adjusting attribute detail levels: If the default attribute set is more detailed than necessary, you can reduce it to essential attributes only. This can avoid excessive data storage and streamline the {logging} process. - -[IMPORTANT] -==== -Attributes that are not mapped to either stream labels or structured metadata are not stored in Loki. -==== - -[id="custom-attribute-mapping-for-openshift_{context}"] -=== Custom attribute mapping for OpenShift -When using the {loki-op} in `openshift-logging` mode, attribute mapping follow OpenShift default values, but you can configure custom mappings to adjust default values. -In the `openshift-logging` mode, you can configure custom attribute mappings globally for all tenants or for individual tenants as needed. When you define custom mappings, they are appended to the OpenShift default values. If you do not need default labels, you can disable them in the tenant configuration. - -[NOTE] -==== -A major difference between the {loki-op} and Loki lies in inheritance handling. Loki copies only `default_resource_attributes_as_index_labels` to tenants by default, while the {loki-op} applies the entire global configuration to each tenant in the `openshift-logging` mode. -==== - -Within `LokiStack`, attribute mapping configuration is managed through the `limits` setting. See the following example `LokiStack` configuration: - -[source,yaml] ----- -# ... -spec: - limits: - global: - otlp: {} # <1> - tenants: - application: - otlp: {} # <2> ----- -<1> Defines global OTLP attribute configuration. -<2> OTLP attribute configuration for the `application` tenant within `openshift-logging` mode. - -[NOTE] -==== -Both global and per-tenant OTLP configurations can map attributes to stream labels or structured metadata. At least one stream label is required to save a log entry to Loki storage, so ensure this configuration meets that requirement. -==== - -Stream labels derive only from resource-level attributes, which the `LokiStack` resource structure reflects: - -[source,yaml] ----- -spec: - limits: - global: - otlp: - streamLabels: - resourceAttributes: - - name: "k8s.namespace.name" - - name: "k8s.pod.name" - - name: "k8s.container.name" ----- - -Structured metadata, in contrast, can be generated from resource, scope or log-level attributes: - -[source,yaml] ----- -# ... 
-spec: - limits: - global: - otlp: - streamLabels: -# ... - structuredMetadata: - resourceAttributes: - - name: "process.command_line" - - name: "k8s\\.pod\\.labels\\..+" - regex: true - scopeAttributes: - - name: "service.name" - logAttributes: - - name: "http.route" ----- - -[TIP] -==== -Use regular expressions by setting `regex: true` for attributes names when mapping similar attributes in Loki. -==== - -[IMPORTANT] -==== -Avoid using regular expressions for stream labels, as this can increase data volume. -==== - -[id="customizing-openshift-defaults_{context}"] -=== Customizing OpenShift defaults - -In `openshift-logging` mode, certain attributes are required and cannot be removed from the configuration due to their role in OpenShift functions. Other attributes, labeled *recommended*, might be disabled if performance is impacted. - -When using the `openshift-logging` mode without custom attributes, you can achieve immediate compatibility with OpenShift tools. If additional attributes are needed as stream labels or structured metadata, use custom configuration. Custom configurations can merge with default configurations. - -[id="removing-recommended-attributes_{context}"] -=== Removing recommended attributes - -To reduce default attributes in `openshift-logging` mode, disable recommended attributes: - -[source,yaml] ----- -# ... -spec: - tenants: - mode: openshift-logging - openshift: - otlp: - disableRecommendedAttributes: true # <1> ----- -<1> Set `disableRecommendedAttributes: true` to remove recommended attributes, which limits default attributes to the *required attributes*. - -[NOTE] -==== -This option is beneficial if the default attributes causes performance or storage issues. This setting might negatively impact query performance, as it removes default stream labels. You should pair this option with a custom attribute configuration to retain attributes essential for queries. -==== - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources -* link:https://grafana.com/docs/loki/latest/get-started/labels/[Loki labels] -* link:https://grafana.com/docs/loki/latest/get-started/labels/structured-metadata/[Structured metadata] -* link:https://opentelemetry.io/docs/specs/otel/common/#attribute[OpenTelemetry attribute] diff --git a/observability/logging/logging-6.1/log6x-loki-6.1.adoc b/observability/logging/logging-6.1/log6x-loki-6.1.adoc deleted file mode 100644 index b13e0aeaf7a9..000000000000 --- a/observability/logging/logging-6.1/log6x-loki-6.1.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[leveloffset=+1] -[id="log6x-loki-6-1"] -= Storing logs with LokiStack -:context: log6x-loki-6.1 - -toc::[] - -You can configure a `LokiStack` CR to store application, audit, and infrastructure-related logs. - -include::snippets/log6x-loki-statement-snip.adoc[leveloffset=+1] - -include::modules/log6x-loki-sizing.adoc[leveloffset=+1] - -[id="prerequisites-6-1_{context}"] -== Prerequisites - -* You have installed the {loki-op} by using the CLI or web console. -* You have a `serviceAccount` in the same namespace in which you create the `ClusterLogForwarder`. -* The `serviceAccount` is assigned `collect-audit-logs`, `collect-application-logs`, and `collect-infrastructure-logs` cluster roles. 
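The following is a minimal `LokiStack` sketch of the kind that the rest of this section configures, mirroring the quick-start example earlier in this document. The object storage secret name, storage class, and size are environment-specific assumptions.

.Example LokiStack custom resource
[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
  name: logging-loki
  namespace: openshift-logging
spec:
  managementState: Managed
  size: 1x.extra-small # choose a size appropriate for your cluster
  storage:
    schemas:
    - effectiveDate: '2022-06-01'
      version: v13
    secret:
      name: logging-loki-s3 # assumed object storage secret
      type: s3
  storageClassName: gp3-csi # assumed storage class name
  tenants:
    mode: openshift-logging
----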
- -[id="setup-6-1_{context}"] -== Core Setup and Configuration -*Role-based access controls, basic monitoring, and pod placement to deploy Loki.* - -include::modules/log6x-loki-rbac-rules-perms.adoc[leveloffset=+1] -include::modules/log6x-enabling-loki-alerts.adoc[leveloffset=+1] -include::modules/log6x-loki-memberlist-ip.adoc[leveloffset=+1] -include::modules/log6x-loki-retention.adoc[leveloffset=+1] -include::modules/log6x-loki-pod-placement.adoc[leveloffset=+1] - -[id="performance-6-1_{context}"] -== Enhanced Reliability and Performance -*Configurations to ensure Loki’s reliability and efficiency in production.* - -include::modules/log6x-identity-federation.adoc[leveloffset=+1] -include::modules/log6x-loki-reliability-hardening.adoc[leveloffset=+1] -include::modules/log6x-loki-restart-hardening.adoc[leveloffset=+1] - -[id="advanced-6-1_{context}"] -== Advanced Deployment and Scalability -*Specialized configurations for high availability, scalability, and error handling.* - -include::modules/log6x-loki-zone-aware-rep.adoc[leveloffset=+1] -include::modules/log6x-loki-zone-fail-recovery.adoc[leveloffset=+1] -include::modules/log6x-loki-rate-limit-errors.adoc[leveloffset=+1] diff --git a/observability/logging/logging-6.1/log6x-opentelemetry-data-model-6.1.adoc b/observability/logging/logging-6.1/log6x-opentelemetry-data-model-6.1.adoc deleted file mode 100644 index 44ae7b10deac..000000000000 --- a/observability/logging/logging-6.1/log6x-opentelemetry-data-model-6.1.adoc +++ /dev/null @@ -1,383 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log6x-opentelemetry-data-model-6-1"] -= OpenTelemetry data model -include::_attributes/common-attributes.adoc[] -:context: log6x-opentelemetry-data-model-6-1 - -toc::[] - -This document outlines the protocol and semantic conventions {for} Logging's OpenTelemetry support with {logging-uc} 6.1. - -:FeatureName: The OpenTelemetry Protocol (OTLP) output log forwarder -include::snippets/technology-preview.adoc[] - -[id="forwarding-and-ingestion-protocol_{context}"] -== Forwarding and ingestion protocol - -Red Hat OpenShift {logging-uc} collects and forwards logs to OpenTelemetry endpoints using link:https://opentelemetry.io/docs/specs/otlp/[OTLP Specification]. OTLP encodes, transports, and delivers telemetry data. You can also deploy Loki storage, which provides an OTLP endpont to ingest log streams. This document defines the semantic conventions for the logs collected from various OpenShift cluster sources. - -[id="semantic-conventions_{context}"] -== Semantic conventions - -The log collector in this solution gathers the following log streams: - -* Container logs -* Cluster node journal logs -* Cluster node auditd logs -* Kubernetes and OpenShift API server logs -* OpenShift Virtual Network (OVN) logs - -You can forward these streams according to the semantic conventions defined by OpenTelemetry semantic attributes. The semantic conventions in OpenTelemetry define a resource as an immutable representation of the entity producing telemetry, identified by attributes. For example, a process running in a container includes attributes such as `container_name`, `cluster_id`, `pod_name`, `namespace`, and possibly `deployment` or `app_name`. These attributes are grouped under the resource object, which helps reduce repetition and optimizes log transmission as telemetry data. - -In addition to resource attributes, logs might also contain scope attributes specific to instrumentation libraries and log attributes specific to each log entry. 
These attributes provide greater detail about each log entry and enhance filtering capabilities when querying logs in storage. - -The following sections define the attributes that are generally forwarded. - -[id="log-entry-structure_{context}"] -=== Log entry structure - -All log streams include the following link:https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition[log data] fields: - -The *Applicable Sources* column indicates which log sources each field applies to: - -* `all`: This field is present in all logs. -* `container`: This field is present in Kubernetes container logs, both application and infrastructure. -* `audit`: This field is present in Kubernetes, OpenShift API, and OVN logs. -* `auditd`: This field is present in node auditd logs. -* `journal`: This field is present in node journal logs. - -[cols="1,1,1", options="header"] -|=== -|Name |Applicable Sources |Comment - -|`body` -|all -| - -|`observedTimeUnixNano` -|all -| - -|`timeUnixNano` -|all -| - -|`severityText` -|container, journal -| - -|`attributes` -|all -|(Optional) Present when forwarding stream specific attributes -|=== - -[id="attributes_{context}"] -=== Attributes - -Log entries include a set of resource, scope, and log attributes based on their source, as described in the following table. - -The *Location* column specifies the type of attribute: - -* `resource`: Indicates a resource attribute -* `scope`: Indicates a scope attribute -* `log`: Indicates a log attribute - -The *Storage* column indicates whether the attribute is stored in a LokiStack using the default `openshift-logging` mode and specifies where the attribute is stored: - -* `stream label`: -** Enables efficient filtering and querying based on specific labels. -** Can be labeled as `required` if the {loki-op} enforces this attribute in the configuration. -* `structured metadata`: -** Allows for detailed filtering and storage of key-value pairs. -** Enables users to use direct labels for streamlined queries without requiring JSON parsing. - -With OTLP, users can filter queries directly by labels rather than using JSON parsing, improving the speed and efficiency of queries. 
- -[cols="1,1,1,1,1", options="header"] -|=== -|Name |Location |Applicable Sources |Storage (LokiStack) |Comment - -|`log_source` -|resource -|all -|required stream label -|*(DEPRECATED)* Compatibility attribute, contains same information as `openshift.log.source` - -|`log_type` -|resource -|all -|required stream label -|*(DEPRECATED)* Compatibility attribute, contains same information as `openshift.log.type` - -|`kubernetes.container_name` -|resource -|container -|stream label -|*(DEPRECATED)* Compatibility attribute, contains same information as `k8s.container.name` - -|`kubernetes.host` -|resource -|all -|stream label -|*(DEPRECATED)* Compatibility attribute, contains same information as `k8s.node.name` - -|`kubernetes.namespace_name` -|resource -|container -|required stream label -|*(DEPRECATED)* Compatibility attribute, contains same information as `k8s.namespace.name` - -|`kubernetes.pod_name` -|resource -|container -|stream label -|*(DEPRECATED)* Compatibility attribute, contains same information as `k8s.pod.name` - -|`openshift.cluster_id` -|resource -|all -| -|*(DEPRECATED)* Compatibility attribute, contains same information as `openshift.cluster.uid` - -|`level` -|log -|container, journal -| -|*(DEPRECATED)* Compatibility attribute, contains same information as `severityText` - -|`openshift.cluster.uid` -|resource -|all -|required stream label -| - -|`openshift.log.source` -|resource -|all -|required stream label -| - -|`openshift.log.type` -|resource -|all -|required stream label -| - -|`openshift.labels.*` -|resource -|all -|structured metadata -| - -|`k8s.node.name` -|resource -|all -|stream label -| - -|`k8s.namespace.name` -|resource -|container -|required stream label -| - -|`k8s.container.name` -|resource -|container -|stream label -| - -|`k8s.pod.labels.*` -|resource -|container -|structured metadata -| - -|`k8s.pod.name` -|resource -|container -|stream label -| - -|`k8s.pod.uid` -|resource -|container -|structured metadata -| - -|`k8s.cronjob.name` -|resource -|container -|stream label -|Conditionally forwarded based on creator of pod - -|`k8s.daemonset.name` -|resource -|container -|stream label -|Conditionally forwarded based on creator of pod - -|`k8s.deployment.name` -|resource -|container -|stream label -|Conditionally forwarded based on creator of pod - -|`k8s.job.name` -|resource -|container -|stream label -|Conditionally forwarded based on creator of pod - -|`k8s.replicaset.name` -|resource -|container -|structured metadata -|Conditionally forwarded based on creator of pod - -|`k8s.statefulset.name` -|resource -|container -|stream label -|Conditionally forwarded based on creator of pod - -|`log.iostream` -|log -|container -|structured metadata -| - -|`k8s.audit.event.level` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.stage` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.user_agent` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.request.uri` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.response.code` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.annotation.*` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.object_ref.resource` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.object_ref.name` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.object_ref.namespace` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.object_ref.api_group` -|log -|audit -|structured metadata -| - -|`k8s.audit.event.object_ref.api_version` -|log -|audit -|structured metadata 
-| - -|`k8s.user.username` -|log -|audit -|structured metadata -| - -|`k8s.user.groups` -|log -|audit -|structured metadata -| - -|`process.executable.name` -|resource -|journal -|structured metadata -| - -|`process.executable.path` -|resource -|journal -|structured metadata -| - -|`process.command_line` -|resource -|journal -|structured metadata -| - -|`process.pid` -|resource -|journal -|structured metadata -| - -|`service.name` -|resource -|journal -|stream label -| - -|`systemd.t.*` -|log -|journal -|structured metadata -| - -|`systemd.u.*` -|log -|journal -|structured metadata -| -|=== - -[NOTE] -==== -Attributes marked as *Compatibility attribute* support minimal backward compatibility with the ViaQ data model. These attributes are deprecated and function as a compatibility layer to ensure continued UI functionality. These attributes will remain supported until the Logging UI fully supports the OpenTelemetry counterparts in future releases. -==== - -Loki changes the attribute names when persisting them to storage: the names are lowercased, and the characters `.`, `/`, and `-` are replaced by underscores (`_`). For example, `k8s.namespace.name` becomes `k8s_namespace_name`. - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources -* link:https://opentelemetry.io/docs/specs/semconv/[Semantic Conventions] -* link:https://opentelemetry.io/docs/specs/otel/logs/data-model/[Logs Data Model] -* link:https://opentelemetry.io/docs/specs/semconv/general/logs/[General Logs Attributes] diff --git a/observability/logging/logging-6.1/log6x-release-notes-6.1.adoc b/observability/logging/logging-6.1/log6x-release-notes-6.1.adoc deleted file mode 100644 index b865c4e295d5..000000000000 --- a/observability/logging/logging-6.1/log6x-release-notes-6.1.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-release-notes-6-1"] -= Logging 6.1 -:context: logging-6x-6.1 - -toc::[] - -include::modules/log6x-6-1-2-rn.adoc[leveloffset=+1] - -include::modules/log6x-6-1-1-rn.adoc[leveloffset=+1] - -include::modules/log6x-6-1-0-rn.adoc[leveloffset=+1] diff --git a/observability/logging/logging-6.1/log6x-visual-6.1.adoc b/observability/logging/logging-6.1/log6x-visual-6.1.adoc deleted file mode 100644 index 3d66a2de6465..000000000000 --- a/observability/logging/logging-6.1/log6x-visual-6.1.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log6x-visual-6-1"] -= Visualization for logging -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: logging-6x-6.1 - -toc::[] - -ifndef::openshift-rosa,openshift-rosa-hcp[] -Visualization for logging is provided by deploying the link:https://docs.redhat.com/en/documentation/red_hat_openshift_cluster_observability_operator/1-latest/html/ui_plugins_for_red_hat_openshift_cluster_observability_operator/logging-ui-plugin#coo-logging-ui-plugin-install_logging-ui-plugin[Logging UI Plugin] of the link:https://docs.redhat.com/en/documentation/red_hat_openshift_cluster_observability_operator/1-latest/html/about_red_hat_openshift_cluster_observability_operator/cluster-observability-operator-overview-1[Cluster Observability Operator], which requires Operator installation.
-endif::openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-rosa,openshift-rosa-hcp[] -Visualization for logging is provided by deploying the Logging UI Plugin of the Cluster Observability Operator, which requires Operator installation. -endif::openshift-rosa,openshift-rosa-hcp[] - - -include::snippets/logging-support-exception-for-cluster-observability-operator-due-to-logging-ui-plugin.adoc[] diff --git a/observability/logging/logging-6.1/modules b/observability/logging/logging-6.1/modules deleted file mode 120000 index 7e8b50bee77a..000000000000 --- a/observability/logging/logging-6.1/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules/ \ No newline at end of file diff --git a/observability/logging/logging-6.1/snippets b/observability/logging/logging-6.1/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/observability/logging/logging-6.1/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/observability/logging/logging-6.2/6x-cluster-logging-collector-6.2.adoc b/observability/logging/logging-6.2/6x-cluster-logging-collector-6.2.adoc deleted file mode 100644 index f8014660fbd1..000000000000 --- a/observability/logging/logging-6.2/6x-cluster-logging-collector-6.2.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-collector-6-2 -[id="cluster-logging-collector-6-2"] -= Configuring the logging collector -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. -All supported modifications to the log collector are performed through the `spec.collection` stanza in the `ClusterLogForwarder` custom resource (CR). - -include::modules/log6x-creating-logfilesmetricexporter.adoc[leveloffset=+1] -include::modules/log6x-cluster-logging-collector-limits.adoc[leveloffset=+1] - -[id="cluster-logging-collector-input-receivers_{context}"] -== Configuring input receivers - -The {clo} deploys a service for each configured input receiver so that clients can write to the collector. This service exposes the port specified for the input receiver. For log forwarder `ClusterLogForwarder` CR deployments, the service name is in the `-` format.
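The following example is a minimal sketch of an input receiver declared in a `ClusterLogForwarder` CR, shown to make the service naming above concrete. The receiver field names (`type: receiver`, `receiver.type`, `receiver.port`) and the `my-http-receiver` input name are assumptions for illustration; verify them against the modules that follow and the installed API.

[source,yaml]
----
apiVersion: observability.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: collector
  namespace: openshift-logging
spec:
  inputs:
  - name: my-http-receiver # hypothetical input name; the receiver service name is derived from it
    type: receiver # assumed field names for a receiver input
    receiver:
      type: http
      port: 8443
  pipelines:
  - name: http-pipeline
    inputRefs:
    - my-http-receiver
    outputRefs:
    - my-output # hypothetical output that is defined elsewhere in the CR
----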
- -include::modules/log6x-log-collector-http-server.adoc[leveloffset=+2] -include::modules/log6x-log-collector-syslog-server.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.2/_attributes b/observability/logging/logging-6.2/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/logging-6.2/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/logging-6.2/images b/observability/logging/logging-6.2/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/logging-6.2/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/logging-6.2/log62-cluster-logging-support.adoc b/observability/logging/logging-6.2/log62-cluster-logging-support.adoc deleted file mode 100644 index 95e861138817..000000000000 --- a/observability/logging/logging-6.2/log62-cluster-logging-support.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log62-cluster-logging-support"] -= Support -include::_attributes/common-attributes.adoc[] -:context: log62-cluster-logging-support - -toc::[] - -include::snippets/logging-supported-config-snip.adoc[] -include::snippets/logging-compatibility-snip.adoc[] -include::snippets/log6x-loki-statement-snip.adoc[] - -{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. It is intended to be used for forwarding logs to various supported systems. - -{logging-uc} is not: - -* A high scale log collection system -* Security Information and Event Monitoring (SIEM) compliant -* A "bring your own" (BYO) log collector configuration -* Historical or long term log retention or storage -* A guaranteed log sink -* Secure storage - audit logs are not stored by default - -[id="cluster-logging-support-CRDs_{context}"] -== Supported API custom resource definitions - -The following table describes the supported {logging-uc} APIs. - -include::snippets/log6x-api-support-states-snip.adoc[] - -include::modules/cluster-logging-maintenance-support-list-6x.adoc[leveloffset=+1] -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="support-exception-for-coo-logging-ui-plugin_{context}"] -== Support exception for the Logging UI Plugin - -Until the approaching General Availability (GA) release of the Cluster Observability Operator (COO), which is currently in link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview] (TP), Red{nbsp}Hat provides support to customers who are using Logging 6.0 or later with the COO for its Logging UI Plugin on {product-title} 4.14 or later. This support exception is temporary as the COO includes several independent features, some of which are still TP features, but the Logging UI Plugin is ready for GA. - -[id="cluster-logging-support-must-gather_{context}"] -== Collecting {logging} data for Red Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red{nbsp}Hat Support. - -You can use the xref:../../../support/gathering-cluster-data.adoc#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. -For prompt support, supply diagnostic information for both {product-title} and {logging}. 
- -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+2] -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.2/log6x-about-6.2.adoc b/observability/logging/logging-6.2/log6x-about-6.2.adoc deleted file mode 100644 index f75dedca4498..000000000000 --- a/observability/logging/logging-6.2/log6x-about-6.2.adoc +++ /dev/null @@ -1,66 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-about-6-2"] -= Logging 6.2 - -:context: logging-6x-6.2 - -toc::[] - -The `ClusterLogForwarder` custom resource (CR) is the central configuration point for log collection and forwarding. - -[id="inputs-and-outputs_6-2_{context}"] -== Inputs and outputs - -Inputs specify the sources of logs to be forwarded. Logging provides the following built-in input types that select logs from different parts of your cluster: - -* `application` -* `receiver` -* `infrastructure` -* `audit` - -You can also define custom inputs based on namespaces or pod labels to fine-tune log selection. - -Outputs define the destinations where logs are sent. Each output type has its own set of configuration options, allowing you to customize the behavior and authentication settings. - -[id="receiver-input-type_6-2_{context}"] -== Receiver input type -The receiver input type enables the Logging system to accept logs from external sources. It supports two formats for receiving logs: `http` and `syslog`. - -The `ReceiverSpec` field defines the configuration for a receiver input. - -[id="pipelines-and-filters_6-2_{context}"] -== Pipelines and filters - -Pipelines determine the flow of logs from inputs to outputs. A pipeline consists of one or more input refs, output refs, and optional filter refs. You can use filters to transform or drop log messages within a pipeline. The order of filters matters, as they are applied sequentially, and earlier filters can prevent log messages from reaching later stages. - -[id="operator-behavior_6-2_{context}"] -== Operator behavior - -The Cluster Logging Operator manages the deployment and configuration of the collector based on the `managementState` field of the `ClusterLogForwarder` resource: - -- When set to `Managed` (default), the Operator actively manages the logging resources to match the configuration defined in the spec. -- When set to `Unmanaged`, the Operator does not take any action, allowing you to manually manage the logging components. - -[id="validation_6-2_{context}"] -== Validation -Logging includes extensive validation rules and default values to ensure a smooth and error-free configuration experience. The `ClusterLogForwarder` resource enforces validation checks on required fields, dependencies between fields, and the format of input values. Default values are provided for certain fields, reducing the need for explicit configuration in common scenarios. - -[id="quick-start_6-2_{context}"] -== Quick start - -OpenShift Logging supports two data models: - -* ViaQ (General Availability) -* OpenTelemetry (Technology Preview) - -You can select either of these data models based on your requirement by configuring the `lokiStack.dataModel` field in the `ClusterLogForwarder`. ViaQ is the default data model when forwarding logs to LokiStack. - -[NOTE] -==== -In future releases of OpenShift Logging, the default data model will change from ViaQ to OpenTelemetry. 
-==== - -include::modules/log6x-quickstart-viaq.adoc[leveloffset=+2] - -include::modules/log6x-quickstart-opentelemetry.adoc[leveloffset=+2] diff --git a/observability/logging/logging-6.2/log6x-clf-6.2.adoc b/observability/logging/logging-6.2/log6x-clf-6.2.adoc deleted file mode 100644 index e888d39dfb1b..000000000000 --- a/observability/logging/logging-6.2/log6x-clf-6.2.adoc +++ /dev/null @@ -1,125 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-clf-6-2"] -= Configuring log forwarding -:context: logging-6x-6.2 - -toc::[] - -The `ClusterLogForwarder` (CLF) allows users to configure forwarding of logs to various destinations. It provides a flexible way to select log messages from different sources, send them through a pipeline that can transform or filter them, and forward them to one or more outputs. - -.Key Functions of the ClusterLogForwarder -* Selects log messages using inputs -* Forwards logs to external destinations using outputs -* Filters, transforms, and drops log messages using filters -* Defines log forwarding pipelines connecting inputs, filters and outputs - -include::modules/log6x-collection-setup.adoc[leveloffset=+1] - -[id="modifying-log-level_6-2_{context}"] -== Modifying log level in collector - -To modify the log level in the collector, you can set the `observability.openshift.io/log-level` annotation to `trace`, `debug`, `info`, `warn`, `error`, and `off`. - -.Example log level annotation -[source,yaml] ----- -apiVersion: observability.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: collector - annotations: - observability.openshift.io/log-level: debug -# ... ----- - -[id="managing-the-operator_6-2_{context}"] -== Managing the Operator - -The `ClusterLogForwarder` resource has a `managementState` field that controls whether the operator actively manages its resources or leaves them Unmanaged: - -Managed:: (default) The operator will drive the logging resources to match the desired state in the CLF spec. - -Unmanaged:: The operator will not take any action related to the logging components. - -This allows administrators to temporarily pause log forwarding by setting `managementState` to `Unmanaged`. - -[id="clf-structure_6-2_{context}"] -== Structure of the ClusterLogForwarder - -The CLF has a `spec` section that contains the following key components: - -Inputs:: Select log messages to be forwarded. Built-in input types `application`, `infrastructure` and `audit` forward logs from different parts of the cluster. You can also define custom inputs. - -Outputs:: Define destinations to forward logs to. Each output has a unique name and type-specific configuration. - -Pipelines:: Define the path logs take from inputs, through filters, to outputs. Pipelines have a unique name and consist of a list of input, output and filter names. - -Filters:: Transform or drop log messages in the pipeline. Users can define filters that match certain log fields and drop or modify the messages. Filters are applied in the order specified in the pipeline. - -[id="clf-inputs_6-2_{context}"] -=== Inputs - -Inputs are configured in an array under `spec.inputs`. There are three built-in input types: - -application:: Selects logs from all application containers, excluding those in infrastructure namespaces. 
- -infrastructure:: Selects logs from nodes and from infrastructure components running in the following namespaces: -** `default` -** `kube` -** `openshift` -** Namespaces that have the `kube-` or `openshift-` prefix - -audit:: Selects logs from the OpenShift API server audit logs, Kubernetes API server audit logs, OVN audit logs, and node audit logs from auditd. - -Users can define custom inputs of type `application` that select logs from specific namespaces or by using pod labels. - -[id="clf-outputs_6-2_{context}"] -=== Outputs - -Outputs are configured in an array under `spec.outputs`. Each output must have a unique name and a type. Supported types are: - -azureMonitor:: Forwards logs to Azure Monitor. -cloudwatch:: Forwards logs to AWS CloudWatch. -elasticsearch:: Forwards logs to an external Elasticsearch instance. -googleCloudLogging:: Forwards logs to {gcp-full} Logging. -http:: Forwards logs to a generic HTTP endpoint. -kafka:: Forwards logs to a Kafka broker. -loki:: Forwards logs to a Loki logging backend. -lokistack:: Forwards logs to the logging-supported combination of Loki and a web proxy with {product-title} authentication integration. The LokiStack proxy uses {product-title} authentication to enforce multi-tenancy. -otlp:: Forwards logs using the OpenTelemetry Protocol. -splunk:: Forwards logs to Splunk. -syslog:: Forwards logs to an external syslog server. - -Each output type has its own configuration fields. - -include::modules/log6x-configuring-otlp-output.adoc[leveloffset=+1] - -[id="clf-pipelines_6-2_{context}"] -=== Pipelines - -Pipelines are configured in an array under `spec.pipelines`. Each pipeline must have a unique name and consists of the following: - -inputRefs:: Names of inputs whose logs should be forwarded to this pipeline. -outputRefs:: Names of outputs to send logs to. -filterRefs:: (optional) Names of filters to apply. - -The order of `filterRefs` matters, as they are applied sequentially. Earlier filters can drop messages that will not be processed by later filters. - -[id="clf-filters_6-2_{context}"] -=== Filters - -Filters are configured in an array under `spec.filters`. They can match incoming log messages based on the value of structured fields and modify or drop them.
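As a way to tie the preceding subsections together, the following sketch wires a custom input, a filter, and an output into a single pipeline. It is illustrative only: the `serviceAccount` field, the `application.namespaces` selector, the `lokiStack` output fields, and the `detectMultilineException` filter type are assumptions based on the descriptions above and should be checked against the installed API.

[source,yaml]
----
apiVersion: observability.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: collector
  namespace: openshift-logging
spec:
  serviceAccount:
    name: collector # assumed; a service account that has the collect-* cluster roles
  inputs:
  - name: my-apps # hypothetical custom application input
    type: application
    application:
      namespaces: # assumed field for namespace-based selection
      - my-namespace
  outputs:
  - name: my-lokistack
    type: lokiStack # assumed spelling of the LokiStack output type
    lokiStack:
      target: # assumed fields pointing at an existing LokiStack CR
        name: logging-loki
        namespace: openshift-logging
  filters:
  - name: multiline-exceptions
    type: detectMultilineException # assumed filter type; see the filter modules that follow
  pipelines:
  - name: app-pipeline
    inputRefs:
    - my-apps
    filterRefs:
    - multiline-exceptions
    outputRefs:
    - my-lokistack
----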
- -Administrators can configure the following types of filters: - -include::modules/log6x-multiline-except.adoc[leveloffset=+1] -include::modules/log6x-logging-http-forward-6-2.adoc[leveloffset=+1] -include::modules/log6x-cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1] -include::modules/log6x-content-filter-drop-records.adoc[leveloffset=+1] -include::modules/log6x-audit-log-filtering.adoc[leveloffset=+1] -include::modules/log6x-input-spec-filter-labels-expressions.adoc[leveloffset=+1] -include::modules/log6x-content-filter-prune-records.adoc[leveloffset=+1] -include::modules/log6x-input-spec-filter-audit-infrastructure.adoc[leveloffset=+1] -include::modules/log6x-input-spec-filter-namespace-container.adoc[leveloffset=+1] - diff --git a/observability/logging/logging-6.2/log6x-configuring-lokistack-otlp-6.2.adoc b/observability/logging/logging-6.2/log6x-configuring-lokistack-otlp-6.2.adoc deleted file mode 100644 index 0e34dc4f3f7d..000000000000 --- a/observability/logging/logging-6.2/log6x-configuring-lokistack-otlp-6.2.adoc +++ /dev/null @@ -1,142 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log6x-configuring-lokistack-otlp-6-2"] -= OTLP data ingestion in Loki -include::_attributes/common-attributes.adoc[] -:context: log6x-configuring-lokistack-otlp-6-2 - -toc::[] - -You can use an API endpoint by using the OpenTelemetry Protocol (OTLP) with Logging. As OTLP is a standardized format not specifically designed for Loki, OTLP requires an additional Loki configuration to map data format of OpenTelemetry to data model of Loki. OTLP lacks concepts such as _stream labels_ or _structured metadata_. Instead, OTLP provides metadata about log entries as *attributes*, grouped into the following three categories: - -* Resource -* Scope -* Log - -You can set metadata for multiple entries simultaneously or individually as needed. - -include::modules/log6x-configuring-lokistack-otlp-data-ingestion.adoc[leveloffset=+1] - -[id="attribute-mapping_{context}"] -== Attribute mapping - -When you set the {loki-op} to the `openshift-logging` mode, {loki-op} automatically applies a default set of attribute mappings. These mappings align specific OTLP attributes with stream labels and structured metadata of Loki. - -For typical setups, these default mappings are sufficient. However, you might need to customize attribute mapping in the following cases: - -* Using a custom collector: If your setup includes a custom collector that generates additional attributes that you do not want to store, consider customizing the mapping to ensure these attributes are dropped by Loki. -* Adjusting attribute detail levels: If the default attribute set is more detailed than necessary, you can reduce it to essential attributes only. This can avoid excessive data storage and streamline the {logging} process. - -[id="custom-attribute-mapping-for-openshift_{context}"] -=== Custom attribute mapping for OpenShift - -When using the {loki-op} in `openshift-logging` mode, attribute mapping follow OpenShift default values, but you can configure custom mappings to adjust default values. -In the `openshift-logging` mode, you can configure custom attribute mappings globally for all tenants or for individual tenants as needed. When you define custom mappings, they are appended to the OpenShift default values. If you do not need default labels, you can disable them in the tenant configuration. - -[NOTE] -==== -A major difference between the {loki-op} and Loki lies in inheritance handling. 
Loki copies only `default_resource_attributes_as_index_labels` to tenants by default, while the {loki-op} applies the entire global configuration to each tenant in the `openshift-logging` mode. -==== - -Within `LokiStack`, attribute mapping configuration is managed through the `limits` setting. See the following example `LokiStack` configuration: - -[source,yaml] ----- -# ... -spec: - limits: - global: - otlp: {} # <1> - tenants: - application: # <2> - otlp: {} ----- -<1> Defines global OTLP attribute configuration. -<2> Defines the OTLP attribute configuration for the `application` tenant within the `openshift-logging` mode. You can also configure `infrastructure` and `audit` tenants in addition to `application` tenants. - -[NOTE] -==== -You can use both global and per-tenant OTLP configurations for mapping attributes to stream labels. -==== - -Stream labels derive only from resource-level attributes, which the `LokiStack` resource structure reflects. See the following `LokiStack` example configuration: - -[source,yaml] ----- -spec: - limits: - global: - otlp: - streamLabels: - resourceAttributes: - - name: "k8s.namespace.name" - - name: "k8s.pod.name" - - name: "k8s.container.name" ----- - -You can drop attributes of type resource, scope, or log from the log entry. - -[source,yaml] ----- -# ... -spec: - limits: - global: - otlp: - streamLabels: -# ... - drop: - resourceAttributes: - - name: "process.command_line" - - name: "k8s\\.pod\\.labels\\..+" - regex: true - scopeAttributes: - - name: "service.name" - logAttributes: - - name: "http.route" ----- - -You can use regular expressions by setting `regex: true` to apply a configuration for attributes with similar names. - -[IMPORTANT] -==== -Avoid using regular expressions for stream labels, as this can increase data volume. -==== - -Attributes that are not explicitly set as stream labels or dropped from the entry are saved as structured metadata by default. - -[id="customizing-openshift-defaults_{context}"] -=== Customizing OpenShift defaults - -In the `openshift-logging` mode, certain attributes are required and cannot be removed from the configuration due to their role in OpenShift functions. Other attributes, labeled *recommended*, might be dropped if performance is impacted. For information about the attributes, see link:https://docs.openshift.com/container-platform/4.17/observability/logging/logging-6.1/log6x-opentelemetry-data-model-6.1.html#attributes_log6x-opentelemetry-data-model-6-1[OpenTelemetry data model attributes]. - -When using the `openshift-logging` mode without custom attributes, you can achieve immediate compatibility with OpenShift tools. If additional attributes are needed as stream labels or some attributes need to be droped, use custom configuration. Custom configurations can merge with default configurations. - -[id="removing-recommended-attributes_{context}"] -=== Removing recommended attributes - -To reduce default attributes in the `openshift-logging` mode, disable recommended attributes: - -[source,yaml] ----- -# ... -spec: - tenants: - mode: openshift-logging - openshift: - otlp: - disableRecommendedAttributes: true # <1> ----- -<1> Set `disableRecommendedAttributes: true` to remove recommended attributes, which limits default attributes to the required attributes or stream labels. -+ -[NOTE] -==== -This setting might negatively impact query performance, as it removes default stream labels. You must pair this option with a custom attribute configuration to retain attributes essential for queries. 
-==== - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources -* link:https://grafana.com/docs/loki/latest/get-started/labels/[Loki labels] (Grafana documentation) -* link:https://grafana.com/docs/loki/latest/get-started/labels/structured-metadata/[Structured metadata] (Grafana documentation) -* link:https://docs.openshift.com/container-platform/4.17/observability/logging/logging-6.1/log6x-opentelemetry-data-model-6.1.html[OpenTelemetry data model] -* link:https://opentelemetry.io/docs/specs/otel/common/#attribute[OpenTelemetry attribute] (OpenTelemetry documentation) \ No newline at end of file diff --git a/observability/logging/logging-6.2/log6x-loki-6.2.adoc b/observability/logging/logging-6.2/log6x-loki-6.2.adoc deleted file mode 100644 index f5b4768b4043..000000000000 --- a/observability/logging/logging-6.2/log6x-loki-6.2.adoc +++ /dev/null @@ -1,49 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[leveloffset=+1] -[id="log6x-loki-6-2"] -= Storing logs with LokiStack -:context: log6x-loki-6.2 - -toc::[] - -You can configure a `LokiStack` custom resource (CR) to store application, audit, and infrastructure-related logs. - -include::snippets/log6x-loki-statement-snip.adoc[leveloffset=+1] - -include::modules/log6x-loki-sizing.adoc[leveloffset=+1] - -[id="prerequisites-6-2_{context}"] -== Prerequisites - -* You have installed the {loki-op} by using the command-line interface (CLI) or web console. -* You have created a `serviceAccount` CR in the same namespace as the `ClusterLogForwarder` CR. -* You have assigned the `collect-audit-logs`, `collect-application-logs`, and `collect-infrastructure-logs` cluster roles to the `serviceAccount` CR. - -[id="setup-6-2_{context}"] -== Core set up and configuration - -Use role-based access controls, basic monitoring, and pod placement to deploy Loki. - -include::modules/log6x-loki-rbac-rules-perms.adoc[leveloffset=+1] -include::modules/log6x-enabling-loki-alerts.adoc[leveloffset=+1] -include::modules/log6x-loki-memberlist-ip.adoc[leveloffset=+1] -include::modules/log6x-loki-retention.adoc[leveloffset=+1] -include::modules/log6x-loki-pod-placement.adoc[leveloffset=+1] - -[id="performance-6-2_{context}"] -== Enhanced reliability and performance - -Use the following configurations to ensure reliability and efficiency of Loki in production. - -include::modules/log6x-identity-federation.adoc[leveloffset=+1] -include::modules/log6x-loki-reliability-hardening.adoc[leveloffset=+1] -include::modules/log6x-loki-restart-hardening.adoc[leveloffset=+1] - -[id="advanced-6-2_{context}"] -== Advanced deployment and scalability - -To configure high availability, scalability, and error handling, use the following information. 
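The following `LokiStack` sketch pulls the preceding sections together: a small deployment size, object storage, the `openshift-logging` tenancy mode, and zone-aware replication for the advanced scenarios covered next. The secret name, storage class, and the `replication` field layout are assumptions for illustration; confirm them against the {loki-op} API before use.

[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
  name: logging-loki
  namespace: openshift-logging
spec:
  size: 1x.small # for example, 1x.extra-small, 1x.small, or 1x.medium
  storage:
    secret:
      name: logging-loki-s3 # hypothetical object storage secret
      type: s3
  storageClassName: gp3-csi # hypothetical storage class
  tenants:
    mode: openshift-logging
  replication: # assumed layout for zone-aware replication
    factor: 2
    zones:
    - topologyKey: topology.kubernetes.io/zone
      maxSkew: 1
----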
- -include::modules/log6x-loki-zone-aware-rep.adoc[leveloffset=+1] -include::modules/log6x-loki-zone-fail-recovery.adoc[leveloffset=+1] -include::modules/log6x-loki-rate-limit-errors.adoc[leveloffset=+1] diff --git a/observability/logging/logging-6.2/log6x-release-notes-6.2.adoc b/observability/logging/logging-6.2/log6x-release-notes-6.2.adoc deleted file mode 100644 index a4fbe4cb68ba..000000000000 --- a/observability/logging/logging-6.2/log6x-release-notes-6.2.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="log6x-release-notes-6-2"] -= Logging 6.2 -:context: log6x-release-notes-6-2 - -toc::[] - -include::modules/log6x-6-2-0-rn.adoc[leveloffset=+1] diff --git a/observability/logging/logging-6.2/log6x-visual-6.2.adoc b/observability/logging/logging-6.2/log6x-visual-6.2.adoc deleted file mode 100644 index 5b0867032f32..000000000000 --- a/observability/logging/logging-6.2/log6x-visual-6.2.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="log6x-visual-6-2"] -= Visualization for logging -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: logging-6x-6.2 - -toc::[] - -ifndef::openshift-rosa,openshift-rosa-hcp[] -Visualization for logging is provided by deploying the link:https://docs.redhat.com/en/documentation/red_hat_openshift_cluster_observability_operator/1-latest/html/ui_plugins_for_red_hat_openshift_cluster_observability_operator/logging-ui-plugin#coo-logging-ui-plugin-install_logging-ui-plugin[Logging UI Plugin] of the link:https://docs.redhat.com/en/documentation/red_hat_openshift_cluster_observability_operator/1-latest/html/about_red_hat_openshift_cluster_observability_operator/cluster-observability-operator-overview-1[Cluster Observability Operator], which requires Operator installation. -endif::openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-rosa,openshift-rosa-hcp[] -Visualization for logging is provided by deploying the Logging UI Plugin of the Cluster Observability Operator, which requires Operator installation. -endif::openshift-rosa,openshift-rosa-hcp[] - -include::snippets/logging-support-exception-for-cluster-observability-operator-due-to-logging-ui-plugin.adoc[] diff --git a/observability/logging/logging-6.2/modules b/observability/logging/logging-6.2/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/logging-6.2/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/logging-6.2/snippets b/observability/logging/logging-6.2/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/logging-6.2/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/logging-common-terms.adoc b/observability/logging/logging-common-terms.adoc deleted file mode 100644 index 4be23e219d67..000000000000 --- a/observability/logging/logging-common-terms.adoc +++ /dev/null @@ -1,88 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="openshift-logging-common-terms"] -= Glossary -:context: openshift-logging-common-terms - -toc::[] - -This glossary defines common terms that are used in the {logging} documentation. - -Annotation:: -You can use annotations to attach metadata to objects. 
- -{clo}:: -The {clo} provides a set of APIs to control the collection and forwarding of application, infrastructure, and audit logs. - -Custom resource (CR):: -A CR is an extension of the Kubernetes API. To configure the {logging} and log forwarding, you can customize the `ClusterLogging` and the `ClusterLogForwarder` custom resources. - -Event router:: -The event router is a pod that watches {product-title} events. It collects logs by using the {logging}. - -Fluentd:: -Fluentd is a log collector that resides on each {product-title} node. It gathers application, infrastructure, and audit logs and forwards them to different outputs. - -Garbage collection:: -Garbage collection is the process of cleaning up cluster resources, such as terminated containers and images that are not referenced by any running pods. - -Elasticsearch:: -Elasticsearch is a distributed search and analytics engine. {product-title} uses Elasticsearch as a default log store for the {logging}. - -{es-op}:: -The {es-op} is used to run an Elasticsearch cluster on {product-title}. The {es-op} provides self-service for the Elasticsearch cluster operations and is used by the {logging}. - -Indexing:: -Indexing is a data structure technique that is used to quickly locate and access data. Indexing optimizes the performance by minimizing the amount of disk access required when a query is processed. - -JSON logging:: -The Log Forwarding API enables you to parse JSON logs into a structured object and forward them to either the {Logging} managed Elasticsearch or any other third-party system supported by the Log Forwarding API. - -Kibana:: -Kibana is a browser-based console interface to query, discover, and visualize your Elasticsearch data through histograms, line graphs, and pie charts. - -Kubernetes API server:: -Kubernetes API server validates and configures data for the API objects. - -Labels:: -Labels are key-value pairs that you can use to organize and select subsets of objects, such as a pod. - -Logging:: -With the {logging}, you can aggregate application, infrastructure, and audit logs throughout your cluster. You can also store them to a default log store, forward them to third party systems, and query and visualize the stored logs in the default log store. - -Logging collector:: -A logging collector collects logs from the cluster, formats them, and forwards them to the log store or third party systems. - -Log store:: -A log store is used to store aggregated logs. You can use an internal log store or forward logs to external log stores. - -Log visualizer:: -Log visualizer is the user interface (UI) component you can use to view information such as logs, graphs, charts, and other metrics. - -Node:: -A node is a worker machine in the {product-title} cluster. A node is either a virtual machine (VM) or a physical machine. - -Operators:: -Operators are the preferred method of packaging, deploying, and managing a Kubernetes application in an {product-title} cluster. An Operator takes human operational knowledge and encodes it into software that is packaged and shared with customers. - -Pod:: -A pod is the smallest logical unit in Kubernetes. A pod consists of one or more containers and runs on a worker node. - -Role-based access control (RBAC):: -RBAC is a key security control to ensure that cluster users and workloads have access only to resources required to execute their roles. - -Shards:: -Elasticsearch organizes log data from Fluentd into datastores, or indices, then subdivides each index into multiple pieces called shards. 
- -Taint:: -Taints ensure that pods are scheduled onto appropriate nodes. You can apply one or more taints on a node. - -Toleration:: -You can apply tolerations to pods. Tolerations allow the scheduler to schedule pods with matching taints. - -Web console:: -A user interface (UI) to manage {product-title}. -ifdef::openshift-rosa,openshift-dedicated[] -The web console for {product-title} can be found at link:https://console.redhat.com/openshift[https://console.redhat.com/openshift]. -endif::[] diff --git a/observability/logging/logging_alerts/_attributes b/observability/logging/logging_alerts/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/logging_alerts/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/logging_alerts/custom-logging-alerts.adoc b/observability/logging/logging_alerts/custom-logging-alerts.adoc deleted file mode 100644 index 5d0e59d6cc49..000000000000 --- a/observability/logging/logging_alerts/custom-logging-alerts.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="custom-logging-alerts"] -include::_attributes/common-attributes.adoc[] -= Custom logging alerts -:context: custom-logging-alerts - -toc::[] - -In logging 5.7 and later versions, users can configure the LokiStack deployment to produce customized alerts and recorded metrics. If you want to use customized link:https://grafana.com/docs/loki/latest/alert/[alerting and recording rules], you must enable the LokiStack ruler component. - -LokiStack log-based alerts and recorded metrics are triggered by providing link:https://grafana.com/docs/loki/latest/query/[LogQL] expressions to the ruler component. The {loki-op} manages a ruler that is optimized for the selected LokiStack size, which can be `1x.extra-small`, `1x.small`, or `1x.medium`. - -To provide these expressions, you must create an `AlertingRule` custom resource (CR) containing Prometheus-compatible link:https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/[alerting rules], or a `RecordingRule` CR containing Prometheus-compatible link:https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/[recording rules]. - -Administrators can configure log-based alerts or recorded metrics for `application`, `audit`, or `infrastructure` tenants. Users without administrator permissions can configure log-based alerts or recorded metrics for `application` tenants of the applications that they have access to. - -Application, audit, and infrastructure alerts are sent by default to the {product-title} monitoring stack Alertmanager in the `openshift-monitoring` namespace, unless you have disabled the local Alertmanager instance. If the Alertmanager that is used to monitor user-defined projects in the `openshift-user-workload-monitoring` namespace is enabled, application alerts are sent to the Alertmanager in this namespace by default. 
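To make the preceding description concrete, the following sketch shows the general shape of an `AlertingRule` CR for an `application` tenant. The namespace, group, alert name, and LogQL expression are hypothetical, and the exact spec fields (such as `tenantID`) should be validated against the installed `AlertingRule` CRD.

[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: AlertingRule
metadata:
  name: app-error-alerts # hypothetical name
  namespace: my-app-namespace # hypothetical application namespace
spec:
  tenantID: application # assumed tenant identifier field
  groups:
  - name: app-errors # hypothetical group
    interval: 1m
    rules:
    - alert: HighApplicationErrorRate # hypothetical alert
      # hypothetical LogQL expression
      expr: |
        sum(rate({kubernetes_namespace_name="my-app-namespace"} |= "error" [5m])) > 10
      for: 10m
      labels:
        severity: warning
      annotations:
        summary: High error rate in application logs
----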
- -include::modules/configuring-logging-loki-ruler.adoc[leveloffset=+1] -include::modules/loki-rbac-rules-permissions.adoc[leveloffset=+1] - -ifdef::openshift-enterprise[] -[role="_additional-resources"] -.Additional resources -* xref:../../../authentication/using-rbac.adoc#using-rbac[Using RBAC to define and apply permissions] -endif::openshift-enterprise[] - -include::modules/logging-enabling-loki-alerts.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_custom-logging-alerts"] -== Additional resources -* xref:../../../observability/monitoring/about-ocp-monitoring/about-ocp-monitoring.adoc#about-ocp-monitoring[About {product-title} monitoring] -ifdef::openshift-enterprise[] -* xref:../../../post_installation_configuration/configuring-alert-notifications.adoc#configuring-alert-notifications[Configuring alert notifications] -endif::openshift-enterprise[] -// maybe need an update to https://docs.openshift.com/container-platform/4.13/observability/monitoring/monitoring-overview.html#default-monitoring-targets_monitoring-overview to talk about Loki and Vector now? Are these part of default monitoring? diff --git a/observability/logging/logging_alerts/default-logging-alerts.adoc b/observability/logging/logging_alerts/default-logging-alerts.adoc deleted file mode 100644 index 14c9b07c6293..000000000000 --- a/observability/logging/logging_alerts/default-logging-alerts.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="default-logging-alerts"] -include::_attributes/common-attributes.adoc[] -= Default logging alerts -:context: default-logging-alerts - -toc::[] - -Logging alerts are installed as part of the {clo} installation. Alerts depend on metrics exported by the log collection and log storage backends. These metrics are enabled if you selected the option to *Enable Operator recommended cluster monitoring on this namespace* when installing the {clo}. - -Default logging alerts are sent to the {product-title} monitoring stack Alertmanager in the `openshift-monitoring` namespace, unless you have disabled the local Alertmanager instance. 
- -include::modules/monitoring-accessing-the-alerting-ui.adoc[leveloffset=+1] -include::modules/logging-collector-alerts.adoc[leveloffset=+1] -include::modules/logging-vector-collector-alerts.adoc[leveloffset=+1] -include::modules/logging-fluentd-collector-alerts.adoc[leveloffset=+1] -include::modules/cluster-logging-elasticsearch-rules.adoc[leveloffset=+1] - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[role="_additional-resources"] -[id="additional-resources_default-logging-alerts"] -== Additional resources -* xref:../../../observability/monitoring/managing-alerts/managing-alerts-as-an-administrator.adoc#modifying-core-platform-alerting-rules_managing-alerts-as-an-administrator[Modifying core platform alerting rules] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] diff --git a/observability/logging/logging_alerts/images b/observability/logging/logging_alerts/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/logging_alerts/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/logging_alerts/modules b/observability/logging/logging_alerts/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/logging_alerts/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/logging_alerts/snippets b/observability/logging/logging_alerts/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/logging_alerts/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/logging_release_notes/_attributes b/observability/logging/logging_release_notes/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/logging_release_notes/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/logging_release_notes/cluster-logging-collector.adoc b/observability/logging/logging_release_notes/cluster-logging-collector.adoc deleted file mode 100644 index 59e5de608f7f..000000000000 --- a/observability/logging/logging_release_notes/cluster-logging-collector.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-collector -[id="cluster-logging-collector"] -= Configuring the logging collector -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. -All supported modifications to the log collector can be performed though the `spec.collection` stanza in the `ClusterLogging` custom resource (CR). - -include::modules/configuring-logging-collector.adoc[leveloffset=+1] - -include::modules/creating-logfilesmetricexporter.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-limits.adoc[leveloffset=+1] - -[id="cluster-logging-collector-input-receivers_{context}"] -== Configuring input receivers - -The {clo} deploys a service for each configured input receiver so that clients can write to the collector. This service exposes the port specified for the input receiver. 
-The service name is generated as follows: - -* For multi log forwarder `ClusterLogForwarder` CR deployments, the service name is in the `-` format, for example, `example-http-receiver`. -* For legacy `ClusterLogForwarder` CR deployments named `instance` and that are located in the `openshift-logging` namespace, the service name is in the `collector-` format, for example, `collector-http-receiver`. - -include::modules/log-collector-http-server.adoc[leveloffset=+2] -//include::modules/log-collector-rsyslog-server.adoc[leveloffset=+2] -// uncomment for 5.9 release - -//// -[role="_additional-resources"] -.Additional resources -* xref:../../../observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc#logging-audit-filtering_configuring-log-forwarding[Overview of API audit filter] -//// - -include::modules/cluster-logging-collector-tuning.adoc[leveloffset=+1] diff --git a/observability/logging/logging_release_notes/cluster-logging-support.adoc b/observability/logging/logging_release_notes/cluster-logging-support.adoc deleted file mode 100644 index 5b3bfe23962b..000000000000 --- a/observability/logging/logging_release_notes/cluster-logging-support.adoc +++ /dev/null @@ -1,73 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cluster-logging-support"] -include::_attributes/common-attributes.adoc[] -= Support -:context: cluster-logging-support - -toc::[] - -include::snippets/logging-supported-config-snip.adoc[] -include::snippets/logging-compatibility-snip.adoc[] -include::snippets/log6x-loki-statement-snip.adoc[] - -{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. You can use it to forward logs to various supported systems. - -{logging-uc} is not: - -* A high scale log collection system -* Security Information and Event Monitoring (SIEM) compliant -* A "bring your own" (BYO) log collector configuration -* Historical or long term log retention or storage -* A guaranteed log sink -* Secure storage - audit logs are not stored by default - -[id="cluster-logging-support-CRDs_{context}"] -== Supported API custom resource definitions - -The following table describes the supported {logging-uc} APIs. - -.Loki API support states -[cols="3",options="header"] -|=== -|CustomResourceDefinition (CRD) -|ApiVersion -|Support state - -|LokiStack -|lokistack.loki.grafana.com/v1 -|Supported from 5.5 - -|RulerConfig -|rulerconfig.loki.grafana/v1 -|Supported from 5.7 - -|AlertingRule -|alertingrule.loki.grafana/v1 -|Supported from 5.7 - -|RecordingRule -|recordingrule.loki.grafana/v1 -|Supported from 5.7 - -|LogFileMetricExporter -|LogFileMetricExporter.logging.openshift.io/v1alpha1 -|Supported from 5.8 - -|ClusterLogForwarder -|clusterlogforwarder.logging.openshift.io/v1 -|Supported from 4.5. -|=== - -include::modules/cluster-logging-maintenance-support-list.adoc[leveloffset=+1] -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="cluster-logging-support-must-gather_{context}"] -== Collecting logging data for Red Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red{nbsp}Hat Support. - -You can use the xref:../../../support/gathering-cluster-data.adoc#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. -For prompt support, supply diagnostic information for both {product-title} and {logging}. 
- -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+2] -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+2] diff --git a/observability/logging/logging_release_notes/images b/observability/logging/logging_release_notes/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/logging_release_notes/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/logging_release_notes/logging-5-7-release-notes.adoc b/observability/logging/logging_release_notes/logging-5-7-release-notes.adoc deleted file mode 100644 index f5834035ec2e..000000000000 --- a/observability/logging/logging_release_notes/logging-5-7-release-notes.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="logging-5-7-release-notes"] -include::_attributes/common-attributes.adoc[] -= Logging 5.7 -:context: logging-5-7-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-release-notes-5-7-8.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.7.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.6.adoc[leveloffset=+1] - -// No release notes for 5.7.5 since this was a CVE only releases. In the future, add a link to the CVE. - -include::modules/logging-rn-5.7.4.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.3.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.1.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.0.adoc[leveloffset=+1] diff --git a/observability/logging/logging_release_notes/logging-5-8-release-notes.adoc b/observability/logging/logging_release_notes/logging-5-8-release-notes.adoc deleted file mode 100644 index 57e16c5aa076..000000000000 --- a/observability/logging/logging_release_notes/logging-5-8-release-notes.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="logging-5-8-release-notes"] -include::_attributes/common-attributes.adoc[] -= Logging 5.8 -:context: logging-5-8-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-release-notes-5-8-4.adoc[leveloffset=+1] - -include::modules/logging-release-notes-5-8-3.adoc[leveloffset=+1] - -include::modules/logging-release-notes-5-8-2.adoc[leveloffset=+1] - -include::modules/logging-release-notes-5-8-1.adoc[leveloffset=+1] - -include::modules/logging-release-notes-5-8-0.adoc[leveloffset=+1] diff --git a/observability/logging/logging_release_notes/logging-5-9-release-notes.adoc b/observability/logging/logging_release_notes/logging-5-9-release-notes.adoc deleted file mode 100644 index 268de051fb3b..000000000000 --- a/observability/logging/logging_release_notes/logging-5-9-release-notes.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="logging-5-9-release-notes"] -include::_attributes/common-attributes.adoc[] -= Logging 5.9 -:context: logging-5-9-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-release-notes-5-9-3.adoc[leveloffset=+1] - -include::modules/logging-release-notes-5-9-2.adoc[leveloffset=+1] - -include::modules/logging-release-notes-5-9-1.adoc[leveloffset=+1] - -include::modules/logging-release-notes-5-9-0.adoc[leveloffset=+1] diff --git 
a/observability/logging/logging_release_notes/modules b/observability/logging/logging_release_notes/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/logging_release_notes/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/logging_release_notes/snippets b/observability/logging/logging_release_notes/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/logging_release_notes/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/performance_reliability/_attributes b/observability/logging/performance_reliability/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/performance_reliability/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/performance_reliability/images b/observability/logging/performance_reliability/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/performance_reliability/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/performance_reliability/logging-content-filtering.adoc b/observability/logging/performance_reliability/logging-content-filtering.adoc deleted file mode 100644 index 8a9bcdf57893..000000000000 --- a/observability/logging/performance_reliability/logging-content-filtering.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="logging-content-filtering"] -= Filtering logs by content -:context: logging-content-filtering - -toc::[] - -Collecting all logs from a cluster might produce a large amount of data, which can be expensive to transport and store. - -You can reduce the volume of your log data by filtering out low priority data that does not need to be stored. {logging-uc} provides content filters that you can use to reduce the volume of log data. - -[NOTE] -==== -Content filters are distinct from `input` selectors. `input` selectors select or ignore entire log streams based on source metadata. Content filters edit log streams to remove and modify records based on the record content. 
-==== - -Log data volume can be reduced by using one of the following methods: - -* xref:../../../observability/logging/performance_reliability/logging-content-filtering.adoc#logging-content-filter-drop-records_logging-content-filtering[Configuring content filters to drop unwanted log records] -* xref:../../../observability/logging/performance_reliability/logging-content-filtering.adoc#logging-content-filter-prune-records_logging-content-filtering[Configuring content filters to prune log records] - -include::modules/logging-content-filter-drop-records.adoc[leveloffset=+1] -include::modules/logging-content-filter-prune-records.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_logging-content-filtering"] -== Additional resources -* xref:../../../observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forwarding-about_configuring-log-forwarding[About forwarding logs to third-party systems] diff --git a/observability/logging/performance_reliability/logging-flow-control-mechanisms.adoc b/observability/logging/performance_reliability/logging-flow-control-mechanisms.adoc deleted file mode 100644 index 15529c06a209..000000000000 --- a/observability/logging/performance_reliability/logging-flow-control-mechanisms.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="logging-flow-control-mechanisms"] -= Flow control mechanisms -:context: logging-flow-control-mechanisms - -toc::[] - -If logs are produced faster than they can be collected, it can be difficult to predict or control the volume of logs being sent to an output. -Not being able to predict or control the volume of logs being sent to an output can result in logs being lost. If there is a system outage and log buffers are accumulated without user control, this can also cause long recovery times and high latency when the connection is restored. - -As an administrator, you can limit logging rates by configuring flow control mechanisms for your {logging}. - -[id="logging-configuring-flow-control-benefits"] -== Benefits of flow control mechanisms - -* The cost and volume of logging can be predicted more accurately in advance. -* Noisy containers cannot produce unbounded log traffic that drowns out other containers. -* Ignoring low-value logs reduces the load on the logging infrastructure. -* High-value logs can be preferred over low-value logs by assigning higher rate limits. - -[id="logging-configuring-flow-control-about-rate-limits"] -== Configuring rate limits - -Rate limits are configured per collector, which means that the maximum rate of log collection is the number of collector instances multiplied by the rate limit. - -Because logs are collected from each node's file system, a collector is deployed on each cluster node. For example, in a 3-node cluster, with a maximum rate limit of 10 records per second per collector, the maximum rate of log collection is 30 records per second. - -Because the exact byte size of a record as written to an output can vary due to transformations, different encodings, or other factors, rate limits are set in number of records instead of bytes. - -You can configure rate limits in the `ClusterLogForwarder` custom resource (CR) in two ways: - -Output rate limit:: Limit the rate of outbound logs to selected outputs, for example, to match the network or storage capacity of an output. 
The output rate limit controls the aggregated per-output rate. - -Input rate limit:: Limit the per-container rate of log collection for selected containers. - -include::modules/logging-set-output-rate-limit.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../observability/logging/log_collection_forwarding/logging-output-types.adoc#logging-output-types[Log output types] - -include::modules/logging-set-input-rate-limit.adoc[leveloffset=+1] diff --git a/observability/logging/performance_reliability/logging-input-spec-filtering.adoc b/observability/logging/performance_reliability/logging-input-spec-filtering.adoc deleted file mode 100644 index 519ce40a131a..000000000000 --- a/observability/logging/performance_reliability/logging-input-spec-filtering.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="logging-input-spec-filtering"] -= Filtering logs by metadata -:context: logging-input-spec-filtering - -toc::[] - - -You can filter logs in the `ClusterLogForwarder` CR to select or ignore an entire log stream based on the metadata by using the `input` selector. As an administrator or developer, you can include or exclude the log collection to reduce the memory and CPU load on the collector. - -[IMPORTANT] -==== -You can use this feature only if the Vector collector is set up in your logging deployment. -==== - -[NOTE] -==== -`input` spec filtering is different from content filtering. `input` selectors select or ignore entire log streams based on the source metadata. Content filters edit the log streams to remove and modify the records based on the record content. -==== - -include::modules/logging-input-spec-filter-namespace-container.adoc[leveloffset=+1] -include::modules/logging-input-spec-filter-labels-expressions.adoc[leveloffset=+1] -include::modules/logging-input-spec-filter-audit-infrastructure.adoc[leveloffset=+1] \ No newline at end of file diff --git a/observability/logging/performance_reliability/modules b/observability/logging/performance_reliability/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/performance_reliability/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/performance_reliability/snippets b/observability/logging/performance_reliability/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/performance_reliability/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/scheduling_resources/_attributes b/observability/logging/scheduling_resources/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/scheduling_resources/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/scheduling_resources/images b/observability/logging/scheduling_resources/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/scheduling_resources/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/scheduling_resources/logging-node-selectors.adoc b/observability/logging/scheduling_resources/logging-node-selectors.adoc deleted file mode 100644 index 6b6a0702eb28..000000000000 --- 
a/observability/logging/scheduling_resources/logging-node-selectors.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="logging-node-selectors"] -= Using node selectors to move logging resources -:context: logging-node-selectors - -toc::[] - -include::snippets/about-node-selectors.adoc[] - -include::modules/nodes-scheduler-node-selectors-about.adoc[leveloffset=+1] - -include::modules/logging-loki-pod-placement.adoc[leveloffset=+1] - -include::modules/log-collector-resources-scheduling.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-pod-location.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_logging-node-selection"] -== Additional resources -* xref:../../../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Placing pods on specific nodes using node selectors] diff --git a/observability/logging/scheduling_resources/logging-taints-tolerations.adoc b/observability/logging/scheduling_resources/logging-taints-tolerations.adoc deleted file mode 100644 index a15d0d2033ab..000000000000 --- a/observability/logging/scheduling_resources/logging-taints-tolerations.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="logging-taints-tolerations"] -= Using taints and tolerations to control logging pod placement -:context: logging-taints-tolerations - -toc::[] - -Taints and tolerations allow a node to control which pods should (or should not) be scheduled on it. - -include::modules/nodes-scheduler-taints-tolerations-about.adoc[leveloffset=+1] - -include::modules/logging-loki-pod-placement.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-tolerations.adoc[leveloffset=+1] - -include::modules/log-collector-resources-scheduling.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-pod-location.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_cluster-logging-tolerations"] -== Additional resources -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] -endif::[] diff --git a/observability/logging/scheduling_resources/modules b/observability/logging/scheduling_resources/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/scheduling_resources/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/scheduling_resources/snippets b/observability/logging/scheduling_resources/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/scheduling_resources/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/sd-accessing-the-service-logs.adoc b/observability/logging/sd-accessing-the-service-logs.adoc deleted file mode 100644 index 37c28d013121..000000000000 --- a/observability/logging/sd-accessing-the-service-logs.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="sd-accessing-the-service-logs"] -= Accessing the service logs for {product-title} clusters -include::_attributes/attributes-openshift-dedicated.adoc[] -:context:
sd-accessing-the-service-logs - -toc::[] - -[role="_abstract"] -You can view the service logs for your {product-title} -ifdef::openshift-rosa[] - (ROSA) -endif::[] - clusters by using the {cluster-manager-first}. The service logs detail cluster events such as load balancer quota updates and scheduled maintenance upgrades. The logs also show cluster resource changes such as the addition or deletion of users, groups, and identity providers. - -// Commented out while the OpenShift Cluster Manager CLI is in Developer Preview: -//You can view the service logs for your {product-title} (ROSA) clusters by using {cluster-manager-first} or the {cluster-manager} CLI (`ocm`). The service logs detail cluster events such as load balancer quota updates and scheduled maintenance upgrades. The logs also show cluster resource changes such as the addition or deletion of users, groups, and identity providers. - -Additionally, you can add notification contacts for -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] - cluster. Subscribed users receive emails about cluster events that require customer action, known cluster incidents, upgrade maintenance, and other topics. - -// Commented out while the OpenShift Cluster Manager CLI is in Developer Preview: -//include::modules/viewing-the-service-logs.adoc[leveloffset=+1] -//include::modules/viewing-the-service-logs-ocm.adoc[leveloffset=+2] -//include::modules/viewing-the-service-logs-cli.adoc[leveloffset=+2] -include::modules/viewing-the-service-logs-ocm.adoc[leveloffset=+1] -include::modules/adding-cluster-notification-contacts.adoc[leveloffset=+1] diff --git a/observability/logging/troubleshooting/_attributes b/observability/logging/troubleshooting/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/observability/logging/troubleshooting/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/observability/logging/troubleshooting/cluster-logging-cluster-status.adoc b/observability/logging/troubleshooting/cluster-logging-cluster-status.adoc deleted file mode 100644 index 9f469f1e6831..000000000000 --- a/observability/logging/troubleshooting/cluster-logging-cluster-status.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-cluster-status -[id="cluster-logging-cluster-status"] -= Viewing Logging status -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can view the status of the {clo} and other {logging} components. - -include::modules/cluster-logging-clo-status.adoc[leveloffset=+1] - -include::modules/cluster-logging-clo-status-comp.adoc[leveloffset=+1] diff --git a/observability/logging/troubleshooting/cluster-logging-log-store-status.adoc b/observability/logging/troubleshooting/cluster-logging-log-store-status.adoc deleted file mode 100644 index 44db6c6b4aa8..000000000000 --- a/observability/logging/troubleshooting/cluster-logging-log-store-status.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-elasticsearch -[id="cluster-logging-log-store-status"] -= Viewing the status of the Elasticsearch log store -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can view the status of the {es-op} and of a number of Elasticsearch components.
- -include::modules/cluster-logging-log-store-status-viewing.adoc[leveloffset=+1] - -include::modules/cluster-logging-log-store-status-comp.adoc[leveloffset=+1] - -include::modules/ref_cluster-logging-elasticsearch-cluster-status.adoc[leveloffset=+1] diff --git a/observability/logging/troubleshooting/images b/observability/logging/troubleshooting/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/observability/logging/troubleshooting/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/observability/logging/troubleshooting/log-forwarding-troubleshooting.adoc b/observability/logging/troubleshooting/log-forwarding-troubleshooting.adoc deleted file mode 100644 index 2e345a01f197..000000000000 --- a/observability/logging/troubleshooting/log-forwarding-troubleshooting.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="log-forwarding-troubleshooting"] -= Troubleshooting log forwarding -:context: log-forwarding-troubleshooting - -toc::[] - -include::modules/redeploying-fluentd-pods.adoc[leveloffset=+1] -include::modules/loki-rate-limit-errors.adoc[leveloffset=+1] diff --git a/observability/logging/troubleshooting/modules b/observability/logging/troubleshooting/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/observability/logging/troubleshooting/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/observability/logging/troubleshooting/snippets b/observability/logging/troubleshooting/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/observability/logging/troubleshooting/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc b/observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc deleted file mode 100644 index a642306372e0..000000000000 --- a/observability/logging/troubleshooting/troubleshooting-logging-alerts.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="troubleshooting-logging-alerts"] -include::_attributes/common-attributes.adoc[] -= Troubleshooting logging alerts -:context: troubleshooting-logging-alerts - -toc::[] - -You can use the following procedures to troubleshoot logging alerts on your cluster. - -include::modules/es-cluster-health-is-red.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../observability/monitoring/accessing-metrics/accessing-metrics-as-an-administrator.adoc#reviewing-monitoring-dashboards-admin_accessing-metrics-as-an-administrator[Reviewing monitoring dashboards as a cluster administrator] -* link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status] - -[id="elasticsearch-cluster-health-is-yellow"] -== Elasticsearch cluster health status is yellow - -Replica shards for at least one primary shard are not allocated to nodes. Increase the node count by adjusting the `nodeCount` value in the `ClusterLogging` custom resource (CR). 
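For reference, a minimal sketch of the `ClusterLogging` CR change described above, assuming the documented default instance name `instance` in the `openshift-logging` namespace; the `nodeCount` value shown is illustrative only:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance
  namespace: openshift-logging
spec:
  logStore:
    type: elasticsearch
    elasticsearch:
      nodeCount: 3 # Increase this value so that replica shards can be allocated to additional nodes.
----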
- -[role="_additional-resources"] -.Additional resources -* link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status] - -include::modules/es-node-disk-low-watermark-reached.adoc[leveloffset=+1] -include::modules/es-node-disk-high-watermark-reached.adoc[leveloffset=+1] -include::modules/es-node-disk-flood-watermark-reached.adoc[leveloffset=+1] - -[id="troubleshooting-logging-alerts-es-jvm-heap-use-is-high"] -== Elasticsearch JVM heap usage is high - -The Elasticsearch node Java virtual machine (JVM) heap memory used is above 75%. Consider link:https://www.elastic.co/guide/en/elasticsearch/reference/current/advanced-configuration.html#set-jvm-heap-size[increasing the heap size]. - -[id="troubleshooting-logging-alerts-aggregated-logging-system-cpu-is-high"] -== Aggregated logging system CPU is high - -System CPU usage on the node is high. Check the CPU of the cluster node. Consider allocating more CPU resources to the node. - -[id="troubleshooting-logging-alerts-es-process-cpu-is-high"] -== Elasticsearch process CPU is high - -Elasticsearch process CPU usage on the node is high. Check the CPU of the cluster node. Consider allocating more CPU resources to the node. - -include::modules/es-disk-space-low.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status] - -[id="troubleshooting-logging-alerts-es-filedescriptor-usage-is-high"] -== Elasticsearch FileDescriptor usage is high - -Based on current usage trends, the predicted number of file descriptors on the node is insufficient. Check the value of `max_file_descriptors` for each node as described in the Elasticsearch link:https://www.elastic.co/guide/en/elasticsearch/reference/6.8/file-descriptors.html[File Descriptors] documentation. diff --git a/snippets/audit-logs-default.adoc b/snippets/audit-logs-default.adoc deleted file mode 100644 index 992748a1e489..000000000000 --- a/snippets/audit-logs-default.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: SNIPPET - -// Module included in the following assemblies and modules: -// -// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc -// -// * modules/cluster-logging-elasticsearch-audit.adoc - -In a {logging} deployment, container and infrastructure logs are forwarded to the internal log store defined in the `ClusterLogging` custom resource (CR) by default. - -Audit logs are not forwarded to the internal log store by default, because the internal log store does not provide secure storage. You are responsible for ensuring that the system to which you forward audit logs is compliant with your organizational and governmental regulations, and is properly secured. - -If this default configuration meets your needs, you do not need to configure a `ClusterLogForwarder` CR. If a `ClusterLogForwarder` CR exists, logs are not forwarded to the internal log store unless a pipeline is defined that contains the `default` output.
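To illustrate the snippet above, a minimal `ClusterLogForwarder` sketch that forwards audit logs by defining a pipeline that includes the `default` output; this assumes the `logging.openshift.io/v1` API and the default instance name and namespace, and the pipeline name is illustrative:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  pipelines:
  - name: audit-to-default # Illustrative pipeline name.
    inputRefs:
    - audit # Collect the audit log stream.
    outputRefs:
    - default # Forward to the internal log store.
----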
diff --git a/snippets/log6x-api-support-states-snip.adoc b/snippets/log6x-api-support-states-snip.adoc deleted file mode 100644 index 40fd8528d5c8..000000000000 --- a/snippets/log6x-api-support-states-snip.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Text snippet included in the following assemblies: -// logging/logging-6.2/log62-cluster-logging-support.adoc -// logging/logging-6.1/log61-cluster-logging-support.adoc -// logging/logging-6.0/log60-cluster-logging-support.adoc -// Text snippet included in the following modules: -// -// -:_mod-docs-content-type: SNIPPET - -.Logging API support states -[cols="3",options="header"] -|=== -|CustomResourceDefinition (CRD) -|ApiVersion -|Support state - -|LokiStack -|lokistack.loki.grafana.com/v1 -|Supported from 5.5 - -|RulerConfig -|rulerconfig.loki.grafana.com/v1 -|Supported from 5.7 - -|AlertingRule -|alertingrule.loki.grafana.com/v1 -|Supported from 5.7 - -|RecordingRule -|recordingrule.loki.grafana.com/v1 -|Supported from 5.7 - -|LogFileMetricExporter -|logfilemetricexporter.logging.openshift.io/v1alpha1 -|Supported from 5.8 - -|ClusterLogForwarder -|clusterlogforwarder.observability.openshift.io/v1 -|Supported from 6.0 -|=== \ No newline at end of file diff --git a/snippets/logging-compatibility-snip.adoc b/snippets/logging-compatibility-snip.adoc deleted file mode 100644 index be76bdca0ec4..000000000000 --- a/snippets/logging-compatibility-snip.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Text snippet included in the following assemblies: -// -// * observability/logging/cluster-logging-support.adoc -// * observability/logging/logging_release_notes/logging-5-7-release-notes.adoc -// * observability/logging/logging_release_notes/logging-5-8-release-notes.adoc -// -// Text snippet included in the following modules: -// -// - -:_mod-docs-content-type: SNIPPET - -[NOTE] -==== -Logging is provided as an installable component, with a distinct release cycle from the core {product-title}. The link:https://access.redhat.com/support/policy/updates/openshift_operators#platform-agnostic[Red Hat OpenShift Container Platform Life Cycle Policy] outlines release compatibility.
-==== diff --git a/snippets/logging-elastic-dep-snip.adoc b/snippets/logging-elastic-dep-snip.adoc deleted file mode 100644 index 7a232dc874a7..000000000000 --- a/snippets/logging-elastic-dep-snip.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Text snippet included in the following assemblies: -// -// * observability/logging/cluster-logging-deploying.adoc -// -// Text snippet included in the following modules: -// -// * configuring-log-storage-cr.adoc - -:_mod-docs-content-type: SNIPPET - -[NOTE] -==== -The {logging-uc} 5.9 release does not contain an updated version of the {es-op}. If you currently use the {es-op} released with {logging-uc} 5.8, it will continue to work with {logging-uc} until the EOL of {logging-uc} 5.8. As an alternative to using the {es-op} to manage the default log storage, you can use the {loki-op}. For more information on the {logging-uc} lifecycle dates, see link:https://access.redhat.com/support/policy/updates/openshift_operators#platform-agnostic[Platform Agnostic Operators]. -==== \ No newline at end of file diff --git a/snippets/logging-fluentd-dep-snip.adoc b/snippets/logging-fluentd-dep-snip.adoc deleted file mode 100644 index da8022c318fa..000000000000 --- a/snippets/logging-fluentd-dep-snip.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Text snippet included in the following assemblies: -// -// * observability/logging/cluster-logging-deploying.adoc -// -// Text snippet included in the following modules: -// -// * configuring-logging-collector.adoc - -:_mod-docs-content-type: SNIPPET - -[NOTE] -==== -Fluentd is deprecated and is planned to be removed in a future release. Red{nbsp}Hat provides bug fixes and support for this feature during the current release lifecycle, but this feature no longer receives enhancements. As an alternative to Fluentd, you can use Vector instead. -==== diff --git a/snippets/logging-kibana-dep-snip.adoc b/snippets/logging-kibana-dep-snip.adoc deleted file mode 100644 index a4b98d8308b3..000000000000 --- a/snippets/logging-kibana-dep-snip.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Text snippet included in the following assemblies: -// -// * observability/logging/cluster-logging.adoc -// -// Text snippet included in the following modules: -// -// - -:_mod-docs-content-type: SNIPPET - -[NOTE] -==== -The Kibana web console is now deprecated and is planned to be removed in a future logging release. -==== diff --git a/snippets/logging-loki-statement-snip.adoc b/snippets/logging-loki-statement-snip.adoc deleted file mode 100644 index f8eae7281043..000000000000 --- a/snippets/logging-loki-statement-snip.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Text snippet included in the following assemblies: -// -// -// Text snippet included in the following modules: -// -// -:_mod-docs-content-type: SNIPPET - -Loki is a horizontally scalable, highly available, multi-tenant log aggregation system offered as a GA log store for {logging} {for} that can be visualized with the OpenShift {ObservabilityShortName} UI. The Loki configuration provided by OpenShift {logging-uc} is a short-term log store designed to enable users to perform fast troubleshooting with the collected logs. For that purpose, the {logging} {for} configuration of Loki has short-term storage, and is optimized for very recent queries. For long-term storage or queries over a long time period, users should look to log stores external to their cluster. - -Elasticsearch indexes incoming log records completely during ingestion. 
Loki indexes only a few fixed labels during ingestion and defers more complex parsing until after the logs have been stored. This means Loki can collect logs more quickly. \ No newline at end of file diff --git a/snippets/logging-stable-updates-snip.adoc b/snippets/logging-stable-updates-snip.adoc deleted file mode 100644 index ca8d0e33997f..000000000000 --- a/snippets/logging-stable-updates-snip.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Text snippet included in the following assemblies: -// -// * observability/logging/logging_release_notes/logging-5-7-release-notes.adoc -// * observability/logging/logging_release_notes/logging-5-8-release-notes.adoc -// -// Text snippet included in the following modules: -// logging-loki-gui-install.adoc -// cluster-logging-deploy-console.adoc -// -:_mod-docs-content-type: SNIPPET - -[NOTE] -==== -The *stable* channel only provides updates to the most recent release of logging. To continue receiving updates for prior releases, you must change your subscription channel to *stable-x.y*, where `x.y` represents the major and minor version of logging you have installed. For example, *stable-5.7*. -==== diff --git a/snippets/logging-support-exception-for-cluster-observability-operator-due-to-logging-ui-plugin.adoc b/snippets/logging-support-exception-for-cluster-observability-operator-due-to-logging-ui-plugin.adoc deleted file mode 100644 index 64b132427d85..000000000000 --- a/snippets/logging-support-exception-for-cluster-observability-operator-due-to-logging-ui-plugin.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Text snippet included in the following assembly: -// -// * observability/logging/logging-6.0/log6x-visual.adoc - -:_mod-docs-content-type: SNIPPET - -[IMPORTANT] -==== -Until the approaching General Availability (GA) release of the Cluster Observability Operator (COO), which is currently in link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview] (TP), Red{nbsp}Hat provides support to customers who are using Logging 6.0 or later with the COO for its{nbsp} -ifndef::openshift-rosa,openshift-rosa-hcp[] -link:https://docs.redhat.com/en/documentation/red_hat_openshift_cluster_observability_operator/1-latest/html/ui_plugins_for_red_hat_openshift_cluster_observability_operator/logging-ui-plugin#coo-logging-ui-plugin-install_logging-ui-plugin[Logging UI Plugin]{nbsp} -endif::openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-rosa,openshift-rosa-hcp[] -Logging UI Plugin{nbsp} -endif::openshift-rosa,openshift-rosa-hcp[] -on {product-title} 4.14 or later. This support exception is temporary as the COO includes several independent features, some of which are still TP features, but the Logging UI Plugin is ready for GA. -==== diff --git a/snippets/logging-supported-config-snip.adoc b/snippets/logging-supported-config-snip.adoc deleted file mode 100644 index 222e20106d90..000000000000 --- a/snippets/logging-supported-config-snip.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Text snippet included in the following assemblies: -// -// * observability/logging/cluster-logging-support.adoc -// -// Text snippet included in the following modules: -// - -:_mod-docs-content-type: SNIPPET - -Only the configuration options described in this documentation are supported for {logging}. - -Do not use any other configuration options, as they are unsupported. Configuration paradigms might change across {product-title} releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. 
If you use configurations other than those described in this documentation, your changes will be overwritten, because Operators are designed to reconcile any differences. - -[NOTE] -==== -If you must perform configurations not described in the {product-title} documentation, you must set your Red Hat OpenShift Logging Operator to `Unmanaged`. An unmanaged {logging} instance is not supported and does not receive updates until you return its status to `Managed`. -====
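For reference, a minimal sketch of the `Unmanaged` setting described in the note above, assuming the `ClusterLogging` CR with the documented default instance name and namespace:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance
  namespace: openshift-logging
spec:
  managementState: Unmanaged # Return this value to Managed to resume reconciliation and receive updates.
----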