diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 9414da4fcb48..f7e24d0516af 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -3076,13 +3076,87 @@ Topics: # File: log6x-visual # - Name: API reference 6.0 # File: log6x-api-reference - - Name: Logging 5.8 - Dir: logging_release_notes + - Name: Logging 5.8 release notes + File: logging-5-8-release-notes + - Name: Support + File: cluster-logging-support + - Name: Troubleshooting logging + Dir: troubleshooting + Topics: + - Name: Viewing Logging status + File: cluster-logging-cluster-status + - Name: Troubleshooting log forwarding + File: log-forwarding-troubleshooting + - Name: About Logging + File: cluster-logging + - Name: Installing Logging + File: cluster-logging-deploying + - Name: Updating Logging + File: cluster-logging-upgrading + Distros: openshift-enterprise,openshift-origin + - Name: Visualizing logs + Dir: log_visualization + Topics: + - Name: About log visualization + File: log-visualization + - Name: Log visualization with the web console + File: log-visualization-ocp-console + - Name: Configuring your Logging deployment + Dir: config + Distros: openshift-enterprise,openshift-origin + Topics: + - Name: Configuring CPU and memory limits for Logging components + File: cluster-logging-memory + - Name: Configuring systemd-journald for Logging + File: cluster-logging-systemd + - Name: Log collection and forwarding + Dir: log_collection_forwarding + Topics: + - Name: About log collection and forwarding + File: log-forwarding + - Name: Log output types + File: logging-output-types + - Name: Enabling JSON log forwarding + File: cluster-logging-enabling-json-logging + - Name: Configuring log forwarding + File: configuring-log-forwarding + - Name: Configuring the logging collector + File: cluster-logging-collector + - Name: Collecting and storing Kubernetes events + File: cluster-logging-eventrouter + - Name: Log storage + Dir: log_storage + Topics: + - Name: Installing log storage + File: installing-log-storage + - Name: Configuring the LokiStack log store + File: cluster-logging-loki + - Name: Logging alerts + Dir: logging_alerts Topics: - - Name: Release notes - File: logging-5-8-release-notes - - Name: Installing Logging - File: cluster-logging-deploying + - Name: Default logging alerts + File: default-logging-alerts + - Name: Custom logging alerts + File: custom-logging-alerts + - Name: Performance and reliability tuning + Dir: performance_reliability + Topics: + - Name: Flow control mechanisms + File: logging-flow-control-mechanisms + - Name: Scheduling resources + Dir: scheduling_resources + Topics: + - Name: Using node selectors to move logging resources + File: logging-node-selectors + - Name: Using tolerations to control logging pod placement + File: logging-taints-tolerations + - Name: Uninstalling Logging + File: cluster-logging-uninstall +# - Name: Exported fields +# File: cluster-logging-exported-fields +# Distros: openshift-enterprise,openshift-origin + # - Name: 5.7 Logging API reference + # File: logging-5-7-reference # - Name: Configuring the logging collector # File: cluster-logging-collector # - Name: Support diff --git a/modules/cluster-logging-collector-limits.adoc b/modules/cluster-logging-collector-limits.adoc index efd726c1acbc..eec48a89b8de 100644 --- a/modules/cluster-logging-collector-limits.adoc +++ b/modules/cluster-logging-collector-limits.adoc @@ -36,29 +36,3 @@ spec: # ... ---- <1> Specify the CPU and memory limits and requests as needed. 
The values shown are the default values. - -//// -[source,yaml] ----- -$ oc edit ClusterLogging instance - -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - collection: - logs: - rsyslog: - resources: - limits: <1> - memory: 358Mi - requests: - cpu: 100m - memory: 358Mi ----- -<1> Specify the CPU and memory limits and requests as needed. The values shown are the default values. -//// diff --git a/modules/cluster-logging-collector-log-forward-syslog.adoc b/modules/cluster-logging-collector-log-forward-syslog.adoc index 2cdcd5412bb8..b044aa3bfb74 100644 --- a/modules/cluster-logging-collector-log-forward-syslog.adoc +++ b/modules/cluster-logging-collector-log-forward-syslog.adoc @@ -9,9 +9,7 @@ To configure log forwarding using the *syslog* protocol, you must create a `Clus .Prerequisites * You must have a logging server that is configured to receive the logging data using the specified protocol or format. - .Procedure - . Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: + [source,yaml] diff --git a/modules/cluster-logging-deploying-about.adoc b/modules/cluster-logging-deploying-about.adoc index fec039ef7c5a..0754015be64b 100644 --- a/modules/cluster-logging-deploying-about.adoc +++ b/modules/cluster-logging-deploying-about.adoc @@ -155,10 +155,11 @@ spec: nodeCount: 3 resources: limits: - memory: 32Gi + cpu: 200m + memory: 16Gi requests: - cpu: 3 - memory: 32Gi + cpu: 200m + memory: 16Gi storage: storageClassName: "gp2" size: "200G" diff --git a/modules/cluster-logging-elasticsearch-audit.adoc b/modules/cluster-logging-elasticsearch-audit.adoc index 9c01bd20314a..aaadac6ca974 100644 --- a/modules/cluster-logging-elasticsearch-audit.adoc +++ b/modules/cluster-logging-elasticsearch-audit.adoc @@ -10,7 +10,7 @@ include::snippets/audit-logs-default.adoc[] .Procedure -To use the Log Forward API to forward audit logs to the internal Elasticsearch instance: +To use the Log Forwarding API to forward audit logs to the internal Elasticsearch instance: . Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: + diff --git a/modules/cluster-logging-kibana-limits.adoc b/modules/cluster-logging-kibana-limits.adoc index 63b81a78c6f9..af28054b855e 100644 --- a/modules/cluster-logging-kibana-limits.adoc +++ b/modules/cluster-logging-kibana-limits.adoc @@ -2,7 +2,6 @@ // // * observability/logging/cluster-logging-visualizer.adoc -:_mod-docs-content-type: PROCEDURE [id="cluster-logging-kibana-limits_{context}"] = Configure the CPU and memory limits for the log visualizer diff --git a/modules/cluster-logging-kibana-scaling.adoc b/modules/cluster-logging-kibana-scaling.adoc index a6aa97f0ae3d..4ff78d1748d8 100644 --- a/modules/cluster-logging-kibana-scaling.adoc +++ b/modules/cluster-logging-kibana-scaling.adoc @@ -19,8 +19,6 @@ $ oc -n openshift-logging edit ClusterLogging instance + [source,yaml] ---- -$ oc edit ClusterLogging instance - apiVersion: "logging.openshift.io/v1" kind: "ClusterLogging" metadata: @@ -35,4 +33,3 @@ spec: replicas: 1 <1> ---- <1> Specify the number of Kibana nodes. 
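+
+If you prefer a one-line change over an interactive edit, a merge patch can set the same field. The following is a minimal sketch that assumes the default `ClusterLogging` instance named `instance`:
+
+[source,terminal]
+----
+$ oc -n openshift-logging patch clusterlogging/instance --type merge -p '{"spec":{"visualization":{"kibana":{"replicas":2}}}}'
+----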
- diff --git a/modules/cluster-logging-maintenance-support-list-6x.adoc b/modules/cluster-logging-maintenance-support-list-6x.adoc index 3e35f5affa22..5e68fe88b194 100644 --- a/modules/cluster-logging-maintenance-support-list-6x.adoc +++ b/modules/cluster-logging-maintenance-support-list-6x.adoc @@ -1,9 +1,3 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging-6.0/log60-cluster-logging-support.adoc -// * observability/logging/logging-6.1/log61-cluster-logging-support.adoc -// * observability/logging/logging-6.2/log62-cluster-logging-support.adoc - :_mod-docs-content-type: REFERENCE [id="cluster-logging-maintenance-support-list_{context}"] = Unsupported configurations diff --git a/modules/cluster-logging-manual-rollout-rolling.adoc b/modules/cluster-logging-manual-rollout-rolling.adoc index 96dc5bcded41..bbd85f70e993 100644 --- a/modules/cluster-logging-manual-rollout-rolling.adoc +++ b/modules/cluster-logging-manual-rollout-rolling.adoc @@ -20,6 +20,7 @@ To perform a rolling cluster restart: . Change to the `openshift-logging` project: + +[source,terminal] ---- $ oc project openshift-logging ---- @@ -46,24 +47,28 @@ $ oc exec -c elasticsearch -- es_util --query="_flus + For example: + +[source,terminal] ---- $ oc exec -c elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_flush/synced" -XPOST ---- + .Example output -+ +[source,terminal] ---- {"_shards":{"total":4,"successful":4,"failed":0},".security":{"total":2,"successful":2,"failed":0},".kibana_1":{"total":2,"successful":2,"failed":0}} ---- -. Prevent shard balancing when purposely bringing down nodes using the {product-title} es_util tool: +. Prevent shard balancing when purposely bringing down nodes using the {product-title} +link:https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch#es_util[*es_util*] tool: + +[source,terminal] ---- $ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ---- + For example: + +[source,terminal] ---- $ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ---- @@ -79,18 +84,19 @@ $ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_uti .. By default, the {product-title} Elasticsearch cluster blocks rollouts to their nodes. Use the following command to allow rollouts and allow the pod to pick up the changes: + +[source,terminal] ---- $ oc rollout resume deployment/ ---- + For example: + +[source,terminal] ---- $ oc rollout resume deployment/elasticsearch-cdm-0-1 ---- + -.Example output -+ +[source,terminal] ---- deployment.extensions/elasticsearch-cdm-0-1 resumed ---- @@ -98,6 +104,7 @@ deployment.extensions/elasticsearch-cdm-0-1 resumed A new pod is deployed. After the pod has a ready container, you can move on to the next deployment. + +[source,terminal] ---- $ oc get pods -l component=elasticsearch- ---- @@ -113,24 +120,26 @@ elasticsearch-cdm-5ceex6ts-3-585968dc68-k7kjr 2/2 Running 0 22h .. After the deployments are complete, reset the pod to disallow rollouts: + +[source,terminal] ---- $ oc rollout pause deployment/ ---- + For example: + +[source,terminal] ---- $ oc rollout pause deployment/elasticsearch-cdm-0-1 ---- + -.Example output -+ +[source,terminal] ---- deployment.extensions/elasticsearch-cdm-0-1 paused ---- + .. 
Check that the Elasticsearch cluster is in a `green` or `yellow` state: + +[source,terminal] ---- $ oc exec -c elasticsearch -- es_util --query=_cluster/health?pretty=true ---- @@ -142,10 +151,13 @@ If you performed a rollout on the Elasticsearch pod you used in the previous com + For example: + +[source,terminal] ---- $ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query=_cluster/health?pretty=true ---- + +.Example output +[source,json] ---- { "cluster_name" : "elasticsearch", @@ -171,12 +183,14 @@ $ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_uti . After all the deployments for the cluster have been rolled out, re-enable shard balancing: + +[source,terminal] ---- $ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ---- + For example: + +[source,terminal] ---- $ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ---- diff --git a/modules/cluster-logging-must-gather-about.adoc b/modules/cluster-logging-must-gather-about.adoc index d27d0c947daf..ce0c605b83bd 100644 --- a/modules/cluster-logging-must-gather-about.adoc +++ b/modules/cluster-logging-must-gather-about.adoc @@ -1,7 +1,3 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-support.adoc - :_mod-docs-content-type: CONCEPT [id="about-must-gather_{context}"] = About the must-gather tool diff --git a/modules/cluster-logging-must-gather-collecting.adoc b/modules/cluster-logging-must-gather-collecting.adoc index 36ea531933b2..8dd90b62d445 100644 --- a/modules/cluster-logging-must-gather-collecting.adoc +++ b/modules/cluster-logging-must-gather-collecting.adoc @@ -1,7 +1,3 @@ -// Module included in the following assemblies: -// -// * observability/logging/cluster-logging-support.adoc - :_mod-docs-content-type: PROCEDURE [id="cluster-logging-must-gather-collecting_{context}"] = Collecting {logging} data @@ -16,18 +12,19 @@ To collect {logging} information with `must-gather`: . Run the `oc adm must-gather` command against the {logging} image: + -ifndef::openshift-origin[] +If you are using OKD: ++ [source,terminal] ---- -$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') +$ oc adm must-gather --image=quay.io/openshift/origin-cluster-logging-operator ---- -endif::openshift-origin[] -ifdef::openshift-origin[] ++ +Otherwise: ++ [source,terminal] ---- -$ oc adm must-gather --image=quay.io/openshift/origin-cluster-logging-operator +$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') ---- -endif::openshift-origin[] + The `must-gather` tool creates a new directory that starts with `must-gather.local` within the current directory. For example: `must-gather.local.4157245944708210408`. 
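+
+If you need to attach the gathered data to a support case, you can pack the directory into a single compressed archive first. A minimal sketch, assuming the example directory name above:
+
+[source,terminal]
+----
+$ tar -cvaf must-gather.tar.gz must-gather.local.4157245944708210408
+----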
diff --git a/modules/cluster-logging-troubleshooting-unknown.adoc b/modules/cluster-logging-troubleshooting-unknown.adoc index 91b196f84bdd..92e173aa7ce8 100644 --- a/modules/cluster-logging-troubleshooting-unknown.adoc +++ b/modules/cluster-logging-troubleshooting-unknown.adoc @@ -2,7 +2,6 @@ // // * logging/cluster-logging-troublehsooting.adoc -:_mod-docs-content-type: PROCEDURE [id="cluster-logging-troubleshooting-unknown_{context}"] = Troubleshooting a Kubernetes unknown error while connecting to Elasticsearch diff --git a/modules/cluster-logging-visualizer-launch.adoc b/modules/cluster-logging-visualizer-launch.adoc index 242746e11c82..040e35903c23 100644 --- a/modules/cluster-logging-visualizer-launch.adoc +++ b/modules/cluster-logging-visualizer-launch.adoc @@ -2,7 +2,6 @@ // // * observability/logging/cluster-logging-visualizer.adoc -:_mod-docs-content-type: PROCEDURE [id="cluster-logging-visualizer-launch_{context}"] = Launching the log visualizer @@ -28,7 +27,7 @@ yes + [NOTE] ==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forward API to configure a pipeline that uses the `default` output for audit logs. +The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. ==== .Procedure diff --git a/modules/enabling-loki-alerts.adoc b/modules/enabling-loki-alerts.adoc new file mode 100644 index 000000000000..ba51e0332891 --- /dev/null +++ b/modules/enabling-loki-alerts.adoc @@ -0,0 +1,101 @@ +:_mod-docs-content-type: PROCEDURE +[id="logging-enabling-loki-alerts_{context}"] += Creating a log-based alerting rule with Loki + +The `AlertingRule` CR contains a set of specifications and webhook validation definitions to declare groups of alerting rules for a single `LokiStack` instance. In addition, the webhook validation definition provides support for rule validation conditions: + +* If an `AlertingRule` CR includes an invalid `interval` period, it is an invalid alerting rule +* If an `AlertingRule` CR includes an invalid `for` period, it is an invalid alerting rule. +* If an `AlertingRule` CR includes an invalid LogQL `expr`, it is an invalid alerting rule. +* If an `AlertingRule` CR includes two groups with the same name, it is an invalid alerting rule. +* If none of the above applies, an alerting rule is considered valid. + +.AlertingRule definitions +[options="header"] +|=== +| Tenant type | Valid namespaces for `AlertingRule` CRs +| application a| `` +| audit a| `openshift-logging` +| infrastructure a| `openshift-/\*`, `kube-/\*`, `default` +|=== + +.Procedure + +. 
Create an `AlertingRule` custom resource (CR): ++ + +.Example infrastructure `AlertingRule` CR +[source,yaml] +---- + apiVersion: loki.grafana.com/v1 + kind: AlertingRule + metadata: + name: loki-operator-alerts + namespace: openshift-operators-redhat <1> + labels: <2> + openshift.io/: "true" + spec: + tenantID: "infrastructure" <3> + groups: + - name: LokiOperatorHighReconciliationError + rules: + - alert: HighPercentageError + expr: | <4> + sum(rate({kubernetes_namespace_name="openshift-operators-redhat", kubernetes_pod_name=~"loki-operator-controller-manager.*"} |= "error" [1m])) by (job) + / + sum(rate({kubernetes_namespace_name="openshift-operators-redhat", kubernetes_pod_name=~"loki-operator-controller-manager.*"}[1m])) by (job) + > 0.01 + for: 10s + labels: + severity: critical <5> + annotations: + summary: High Loki Operator Reconciliation Errors <6> + description: High Loki Operator Reconciliation Errors <7> +---- +<1> The namespace where this `AlertingRule` CR is created must have a label matching the LokiStack `spec.rules.namespaceSelector` definition. +<2> The `labels` block must match the LokiStack `spec.rules.selector` definition. +<3> `AlertingRule` CRs for `infrastructure` tenants are only supported in the `openshift-\*`, `kube-\*`, or `default` namespaces. +<4> The value for `kubernetes_namespace_name:` must match the value for `metadata.namespace`. +<5> The value of this mandatory field must be `critical`, `warning`, or `info`. +<6> This field is mandatory. +<7> This field is mandatory. + ++ +.Example application `AlertingRule` CR +[source,yaml] +---- + apiVersion: loki.grafana.com/v1 + kind: AlertingRule + metadata: + name: app-user-workload + namespace: app-ns <1> + labels: <2> + openshift.io/: "true" + spec: + tenantID: "application" + groups: + - name: AppUserWorkloadHighError + rules: + - alert: + expr: | <3> + sum(rate({kubernetes_namespace_name="app-ns", kubernetes_pod_name=~"podName.*"} |= "error" [1m])) by (job) + for: 10s + labels: + severity: critical <4> + annotations: + summary: <5> + description: <6> +---- +<1> The namespace where this `AlertingRule` CR is created must have a label matching the LokiStack `spec.rules.namespaceSelector` definition. +<2> The `labels` block must match the LokiStack `spec.rules.selector` definition. +<3> Value for `kubernetes_namespace_name:` must match the value for `metadata.namespace`. +<4> The value of this mandatory field must be `critical`, `warning`, or `info`. +<5> The value of this mandatory field is a summary of the rule. +<6> The value of this mandatory field is a detailed description of the rule. + +. Apply the `AlertingRule` CR: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- diff --git a/modules/identity-federation.adoc b/modules/identity-federation.adoc new file mode 100644 index 000000000000..a9beb7b5ce51 --- /dev/null +++ b/modules/identity-federation.adoc @@ -0,0 +1,59 @@ +:_mod-docs-content-type: PROCEDURE +[id="identity-federation_{context}"] += Enabling authentication to cloud-based log stores using short-lived tokens + +Workload identity federation enables authentication to cloud-based log stores using short-lived tokens. + +.Procedure + +* Use one of the following options to enable authentication: + +** If you use the {product-title} web console to install the {loki-op}, clusters that use short-lived tokens are automatically detected. You are prompted to create roles and supply the data required for the {loki-op} to create a `CredentialsRequest` object, which populates a secret. 
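++
+One way to check whether your cluster uses short-lived tokens before choosing an option is to inspect the cluster authentication configuration. This check is only a sketch and is not part of the installation flow; a non-empty issuer value suggests that the cluster is set up for short-lived token authentication:
++
+[source,terminal]
+----
+$ oc get authentication cluster -o jsonpath='{.spec.serviceAccountIssuer}'
+----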
+ +** If you use the {oc-first} to install the {loki-op}, you must manually create a `Subscription` object using the appropriate template for your storage provider, as shown in the following examples. This authentication strategy is only supported for the storage providers indicated. ++ +.Example Azure sample subscription +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: loki-operator + namespace: openshift-operators-redhat +spec: + channel: "stable-6.0" + installPlanApproval: Manual + name: loki-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: CLIENTID + value: + - name: TENANTID + value: + - name: SUBSCRIPTIONID + value: + - name: REGION + value: +---- ++ +.Example AWS sample subscription +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: loki-operator + namespace: openshift-operators-redhat +spec: + channel: "stable-6.0" + installPlanApproval: Manual + name: loki-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: ROLEARN + value: +---- diff --git a/modules/installing-logging-operator-cli.adoc b/modules/installing-logging-operator-cli.adoc new file mode 100644 index 000000000000..bb3de2ee9682 --- /dev/null +++ b/modules/installing-logging-operator-cli.adoc @@ -0,0 +1,163 @@ +// Module is included in the following assemblies: +// +// +:_mod-docs-content-type: PROCEDURE +[id="installing-logging-operator-cli_{context}"] += Installing {clo} by using the CLI + +Install {clo} on your {product-title} cluster to collect and forward logs to a log store by using the {oc-first}. + +.Prerequisites + +* You have administrator permissions. +* You installed the {oc-first}. +* You installed and configured {loki-op}. +* You have created the `openshift-logging` namespace. + +.Procedure + +. Create an `OperatorGroup` object: ++ +.Example `OperatorGroup` object +[source,yaml] +---- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: cluster-logging + namespace: openshift-logging # <1> +spec: + upgradeStrategy: Default +---- +<1> You must specify `openshift-logging` as the namespace. + +. Apply the `OperatorGroup` object by running the following command: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- + +. Create a `Subscription` object for {clo}: ++ +.Example `Subscription` object +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: cluster-logging + namespace: openshift-logging # <1> +spec: + channel: stable-6. # <2> + installPlanApproval: Automatic # <3> + name: cluster-logging + source: redhat-operators # <4> + sourceNamespace: openshift-marketplace +---- +<1> You must specify `openshift-logging` as the namespace. +<2> Specify `stable-6.` as the channel. +<3> If the approval strategy in the subscription is set to `Automatic`, the update process initiates as soon as a new operator version is available in the selected channel. If the approval strategy is set to `Manual`, you must manually approve pending updates. +<4> Specify `redhat-operators` as the value. If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, specify the name of the `CatalogSource` object that you created when you configured Operator Lifecycle Manager (OLM). + +. Apply the `Subscription` object by running the following command: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- + +. 
Create a service account to be used by the log collector: ++ +[source,terminal] +---- +$ oc create sa logging-collector -n openshift-logging +---- + +. Assign the necessary permissions to the service account for the collector to be able to collect and forward logs. In this example, the collector is provided permissions to collect logs from both infrastructure and application logs. ++ +[source,terminal] +---- +$ oc adm policy add-cluster-role-to-user logging-collector-logs-writer -z logging-collector -n openshift-logging +$ oc adm policy add-cluster-role-to-user collect-application-logs -z logging-collector -n openshift-logging +$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs -z logging-collector -n openshift-logging +---- + +. Create a `ClusterLogForwarder` CR: ++ +.Example `ClusterLogForwarder` CR +[source,yaml] +---- +apiVersion: observability.openshift.io/v1 +kind: ClusterLogForwarder +metadata: + name: instance + namespace: openshift-logging # <1> +spec: + serviceAccount: + name: logging-collector # <2> + outputs: + - name: lokistack-out + type: lokiStack # <3> + lokiStack: + target: # <4> + name: logging-loki + namespace: openshift-logging + authentication: + token: + from: serviceAccount + tls: + ca: + key: service-ca.crt + configMapName: openshift-service-ca.crt + pipelines: + - name: infra-app-logs + inputRefs: # <5> + - application + - infrastructure + outputRefs: + - lokistack-out +---- +<1> You must specify the `openshift-logging` namespace. +<2> Specify the name of the service account created before. +<3> Select the `lokiStack` output type to send logs to the `LokiStack` instance. +<4> Point the `ClusterLogForwarder` to the `LokiStack` instance created earlier. +<5> Select the log output types you want to send to the `LokiStack` instance. + +. Apply the `ClusterLogForwarder CR` object by running the following command: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- + +.Verification + +. Verify the installation by running the following command: ++ +[source,terminal] +---- +$ oc get pods -n openshift-logging +---- ++ +.Example output +[source,terminal] +---- +$ oc get pods -n openshift-logging +NAME READY STATUS RESTARTS AGE +cluster-logging-operator-fb7f7cf69-8jsbq 1/1 Running 0 98m +instance-222js 2/2 Running 0 18m +instance-g9ddv 2/2 Running 0 18m +instance-hfqq8 2/2 Running 0 18m +instance-sphwg 2/2 Running 0 18m +instance-vv7zn 2/2 Running 0 18m +instance-wk5zz 2/2 Running 0 18m +logging-loki-compactor-0 1/1 Running 0 42m +logging-loki-distributor-7d7688bcb9-dvcj8 1/1 Running 0 42m +logging-loki-gateway-5f6c75f879-bl7k9 2/2 Running 0 42m +logging-loki-gateway-5f6c75f879-xhq98 2/2 Running 0 42m +logging-loki-index-gateway-0 1/1 Running 0 42m +logging-loki-ingester-0 1/1 Running 0 42m +logging-loki-querier-6b7b56bccc-2v9q4 1/1 Running 0 42m +logging-loki-query-frontend-84fb57c578-gq2f7 1/1 Running 0 42m +---- diff --git a/modules/installing-logging-operator-web-console.adoc b/modules/installing-logging-operator-web-console.adoc new file mode 100644 index 000000000000..29919b25df4b --- /dev/null +++ b/modules/installing-logging-operator-web-console.adoc @@ -0,0 +1,163 @@ +:_mod-docs-content-type: PROCEDURE +[id="installing-logging-operator-web-console_{context}"] += Installing {clo} by using the web console + +Install {clo} on your {product-title} cluster to collect and forward logs to a log store from the OperatorHub by using the {product-title} web console. + +.Prerequisites + +* You have administrator permissions. 
+* You have access to the {product-title} web console. +* You installed and configured {loki-op}. + +.Procedure + +. In the {product-title} web console *Administrator* perspective, go to *Operators* -> *OperatorHub*. + +. Type {clo} in the *Filter by keyword* field. Click *{clo}* in the list of available Operators, and then click *Install*. + +. Select *stable-x.y* as the *Update channel*. The latest version is already selected in the *Version* field. ++ +The {clo} must be deployed to the {logging} namespace `openshift-logging`, so the *Installation mode* and *Installed Namespace* are already selected. If this namespace does not already exist, it will be created for you. + +. Select *Enable Operator-recommended cluster monitoring on this namespace.* ++ +This option sets the `openshift.io/cluster-monitoring: "true"` label in the `Namespace` object. You must select this option to ensure that cluster monitoring scrapes the `openshift-logging` namespace. + +. For *Update approval* select *Automatic*, then click *Install*. ++ +If the approval strategy in the subscription is set to *Automatic*, the update process initiates as soon as a new operator version is available in the selected channel. If the approval strategy is set to *Manual*, you must manually approve pending updates. ++ +[NOTE] +==== +An Operator might display a `Failed` status before the installation completes. If the operator installation completes with an `InstallSucceeded` message, refresh the page. +==== + +. While the operator installs, create the service account that will be used by the log collector to collect the logs. + +.. Click the *+* in the top right of the screen to access the *Import YAML* page. + +.. Enter the YAML definition for the service account. ++ +.Example `ServiceAccount` object +[source,yaml] +---- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: logging-collector # <1> + namespace: openshift-logging # <2> +---- +<1> Note down the name used for the service account `logging-collector` to use it later when creating the `ClusterLogForwarder` resource. +<2> Set the namespace to `openshift-logging` because that is the namespace for deploying the `ClusterLogForwarder` resource. + +.. Click the *Create* button. + +. Create the `ClusterRoleBinding` objects to grant the necessary permissions to the log collector for accessing the logs that you want to collect and to write the log store, for example infrastructure and application logs. + +.. Click the *+* in the top right of the screen to access the *Import YAML* page. + +.. Enter the YAML definition for the `ClusterRoleBinding` resources. 
++ +.Example `ClusterRoleBinding` resources +[source,yaml] +---- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: logging-collector:write-logs +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: logging-collector-logs-writer # <1> +subjects: +- kind: ServiceAccount + name: logging-collector + namespace: openshift-logging +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: logging-collector:collect-application +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: collect-application-logs # <2> +subjects: +- kind: ServiceAccount + name: logging-collector + namespace: openshift-logging +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: logging-collector:collect-infrastructure +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: collect-infrastructure-logs # <3> +subjects: +- kind: ServiceAccount + name: logging-collector + namespace: openshift-logging +---- +<1> The cluster role to allow the log collector to write logs to LokiStack. +<2> The cluster role to allow the log collector to collect logs from applications. +<3> The cluster role to allow the log collector to collect logs from infrastructure. + +.. Click the *Create* button. + +. Go to the *Operators* -> *Installed Operators* page. Select the operator and click the *All instances* tab. + +. After granting the necessary permissions to the service account, navigate to the *Installed Operators* page. Select the {clo} under the *Provided APIs*, find the *ClusterLogForwarder* resource and click *Create Instance*. + +. Select *YAML view*, and then use the following template to create a `ClusterLogForwarder` CR: ++ +.Example `ClusterLogForwarder` CR +[source,yaml] +---- +apiVersion: observability.openshift.io/v1 +kind: ClusterLogForwarder +metadata: + name: instance + namespace: openshift-logging # <1> +spec: + serviceAccount: + name: logging-collector # <2> + outputs: + - name: lokistack-out + type: lokiStack # <3> + lokiStack: + target: # <4> + name: logging-loki + namespace: openshift-logging + authentication: + token: + from: serviceAccount + tls: + ca: + key: service-ca.crt + configMapName: openshift-service-ca.crt + pipelines: + - name: infra-app-logs + inputRefs: # <5> + - application + - infrastructure + outputRefs: + - lokistack-out +---- +<1> You must specify `openshift-logging` as the namespace. +<2> Specify the name of the service account created earlier. +<3> Select the `lokiStack` output type to send logs to the `LokiStack` instance. +<4> Point the `ClusterLogForwarder` to the `LokiStack` instance created earlier. +<5> Select the log output types you want to send to the `LokiStack` instance. + +. Click *Create*. + +.Verification +. In the *ClusterLogForwarder* tab verify that you see your `ClusterLogForwarder` instance. + +. 
In the *Status* column, verify that you see the messages: + +* `Condition: observability.openshift.io/Authorized` +* `observability.openshift.io/Valid, Ready` diff --git a/modules/installing-loki-operator-cli.adoc b/modules/installing-loki-operator-cli.adoc new file mode 100644 index 000000000000..e198d401d4c3 --- /dev/null +++ b/modules/installing-loki-operator-cli.adoc @@ -0,0 +1,203 @@ +:_mod-docs-content-type: PROCEDURE +[id="install-loki-operator-cli_{context}"] += Installing the {loki-op} by using the CLI + +Install {loki-op} on your {product-title} cluster to manage the log store `Loki` by using the {product-title} command-line interface (CLI). You can deploy and configure the `Loki` log store by reconciling the resource LokiStack with the {loki-op}. + +.Prerequisites + +* You have administrator permissions. +* You installed the {oc-first}. +* You have access to a supported object store. For example: AWS S3, Google Cloud Storage, Azure, Swift, Minio, or {rh-storage}. + +.Procedure + +. Create a `Namespace` object for {loki-op}: ++ +.Example `Namespace` object +[source,yaml] +---- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-operators-redhat # <1> + labels: + openshift.io/cluster-monitoring: "true" # <2> +---- +<1> You must specify `openshift-operators-redhat` as the namespace. To enable monitoring for the operator, configure Cluster Monitoring Operator to scrape metrics from the `openshift-operators-redhat` namespace and not the `openshift-operators` namespace. The `openshift-operators` namespace might contain community operators, which are untrusted and could publish a metric with the same name as an {product-title} metric, causing conflicts. +<2> A string value that specifies the label as shown to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. + +. Apply the `Namespace` object by running the following command: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- + +. Create an `OperatorGroup` object. ++ +.Example `OperatorGroup` object +[source,yaml] +---- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: loki-operator + namespace: openshift-operators-redhat # <1> +spec: + upgradeStrategy: Default +---- +<1> You must specify `openshift-operators-redhat` as the namespace. + +. Apply the `OperatorGroup` object by running the following command: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- + +. Create a `Subscription` object for {loki-op}: ++ +.Example `Subscription` object +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: loki-operator + namespace: openshift-operators-redhat # <1> +spec: + channel: stable-6. # <2> + installPlanApproval: Automatic # <3> + name: loki-operator + source: redhat-operators # <4> + sourceNamespace: openshift-marketplace +---- +<1> You must specify `openshift-operators-redhat` as the namespace. +<2> Specify `stable-6.` as the channel. +<3> If the approval strategy in the subscription is set to `Automatic`, the update process initiates as soon as a new operator version is available in the selected channel. If the approval strategy is set to `Manual`, you must manually approve pending updates. +<4> Specify `redhat-operators` as the value. If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, specify the name of the `CatalogSource` object that you created when you configured Operator Lifecycle Manager (OLM). + +. 
Apply the `Subscription` object by running the following command:
++
+[source,terminal]
+----
+$ oc apply -f .yaml
+----
+
+. Create a `namespace` object for deploying the LokiStack:
++
+.Example `namespace` object
+[source,yaml]
+----
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: openshift-logging # <1>
+  labels:
+    openshift.io/cluster-monitoring: "true" # <2>
+----
+<1> The `openshift-logging` namespace is dedicated for all {logging} workloads.
+<2> A string value that specifies the label, as shown, to ensure that cluster monitoring scrapes the `openshift-logging` namespace.
+
+. Apply the `namespace` object by running the following command:
++
+[source,terminal]
+----
+$ oc apply -f .yaml
+----
+
+. Create a secret with the credentials to access the object storage. For example, create a secret to access {aws-first} S3.
++
+.Example `Secret` object
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  name: logging-loki-s3 # <1>
+  namespace: openshift-logging
+stringData: # <2>
+  access_key_id: 
+  access_key_secret: 
+  bucketnames: s3-bucket-name
+  endpoint: https://s3.eu-central-1.amazonaws.com
+  region: eu-central-1
+----
+<1> Use the name `logging-loki-s3` to match the name used in LokiStack.
+<2> For the contents of the secret, see the Loki object storage section.
++
+--
+include::snippets/logging-retention-period-snip.adoc[leveloffset=+1]
+--
+
+. Apply the `Secret` object by running the following command:
++
+[source,terminal]
+----
+$ oc apply -f .yaml
+----
+
+. Create a `LokiStack` CR:
++
+.Example `LokiStack` CR
+[source,yaml]
+----
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+  name: logging-loki # <1>
+  namespace: openshift-logging # <2>
+spec:
+  size: 1x.small # <3>
+  storage:
+    schemas:
+    - version: v13
+      effectiveDate: "--
" # <4> + secret: + name: logging-loki-s3 # <5> + type: s3 # <6> + storageClassName: # <7> + tenants: + mode: openshift-logging # <8> +---- +<1> Use the name `logging-loki`. +<2> You must specify `openshift-logging` as the namespace. +<3> Specify the deployment size. Supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. Additionally, `1x.pico` is supported starting with {logging} 6.1. +<4> For new installations this date should be set to the equivalent of "yesterday", as this will be the date from when the schema takes effect. +<5> Specify the name of your log store secret. +<6> Specify the corresponding storage type. +<7> Specify the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. You can list the available storage classes for your cluster by using the `oc get storageclasses` command. +<8> The `openshift-logging` mode is the default tenancy mode where a tenant is created for log types, such as audit, infrastructure, and application. This enables access control for individual users and user groups to different log streams. + + +. Apply the `LokiStack` CR object by running the following command: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- + +.Verification + +* Verify the installation by running the following command: ++ +[source,terminal] +---- +$ oc get pods -n openshift-logging +---- ++ +.Example output +[source,terminal] +---- +$ oc get pods -n openshift-logging +NAME READY STATUS RESTARTS AGE +logging-loki-compactor-0 1/1 Running 0 42m +logging-loki-distributor-7d7688bcb9-dvcj8 1/1 Running 0 42m +logging-loki-gateway-5f6c75f879-bl7k9 2/2 Running 0 42m +logging-loki-gateway-5f6c75f879-xhq98 2/2 Running 0 42m +logging-loki-index-gateway-0 1/1 Running 0 42m +logging-loki-ingester-0 1/1 Running 0 42m +logging-loki-querier-6b7b56bccc-2v9q4 1/1 Running 0 42m +logging-loki-query-frontend-84fb57c578-gq2f7 1/1 Running 0 42m +---- diff --git a/modules/installing-loki-operator-web-console.adoc b/modules/installing-loki-operator-web-console.adoc new file mode 100644 index 000000000000..7774925dd165 --- /dev/null +++ b/modules/installing-loki-operator-web-console.adoc @@ -0,0 +1,133 @@ +:_mod-docs-content-type: PROCEDURE +[id="installing-loki-operator-web-console_{context}"] += Installing {loki-op} by using the web console + +Install {loki-op} on your {product-title} cluster to manage the log store `Loki` from the OperatorHub by using the {product-title} web console. You can deploy and configure the `Loki` log store by reconciling the resource LokiStack with the {loki-op}. + +.Prerequisites + +* You have administrator permissions. +* You have access to the {product-title} web console. +* You have access to a supported object store (AWS S3, Google Cloud Storage, Azure, Swift, Minio, {rh-storage}). + +.Procedure + +. In the {product-title} web console *Administrator* perspective, go to *Operators* -> *OperatorHub*. + +. Type {loki-op} in the *Filter by keyword* field. Click *{loki-op}* in the list of available Operators, and then click *Install*. ++ +[IMPORTANT] +==== +The Community {loki-op} is not supported by Red{nbsp}Hat. +==== + +. Select *stable-x.y* as the *Update channel*. ++ +The {loki-op} must be deployed to the global Operator group namespace `openshift-operators-redhat`, so the *Installation mode* and *Installed Namespace* are already selected. If this namespace does not already exist, it will be created for you. + +. 
Select *Enable Operator-recommended cluster monitoring on this namespace.*
++
+This option sets the `openshift.io/cluster-monitoring: "true"` label in the `Namespace` object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace.
+
+. For *Update approval* select *Automatic*, then click *Install*.
++
+If the approval strategy in the subscription is set to *Automatic*, the update process initiates as soon as a new Operator version is available in the selected channel. If the approval strategy is set to *Manual*, you must manually approve pending updates.
++
+[NOTE]
+====
+An Operator might display a `Failed` status before the installation completes. If the Operator installation completes with an `InstallSucceeded` message, refresh the page.
+====
+
+. While the Operator installs, create the namespace to which the log store will be deployed.
+
+.. Click *+* in the top right of the screen to access the *Import YAML* page.
+
+.. Add the YAML definition for the `openshift-logging` namespace:
++
+.Example `namespace` object
+[source,yaml]
+----
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: openshift-logging # <1>
+  labels:
+    openshift.io/cluster-monitoring: "true" # <2>
+----
+<1> The `openshift-logging` namespace is dedicated for all {logging} workloads.
+<2> A string value that specifies the label, as shown, to ensure that cluster monitoring scrapes the `openshift-logging` namespace.
+
+.. Click *Create*.
+
+. Create a secret with the credentials to access the object storage.
+
+.. Click *+* in the top right of the screen to access the *Import YAML* page.
+
+.. Add the YAML definition for the secret. For example, create a secret to access Amazon Web Services (AWS) S3:
++
+.Example `Secret` object
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  name: logging-loki-s3 <1>
+  namespace: openshift-logging <2>
+stringData: <3>
+  access_key_id: 
+  access_key_secret: 
+  bucketnames: s3-bucket-name
+  endpoint: https://s3.eu-central-1.amazonaws.com
+  region: eu-central-1
+----
+<1> Note down the name used for the secret `logging-loki-s3` to use it later when creating the `LokiStack` resource.
+<2> Set the namespace to `openshift-logging`, because that is the namespace where `LokiStack` is deployed.
+<3> For the contents of the secret, see the Loki object storage section.
++
+--
+include::snippets/logging-retention-period-snip.adoc[leveloffset=+1]
+--
+
+.. Click *Create*.
+
+. Navigate to the *Installed Operators* page. Select the {loki-op} under the *Provided APIs*, find the *LokiStack* resource, and click *Create Instance*.
+
+. Select *YAML view*, and then use the following template to create a `LokiStack` CR:
++
+--
+.Example `LokiStack` CR
+[source,yaml]
+----
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+  name: logging-loki # <1>
+  namespace: openshift-logging # <2>
+spec:
+  size: 1x.small # <3>
+  storage:
+    schemas:
+    - version: v13
+      effectiveDate: "--"
+    secret:
+      name: logging-loki-s3 # <4>
+      type: s3 # <5>
+    storageClassName:  # <6>
+  tenants:
+    mode: openshift-logging # <7>
+----
+<1> Use the name `logging-loki`.
+<2> You must specify `openshift-logging` as the namespace.
+<3> Specify the deployment size. Supported size options for production instances of Loki are `1x.extra-small`, `1x.small`, or `1x.medium`. Additionally, `1x.pico` is supported starting with {logging} 6.1.
+<4> Specify the name of your log store secret.
+<5> Specify the corresponding storage type.
+<6> Specify the name of a storage class for temporary storage. For best performance, specify a storage class that allocates block storage. You can list the available storage classes for your cluster by using the `oc get storageclasses` command.
+<7> The `openshift-logging` mode is the default tenancy mode where a tenant is created for log types, such as audit, infrastructure, and application. This enables access control for individual users and user groups to different log streams.
+--
+
+. Click *Create*.
+
+.Verification
+
+. In the *LokiStack* tab verify that you see your `LokiStack` instance.
+. In the *Status* column, verify that you see the message `Condition: Ready` with a green checkmark.
diff --git a/modules/logging-forwarding-azure.adoc b/modules/logging-forwarding-azure.adoc
index 3650a7bb3456..ef642021dd04 100644
--- a/modules/logging-forwarding-azure.adoc
+++ b/modules/logging-forwarding-azure.adoc
@@ -127,7 +127,7 @@ spec:
     secret:
       name: my-secret
   pipelines:
-  - name: app-pipeline 
+  - name: app-pipeline
    inputRefs:
      - application
    outputRefs:
diff --git a/modules/logging-http-forward.adoc b/modules/logging-http-forward.adoc
index e8fa694751c5..47a97813eb37 100644
--- a/modules/logging-http-forward.adoc
+++ b/modules/logging-http-forward.adoc
@@ -1,7 +1,3 @@
-// Module included in the following assemblies:
-//
-// * observability/logging/log_collection_forwarding/configuring-log-forwarding.adoc
-
 :_mod-docs-content-type: PROCEDURE
 [id="logging-http-forward_{context}"]
 = Forwarding logs over HTTP
diff --git a/modules/logging-loki-cli-install.adoc b/modules/logging-loki-cli-install.adoc
index 1d8fde64cb67..571c859026a5 100644
--- a/modules/logging-loki-cli-install.adoc
+++ b/modules/logging-loki-cli-install.adoc
@@ -202,13 +202,6 @@ spec:
   logStore:
     lokistack:
       name: logging-loki
-    retentionPolicy:
-      application:
-        maxAge: 7d
-      audit:
-        maxAge: 7d
-      infra:
-        maxAge: 7d
     type: lokistack
   visualization:
     type: ocp-console
diff --git a/modules/logging-loki-gui-install.adoc b/modules/logging-loki-gui-install.adoc
index 48be3f57e8be..0648a406cc84 100644
--- a/modules/logging-loki-gui-install.adoc
+++ b/modules/logging-loki-gui-install.adoc
@@ -141,13 +141,6 @@ spec:
   logStore:
     lokistack:
       name: logging-loki
-    retentionPolicy:
-      application:
-        maxAge: 7d
-      audit:
-        maxAge: 7d
-      infra:
-        maxAge: 7d
     type: lokistack
   visualization:
     type: ocp-console
diff --git a/modules/logging-loki-storage-odf.adoc b/modules/logging-loki-storage-odf.adoc
index dc88ed867cd4..3fe69f83bf88 100644
--- a/modules/logging-loki-storage-odf.adoc
+++ b/modules/logging-loki-storage-odf.adoc
@@ -10,7 +10,7 @@
 * You installed the {loki-op}.
 * You installed the {oc-first}.
 * You deployed link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/[{rh-storage}].
-* You configured your {rh-storage} cluster link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.17/html/managing_and_allocating_storage_resources/adding-file-and-object-storage-to-an-existing-external-ocs-cluster[for object storage].
+* You configured your {rh-storage} cluster link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.18/html/managing_and_allocating_storage_resources/adding-file-and-object-storage-to-an-existing-external-ocs-cluster[for object storage].
 
 .Procedure
diff --git a/modules/logging-oc-explain.adoc b/modules/logging-oc-explain.adoc
new file mode 100644
index 000000000000..264c2d4214ed
--- /dev/null
+++ b/modules/logging-oc-explain.adoc
@@ -0,0 +1,75 @@
+// Module included in the following assemblies:
+//
+:_mod-docs-content-type: CONCEPT
+[id="logging-oc-explain_{context}"]
+= Using the `oc explain` command
+
+The `oc explain` command is an essential tool in the OpenShift CLI (`oc`) that provides detailed descriptions of the fields within custom resources (CRs). This command is invaluable for administrators and developers who are configuring or troubleshooting resources in an OpenShift cluster.
+
+== Resource descriptions
+`oc explain` offers in-depth explanations of all fields associated with a specific object. This includes standard resources like pods and services, as well as more complex entities like statefulsets and custom resources defined by Operators.
+
+To view the documentation for the `outputs` field of the `ClusterLogForwarder` custom resource, run:
+
+[source,terminal]
+----
+$ oc explain clusterlogforwarders.observability.openshift.io.spec.outputs
+----
+
+[NOTE]
+====
+You can use the short form `obsclf` in place of `clusterlogforwarder`.
+====
+
+This command displays detailed information about these fields, including their types, default values, and any associated sub-fields.
+
+== Hierarchical structure
+The command displays the structure of resource fields in a hierarchical format, clarifying the relationships between different configuration options.
+
+For instance, the following commands drill down into the `storage` configuration for a `LokiStack` custom resource:
+
+[source,terminal]
+----
+$ oc explain lokistacks.loki.grafana.com
+$ oc explain lokistacks.loki.grafana.com.spec
+$ oc explain lokistacks.loki.grafana.com.spec.storage
+$ oc explain lokistacks.loki.grafana.com.spec.storage.schemas
+----
+
+Each command reveals a deeper level of the resource specification, making the structure clear.
+
+== Type information
+`oc explain` also indicates the type of each field (such as string, integer, or boolean), so that you can verify that resource definitions use the correct data types.
+
+For example:
+
+[source,terminal]
+----
+$ oc explain lokistacks.loki.grafana.com.spec.size
+----
+
+The output shows that `size` must be defined by using an integer value.
+
+== Default values
+When applicable, the command shows the default values for fields, providing insight into which values are used if none are explicitly specified.
+
+Again using `lokistacks.loki.grafana.com` as an example:
+
+[source,terminal]
+----
+$ oc explain lokistacks.spec.template.distributor.replicas
+----
+
+.Example output
+[source,terminal]
+----
+GROUP: loki.grafana.com
+KIND: LokiStack
+VERSION: v1
+
+FIELD: replicas 
+
+DESCRIPTION:
+    Replicas defines the number of replica pods of the component. 
+---- diff --git a/modules/logging-upgrading-clo.adoc b/modules/logging-upgrading-clo.adoc index f8ee1f5a4df8..fa7e6d2db6d9 100644 --- a/modules/logging-upgrading-clo.adoc +++ b/modules/logging-upgrading-clo.adoc @@ -22,11 +22,11 @@ To update the {clo} to a new major release version, you must modify the update c . Click the *Red Hat OpenShift Logging* Operator. -. Click *Subscription*. In the *Subscription details* section, click the *Update channel* link. This link text might be *stable* or *stable-5.9*, depending on your current update channel. +. Click *Subscription*. In the *Subscription details* section, click the *Update channel* link. This link text might be *stable* or *stable-5.8*, depending on your current update channel. -. In the *Change Subscription Update Channel* window, select the latest major version update channel, *stable-5.9*, and click *Save*. Note the `cluster-logging.v5.9.` version. +. In the *Change Subscription Update Channel* window, select the latest major version update channel, *stable-5.8*, and click *Save*. Note the `cluster-logging.v5.8.` version. -. Wait for a few seconds, and then go to *Operators* -> *Installed Operators* to verify that the {clo} version matches the latest `cluster-logging.v5.9.` version. +. Wait for a few seconds, and then go to *Operators* -> *Installed Operators* to verify that the {clo} version matches the latest `cluster-logging.v5.8.` version. . On the *Operators* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. diff --git a/modules/logging-upgrading-loki-schema.adoc b/modules/logging-upgrading-loki-schema.adoc index 78904230942b..73ca258ec173 100644 --- a/modules/logging-upgrading-loki-schema.adoc +++ b/modules/logging-upgrading-loki-schema.adoc @@ -6,7 +6,7 @@ [id="logging-upgrading-loki-schema_{context}"] = Upgrading the LokiStack storage schema -If you are using the {clo} with the {loki-op}, the {clo} 5.9 or later supports the `v13` schema version in the `LokiStack` custom resource. Upgrading to the `v13` schema version is recommended because it is the schema version to be supported going forward. +If you are using the {clo} with the {loki-op}, the {clo} 5.8 or later supports the `v13` schema version in the `LokiStack` custom resource. Upgrading to the `v13` schema version is recommended because it is the schema version to be supported going forward. .Procedure diff --git a/modules/loki-memberlist-ip.adoc b/modules/loki-memberlist-ip.adoc new file mode 100644 index 000000000000..fda73d3f2f07 --- /dev/null +++ b/modules/loki-memberlist-ip.adoc @@ -0,0 +1,29 @@ +:_mod-docs-content-type: CONCEPT +[id="loki-memberlist-ip_{context}"] += Configuring Loki to tolerate memberlist creation failure + +In an {product-title} cluster, administrators generally use a non-private IP network range. As a result, the LokiStack memberlist configuration fails because, by default, it only uses private IP networks. + +As an administrator, you can select the pod network for the memberlist configuration. You can modify the `LokiStack` custom resource (CR) to use the `podIP` address in the `hashRing` spec. 
To configure the `LokiStack` CR, use the following command: + +[source,terminal] +---- +$ oc patch LokiStack logging-loki -n openshift-logging --type=merge -p '{"spec": {"hashRing":{"memberlist":{"instanceAddrType":"podIP"},"type":"memberlist"}}}' +---- + +.Example LokiStack to include `podIP` +[source,yaml] +---- +apiVersion: loki.grafana.com/v1 +kind: LokiStack +metadata: + name: logging-loki + namespace: openshift-logging +spec: +# ... + hashRing: + type: memberlist + memberlist: + instanceAddrType: podIP +# ... +---- diff --git a/modules/loki-pod-placement.adoc b/modules/loki-pod-placement.adoc new file mode 100644 index 000000000000..4e4597d6ea0b --- /dev/null +++ b/modules/loki-pod-placement.adoc @@ -0,0 +1,195 @@ +:_mod-docs-content-type: CONCEPT +[id="loki-pod-placement_{context}"] += Loki pod placement + +You can control which nodes the Loki pods run on, and prevent other workloads from using those nodes, by using tolerations or node selectors on the pods. + +You can apply tolerations to the log store pods with the LokiStack custom resource (CR) and apply taints to a node with the node specification. A taint on a node is a `key:value` pair that instructs the node to repel all pods that do not allow the taint. Using a specific `key:value` pair that is not on other pods ensures that only the log store pods can run on that node. + +.Example LokiStack with node selectors +[source,yaml] +---- +apiVersion: loki.grafana.com/v1 +kind: LokiStack +metadata: + name: logging-loki + namespace: openshift-logging +spec: +# ... + template: + compactor: # <1> + nodeSelector: + node-role.kubernetes.io/infra: "" # <2> + distributor: + nodeSelector: + node-role.kubernetes.io/infra: "" + gateway: + nodeSelector: + node-role.kubernetes.io/infra: "" + indexGateway: + nodeSelector: + node-role.kubernetes.io/infra: "" + ingester: + nodeSelector: + node-role.kubernetes.io/infra: "" + querier: + nodeSelector: + node-role.kubernetes.io/infra: "" + queryFrontend: + nodeSelector: + node-role.kubernetes.io/infra: "" + ruler: + nodeSelector: + node-role.kubernetes.io/infra: "" +# ... +---- +<1> Specifies the component pod type that applies to the node selector. +<2> Specifies the pods that are moved to nodes containing the defined label. + + +.Example LokiStack CR with node selectors and tolerations +[source,yaml] +---- +apiVersion: loki.grafana.com/v1 +kind: LokiStack +metadata: + name: logging-loki + namespace: openshift-logging +spec: +# ... 
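+  # Each component stanza below pairs a nodeSelector (pods are scheduled only
+  # onto nodes labeled node-role.kubernetes.io/infra) with tolerations for the
+  # matching infra taints, so the Loki pods can run on nodes that repel other
+  # workloads.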
+ template: + compactor: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved + distributor: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved + indexGateway: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved + ingester: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved + querier: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved + queryFrontend: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved + ruler: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved + gateway: + nodeSelector: + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/infra + value: reserved + - effect: NoExecute + key: node-role.kubernetes.io/infra + value: reserved +# ... +---- + +To configure the `nodeSelector` and `tolerations` fields of the LokiStack (CR), you can use the [command]`oc explain` command to view the description and fields for a particular resource: + +[source,terminal] +---- +$ oc explain lokistack.spec.template +---- + +.Example output +[source,text] +---- +KIND: LokiStack +VERSION: loki.grafana.com/v1 + +RESOURCE: template + +DESCRIPTION: + Template defines the resource/limits/tolerations/nodeselectors per + component + +FIELDS: + compactor + Compactor defines the compaction component spec. + + distributor + Distributor defines the distributor component spec. +... +---- + +For more detailed information, you can add a specific field: + +[source,terminal] +---- +$ oc explain lokistack.spec.template.compactor +---- + +.Example output +[source,text] +---- +KIND: LokiStack +VERSION: loki.grafana.com/v1 + +RESOURCE: compactor + +DESCRIPTION: + Compactor defines the compaction component spec. + +FIELDS: + nodeSelector + NodeSelector defines the labels required by a node to schedule the + component onto it. +... 
+---- diff --git a/modules/loki-rate-limit-errors.adoc b/modules/loki-rate-limit-errors.adoc index cf23ff428900..31bbfd2c5d69 100644 --- a/modules/loki-rate-limit-errors.adoc +++ b/modules/loki-rate-limit-errors.adoc @@ -1,8 +1,3 @@ -// Module is included in the following assemblies: -// * logging/cluster-logging-loki.adoc -// * observability/logging/log_collection_forwarding/log-forwarding.adoc -// * observability/logging/troubleshooting/log-forwarding-troubleshooting.adoc - :_mod-docs-content-type: PROCEDURE [id="loki-rate-limit-errors_{context}"] = Troubleshooting Loki rate limit errors @@ -47,12 +42,6 @@ The `LokiStack` CR is not available on Grafana-hosted Loki. This topic does not 2023-08-25T16:08:49.301780Z WARN sink{component_kind="sink" component_id=default_loki_infra component_type=loki component_name=default_loki_infra}: vector::sinks::util::retries: Retrying after error. error=Server responded with an error: 429 Too Many Requests internal_log_rate_limit=true ---- + -.Example Fluentd error message -[source,text] ----- -2023-08-30 14:52:15 +0000 [warn]: [default_loki_infra] failed to flush the buffer. retry_times=2 next_retry_time=2023-08-30 14:52:19 +0000 chunk="604251225bf5378ed1567231a1c03b8b" error_class=Fluent::Plugin::LokiOutput::LogPostError error="429 Too Many Requests Ingestion rate limit exceeded for user infrastructure (limit: 4194304 bytes/sec) while attempting to ingest '4082' lines totaling '7820025' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased\n" ----- -+ The error is also visible on the receiving end. For example, in the LokiStack ingester pod: + .Example Loki ingester error message diff --git a/modules/loki-rbac-rules-permissions.adoc b/modules/loki-rbac-rules-permissions.adoc index 0b527c6572dd..3c869a649f3c 100644 --- a/modules/loki-rbac-rules-permissions.adoc +++ b/modules/loki-rbac-rules-permissions.adoc @@ -1,7 +1,3 @@ -// Module included in the following assemblies: -// -// * observability/logging/logging_alerts/custom-logging-alerts.adoc - :_mod-docs-content-type: REFERENCE [id="loki-rbac-rules-permissions_{context}"] = Authorizing LokiStack rules RBAC permissions @@ -9,7 +5,7 @@ Administrators can allow users to create and manage their own alerting and recording rules by binding cluster roles to usernames. Cluster roles are defined as `ClusterRole` objects that contain necessary role-based access control (RBAC) permissions for users. -In logging 5.8 and later, the following cluster roles for alerting and recording rules are available for LokiStack: +The following cluster roles for alerting and recording rules are available for LokiStack: [options="header"] |=== @@ -41,7 +37,7 @@ In logging 5.8 and later, the following cluster roles for alerting and recording |=== -[id="loki-rbac-rules-permissions-examples"] +[id="loki-rbac-rules-permissions-examples_{context}"] == Examples To apply cluster roles for a user, you must bind an existing cluster role to a specific username. diff --git a/modules/loki-reliability-hardening.adoc b/modules/loki-reliability-hardening.adoc new file mode 100644 index 000000000000..bd54c1d24c18 --- /dev/null +++ b/modules/loki-reliability-hardening.adoc @@ -0,0 +1,35 @@ +:_mod-docs-content-type: CONCEPT +[id="loki-reliability-hardening_{context}"] += Configuring Loki to tolerate node failure + +The {loki-op} supports setting pod anti-affinity rules to request that pods of the same component are scheduled on different available nodes in the cluster. 
+
+include::snippets/about-pod-affinity.adoc[]
+
+The Operator sets default, preferred `podAntiAffinity` rules for all Loki components, which include the `compactor`, `distributor`, `gateway`, `indexGateway`, `ingester`, `querier`, `queryFrontend`, and `ruler` components.
+
+You can override the preferred `podAntiAffinity` settings for Loki components by configuring required settings in the `requiredDuringSchedulingIgnoredDuringExecution` field:
+
+.Example user settings for the ingester component
+[source,yaml]
+----
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+  name: logging-loki
+  namespace: openshift-logging
+spec:
+# ...
+  template:
+    ingester:
+      podAntiAffinity:
+      # ...
+        requiredDuringSchedulingIgnoredDuringExecution: <1>
+        - labelSelector:
+            matchLabels: <2>
+              app.kubernetes.io/component: ingester
+          topologyKey: kubernetes.io/hostname
+# ...
+----
+<1> The stanza to define a required rule.
+<2> The key-value pair (label) that must be matched to apply the rule.
diff --git a/modules/loki-restart-hardening.adoc b/modules/loki-restart-hardening.adoc
new file mode 100644
index 000000000000..445746c29e34
--- /dev/null
+++ b/modules/loki-restart-hardening.adoc
@@ -0,0 +1,5 @@
+:_mod-docs-content-type: CONCEPT
+[id="loki-restart-hardening_{context}"]
+= LokiStack behavior during cluster restarts
+
+When an {product-title} cluster is restarted, LokiStack ingestion and the query path continue to operate within the CPU and memory resources available for the node. This means that there is no downtime for the LokiStack during {product-title} cluster updates. This behavior is achieved by using `PodDisruptionBudget` resources. The {loki-op} provisions `PodDisruptionBudget` resources for Loki, which determine the minimum number of pods that must be available per component to ensure normal operations under certain conditions.
diff --git a/modules/loki-retention.adoc b/modules/loki-retention.adoc
new file mode 100644
index 000000000000..f1cf84503f8e
--- /dev/null
+++ b/modules/loki-retention.adoc
@@ -0,0 +1,114 @@
+:_mod-docs-content-type: PROCEDURE
+[id="loki-retention_{context}"]
+= Enabling stream-based retention with Loki
+
+You can configure retention policies based on log streams. You can set retention rules globally, per tenant, or both. If you configure both, tenant rules apply before global rules.
+
+include::snippets/logging-retention-period-snip.adoc[]
+
+[NOTE]
+====
+Schema v13 is recommended.
+====
+
+.Procedure
+
+. Create a `LokiStack` CR:
++
+** Enable stream-based retention globally as shown in the following example:
++
+.Example global stream-based retention for AWS
+[source,yaml]
+----
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+  name: logging-loki
+  namespace: openshift-logging
+spec:
+  limits:
+    global: <1>
+      retention: <2>
+        days: 20
+        streams:
+        - days: 4
+          priority: 1
+          selector: '{kubernetes_namespace_name=~"test.+"}' <3>
+        - days: 1
+          priority: 1
+          selector: '{log_type="infrastructure"}'
+  managementState: Managed
+  replicationFactor: 1
+  size: 1x.small
+  storage:
+    schemas:
+    - effectiveDate: "2020-10-11"
+      version: v13
+    secret:
+      name: logging-loki-s3
+      type: aws
+  storageClassName: gp3-csi
+  tenants:
+    mode: openshift-logging
+----
+<1> Sets retention policy for all log streams. *Note: This field does not impact the retention period for stored logs in object storage.*
+<2> Retention is enabled in the cluster when this block is added to the CR.
+<3> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream.
+
+** Enable stream-based retention on a per-tenant basis as shown in the following example:
++
+.Example per-tenant stream-based retention for AWS
+[source,yaml]
+----
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+  name: logging-loki
+  namespace: openshift-logging
+spec:
+  limits:
+    global:
+      retention:
+        days: 20
+    tenants: <1>
+      application:
+        retention:
+          days: 1
+          streams:
+          - days: 4
+            selector: '{kubernetes_namespace_name=~"test.+"}' <2>
+      infrastructure:
+        retention:
+          days: 5
+          streams:
+          - days: 1
+            selector: '{kubernetes_namespace_name=~"openshift-cluster.+"}'
+  managementState: Managed
+  replicationFactor: 1
+  size: 1x.small
+  storage:
+    schemas:
+    - effectiveDate: "2020-10-11"
+      version: v13
+    secret:
+      name: logging-loki-s3
+      type: aws
+  storageClassName: gp3-csi
+  tenants:
+    mode: openshift-logging
+----
+<1> Sets retention policy by tenant. Valid tenant types are `application`, `audit`, and `infrastructure`.
+<2> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream.
+
+. Apply the `LokiStack` CR:
++
+[source,terminal]
+----
+$ oc apply -f <filename>.yaml
+----
++
+[NOTE]
+====
+This configuration does not manage the retention for stored logs. Retention periods for stored logs, up to a supported maximum of 30 days, are configured with your object storage.
+====
diff --git a/modules/loki-sizing.adoc b/modules/loki-sizing.adoc
new file mode 100644
index 000000000000..58d1d676403c
--- /dev/null
+++ b/modules/loki-sizing.adoc
@@ -0,0 +1,90 @@
+:_mod-docs-content-type: CONCEPT
+[id="loki-sizing_{context}"]
+= Loki deployment sizing
+
+Sizing for Loki follows the format of `1x.<size>` where the value `1x` is the number of instances and `<size>` specifies performance capabilities.
+
+The `1x.pico` configuration defines a single Loki deployment with minimal resource and limit requirements, offering high availability (HA) support for all Loki components. This configuration is suited for deployments that do not require a single replication factor or auto-compaction.
+
+Disk requests are similar across size configurations, allowing customers to test different sizes to determine the best fit for their deployment needs.
+
+
+[IMPORTANT]
+====
+It is not possible to change the number `1x` for the deployment size.
+==== + +.Loki sizing +[cols="1h,5*",options="header"] +|=== +| +|1x.demo +|1x.pico [6.1+ only] +|1x.extra-small +|1x.small +|1x.medium + +|Data transfer +|Demo use only +|50GB/day +|100GB/day +|500GB/day +|2TB/day + +|Queries per second (QPS) +|Demo use only +|1-25 QPS at 200ms +|1-25 QPS at 200ms +|25-50 QPS at 200ms +|25-75 QPS at 200ms + +|Replication factor +|None +|2 +|2 +|2 +|2 + +|Total CPU requests +|None +|7 vCPUs +|14 vCPUs +|34 vCPUs +|54 vCPUs + +|Total CPU requests if using the ruler +|None +|8 vCPUs +|16 vCPUs +|42 vCPUs +|70 vCPUs + +|Total memory requests +|None +|17Gi +|31Gi +|67Gi +|139Gi + + +|Total memory requests if using the ruler +|None +|18Gi +|35Gi +|83Gi +|171Gi + +|Total disk requests +|40Gi +|590Gi +|430Gi +|430Gi +|590Gi + +|Total disk requests if using the ruler +|60Gi +|910Gi +|750Gi +|750Gi +|910Gi +|=== diff --git a/modules/loki-zone-aware-replication.adoc b/modules/loki-zone-aware-replication.adoc new file mode 100644 index 000000000000..96d60984e538 --- /dev/null +++ b/modules/loki-zone-aware-replication.adoc @@ -0,0 +1,29 @@ +:_mod-docs-content-type: CONCEPT +[id="loki-zone-aware-replication_{context}"] += Zone aware data replication + +The {loki-op} offers support for zone-aware data replication through pod topology spread constraints. Enabling this feature enhances reliability and safeguards against log loss in the event of a single zone failure. When configuring the deployment size as `1x.extra-small`, `1x.small`, or `1x.medium`, the `replication.factor` field is automatically set to 2. + +To ensure proper replication, you need to have at least as many availability zones as the replication factor specifies. While it is possible to have more availability zones than the replication factor, having fewer zones can lead to write failures. Each zone should host an equal number of instances for optimal operation. + +.Example LokiStack CR with zone replication enabled +[source,yaml] +---- +apiVersion: loki.grafana.com/v1 +kind: LokiStack +metadata: + name: logging-loki + namespace: openshift-logging +spec: + replicationFactor: 2 # <1> + replication: + factor: 2 # <2> + zones: + - maxSkew: 1 # <3> + topologyKey: topology.kubernetes.io/zone # <4> +---- +<1> Deprecated field, values entered are overwritten by `replication.factor`. +<2> This value is automatically set when deployment size is selected at setup. +<3> The maximum difference in number of pods between any two topology domains. The default is 1, and you cannot specify a value of 0. +<4> Defines zones in the form of a topology key that corresponds to a node label. + diff --git a/modules/loki-zone-fail-recovery.adoc b/modules/loki-zone-fail-recovery.adoc new file mode 100644 index 000000000000..a15282ff10a1 --- /dev/null +++ b/modules/loki-zone-fail-recovery.adoc @@ -0,0 +1,82 @@ +:_mod-docs-content-type: PROCEDURE +[id="loki-zone-fail-recovery_{context}"] += Recovering Loki pods from failed zones + +In {product-title} a zone failure happens when specific availability zone resources become inaccessible. Availability zones are isolated areas within a cloud provider's data center, aimed at enhancing redundancy and fault tolerance. If your {product-title} cluster is not configured to handle this, a zone failure can lead to service or data loss. + +Loki pods are part of a link:https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[StatefulSet], and they come with Persistent Volume Claims (PVCs) provisioned by a `StorageClass` object. Each Loki pod and its PVCs reside in the same zone. 
When a zone failure occurs in a cluster, the StatefulSet controller automatically attempts to recover the affected pods in the failed zone.
+
+[WARNING]
+====
+The following procedure deletes the PVCs in the failed zone, and all data contained therein. To avoid complete data loss, the replication factor field of the `LokiStack` CR should always be set to a value greater than 1 to ensure that Loki is replicating.
+====
+
+.Prerequisites
+* Verify your `LokiStack` CR has a replication factor greater than 1.
+* The control plane has detected the zone failure, and the nodes in the failed zone are marked by your cloud provider integration.
+
+The StatefulSet controller automatically attempts to reschedule pods in a failed zone. Because the associated PVCs are also in the failed zone, automatic rescheduling to a different zone does not work. You must manually delete the PVCs in the failed zone to allow successful re-creation of the stateful Loki pod and its provisioned PVC in the new zone.
+
+
+.Procedure
+. List the pods in `Pending` status by running the following command:
++
+[source,terminal]
+----
+$ oc get pods --field-selector status.phase==Pending -n openshift-logging
+----
++
+.Example `oc get pods` output
+[source,terminal]
+----
+NAME                           READY   STATUS    RESTARTS   AGE # <1>
+logging-loki-index-gateway-1   0/1     Pending   0          17m
+logging-loki-ingester-1        0/1     Pending   0          16m
+logging-loki-ruler-1           0/1     Pending   0          16m
+----
+<1> These pods are in `Pending` status because their corresponding PVCs are in the failed zone.
+
+. List the PVCs in `Pending` status by running the following command:
++
+[source,terminal]
+----
+$ oc get pvc -o=json -n openshift-logging | jq '.items[] | select(.status.phase == "Pending") | .metadata.name' -r
+----
++
+.Example `oc get pvc` output
+[source,terminal]
+----
+storage-logging-loki-index-gateway-1
+storage-logging-loki-ingester-1
+wal-logging-loki-ingester-1
+storage-logging-loki-ruler-1
+wal-logging-loki-ruler-1
+----
+
+. Delete the PVCs for a pod by running the following command:
++
+[source,terminal]
+----
+$ oc delete pvc <pvc_name> -n openshift-logging
+----
++
+. Delete the pod by running the following command:
++
+[source,terminal]
+----
+$ oc delete pod <pod_name> -n openshift-logging
+----
++
+After these objects have been successfully deleted, they should automatically be rescheduled in an available zone.
+
+[id="logging-loki-zone-fail-term-state_{context}"]
+== Troubleshooting PVC in a terminating state
+
+The PVCs might hang in the terminating state without being deleted if the PVC metadata finalizers are set to `kubernetes.io/pv-protection`. Removing the finalizers should allow the PVCs to delete successfully.
+
+* Remove the finalizer for each PVC by running the following command, then retry deletion:
++
+[source,terminal]
+----
+$ oc patch pvc <pvc_name> -p '{"metadata":{"finalizers":null}}' -n openshift-logging
+----
diff --git a/modules/nodes-scheduler-node-selectors-about.adoc b/modules/nodes-scheduler-node-selectors-about.adoc
index c139eb9ba70a..38e2158c1d1d 100644
--- a/modules/nodes-scheduler-node-selectors-about.adoc
+++ b/modules/nodes-scheduler-node-selectors-about.adoc
@@ -39,7 +39,6 @@ You cannot add a node selector directly to an existing scheduled pod. You must l
 +
 For example, the following `Node` object has the `region: east` label:
 +
-ifndef::openshift-origin[]
 .Sample `Node` object with a label
 [source,yaml]
 ----
@@ -66,35 +65,7 @@ metadata:
 #...
 ----
 <1> Labels to match the pod node selector.
-endif::openshift-origin[]
-ifdef::openshift-origin[]
-.Sample `Node` object with a label
-[source,yaml]
-----
-kind: Node
-apiVersion: v1
-metadata:
-  name: s1
-  selfLink: /api/v1/nodes/ip-10-0-131-14.ec2.internal
-  uid: 7bc2580a-8b8e-11e9-8e01-021ab4174c74
-  resourceVersion: '478704'
-  creationTimestamp: '2019-06-10T14:46:08Z'
-  labels:
-    kubernetes.io/os: linux
-    topology.kubernetes.io/zone: us-east-1a
-    node.openshift.io/os_version: '4.5'
-    node-role.kubernetes.io/worker: ''
-    topology.kubernetes.io/region: us-east-1
-    node.openshift.io/os_id: fedora
-    node.kubernetes.io/instance-type: m4.large
-    kubernetes.io/hostname: ip-10-0-131-14
-    kubernetes.io/arch: amd64
-    region: east <1>
-    type: user-node
-#...
-----
-<1> Labels to match the pod node selector.
-endif::openshift-origin[]
+
 +
 A pod has the `type: user-node,region: east` node selector:
 +
diff --git a/modules/nodes-scheduler-taints-tolerations-about.adoc b/modules/nodes-scheduler-taints-tolerations-about.adoc
index f9d82a2cb391..41dbe163c8fc 100644
--- a/modules/nodes-scheduler-taints-tolerations-about.adoc
+++ b/modules/nodes-scheduler-taints-tolerations-about.adoc
@@ -4,6 +4,7 @@
 // * post_installation_configuration/node-tasks.adoc
 // * observability/logging/scheduling_resources/logging-taints-tolerations.adoc
 
+// TODO RESOLVE DEPENDENCY
 ifeval::["{context}" == "nodes-scheduler-taints-tolerations"]
 :nodes-scheduler-taints-tolerations:
 endif::[]
diff --git a/modules/setting-up-log-collection.adoc b/modules/setting-up-log-collection.adoc
new file mode 100644
index 000000000000..d957f68df2df
--- /dev/null
+++ b/modules/setting-up-log-collection.adoc
@@ -0,0 +1,201 @@
+:_mod-docs-content-type: PROCEDURE
+[id="setting-up-log-collection_{context}"]
+= Setting up log collection
+
+This release of Cluster Logging requires administrators to explicitly grant log collection permissions to the service account associated with *ClusterLogForwarder*. This was not required in previous releases for the legacy logging scenario consisting of a *ClusterLogging* and, optionally, a *ClusterLogForwarder.logging.openshift.io* resource.
+
+The {clo} provides the `collect-audit-logs`, `collect-application-logs`, and `collect-infrastructure-logs` cluster roles, which enable the collector to collect audit logs, application logs, and infrastructure logs, respectively.
+
+Set up log collection by binding the required cluster roles to your service account.
+
+== Legacy service accounts
+To use the existing legacy service account `logcollector`, create the following *ClusterRoleBinding* objects:
+
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-user collect-application-logs system:serviceaccount:openshift-logging:logcollector
+----
+
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs system:serviceaccount:openshift-logging:logcollector
+----
+
+Additionally, create the following *ClusterRoleBinding* if you are collecting audit logs:
+
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-user collect-audit-logs system:serviceaccount:openshift-logging:logcollector
+----
+
+
+== Creating service accounts
+.Prerequisites
+
+* The {clo} is installed in the `openshift-logging` namespace.
+* You have administrator permissions.
+
+.Procedure
+
+. Create a service account for the collector. If you want to write logs to storage that requires a token for authentication, you must include a token in the service account.
+
+. Bind the appropriate cluster roles to the service account:
++
+.Example binding command
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-user <cluster_role_name> system:serviceaccount:<namespace>:<service_account_name>
+----
+
+=== Cluster Role Binding for your Service Account
+The role_binding.yaml file binds the ClusterLogging operator's ClusterRole to a specific ServiceAccount, allowing it to manage Kubernetes resources cluster-wide.

+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: manager-rolebinding
+roleRef: <1>
+  apiGroup: rbac.authorization.k8s.io <2>
+  kind: ClusterRole <3>
+  name: cluster-logging-operator <4>
+subjects: <5>
+  - kind: ServiceAccount <6>
+    name: cluster-logging-operator <7>
+    namespace: openshift-logging <8>
+----
+<1> roleRef: References the ClusterRole to which the binding applies.
+<2> apiGroup: Indicates the RBAC API group, specifying that the ClusterRole is part of Kubernetes' RBAC system.
+<3> kind: Specifies that the referenced role is a ClusterRole, which applies cluster-wide.
+<4> name: The name of the ClusterRole being bound to the ServiceAccount, here cluster-logging-operator.
+<5> subjects: Defines the entities (users or service accounts) that are being granted the permissions from the ClusterRole.
+<6> kind: Specifies that the subject is a ServiceAccount.
+<7> name: The name of the ServiceAccount being granted the permissions.
+<8> namespace: Indicates the namespace where the ServiceAccount is located.
+
+=== Writing application logs
+The write-application-logs-clusterrole.yaml file defines a ClusterRole that grants permissions to write application logs to the Loki logging application.
+
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cluster-logging-write-application-logs
+rules: <1>
+  - apiGroups: <2>
+      - loki.grafana.com <3>
+    resources: <4>
+      - application <5>
+    resourceNames: <6>
+      - logs <7>
+    verbs: <8>
+      - create <9>
+----
+<1> rules: Specifies the permissions granted by this ClusterRole.
+<2> apiGroups: Refers to the API group loki.grafana.com, which relates to the Loki logging system.
+<3> loki.grafana.com: The API group for managing Loki-related resources.
+<4> resources: The resource type that the ClusterRole grants permission to interact with.
+<5> application: Refers to the application resources within the Loki logging system.
+<6> resourceNames: Specifies the names of resources that this role can manage.
+<7> logs: Refers to the log resources that can be created.
+<8> verbs: The actions allowed on the resources.
+<9> create: Grants permission to create new logs in the Loki system.
+
+
+=== Writing audit logs
+The write-audit-logs-clusterrole.yaml file defines a ClusterRole that grants permissions to create audit logs in the Loki logging system.
+
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cluster-logging-write-audit-logs
+rules: <1>
+  - apiGroups: <2>
+      - loki.grafana.com <3>
+    resources: <4>
+      - audit <5>
+    resourceNames: <6>
+      - logs <7>
+    verbs: <8>
+      - create <9>
+----
+<1> rules: Defines the permissions granted by this ClusterRole.
+<2> apiGroups: Specifies the API group loki.grafana.com.
+<3> loki.grafana.com: The API group responsible for Loki logging resources.
+<4> resources: Refers to the resource type this role manages, in this case, audit.
+<5> audit: Specifies that the role manages audit logs within Loki.
+<6> resourceNames: Defines the specific resources that the role can access.
+<7> logs: Refers to the logs that can be managed under this role.
+<8> verbs: The actions allowed on the resources.
+<9> create: Grants permission to create new audit logs.
+
+=== Writing infrastructure logs
+The write-infrastructure-logs-clusterrole.yaml file defines a ClusterRole that grants permission to create infrastructure logs in the Loki logging system.
+
+.Sample YAML
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cluster-logging-write-infrastructure-logs
+rules: <1>
+  - apiGroups: <2>
+      - loki.grafana.com <3>
+    resources: <4>
+      - infrastructure <5>
+    resourceNames: <6>
+      - logs <7>
+    verbs: <8>
+      - create <9>
+----
+<1> rules: Specifies the permissions this ClusterRole grants.
+<2> apiGroups: Specifies the API group for Loki-related resources.
+<3> loki.grafana.com: The API group managing the Loki logging system.
+<4> resources: Defines the resource type that this role can interact with.
+<5> infrastructure: Refers to infrastructure-related resources that this role manages.
+<6> resourceNames: Specifies the names of resources this role can manage.
+<7> logs: Refers to the log resources related to infrastructure.
+<8> verbs: The actions permitted by this role.
+<9> create: Grants permission to create infrastructure logs in the Loki system.
+
+=== ClusterLogForwarder editor role
+The clusterlogforwarder-editor-role.yaml file defines a ClusterRole that allows users to manage ClusterLogForwarders in OpenShift.
+
+
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: clusterlogforwarder-editor-role
+rules: <1>
+  - apiGroups: <2>
+      - observability.openshift.io <3>
+    resources: <4>
+      - clusterlogforwarders <5>
+    verbs: <6>
+      - create <7>
+      - delete <8>
+      - get <9>
+      - list <10>
+      - patch <11>
+      - update <12>
+      - watch <13>
+----
+<1> rules: Specifies the permissions this ClusterRole grants.
+<2> apiGroups: Refers to the OpenShift-specific API group.
+<3> observability.openshift.io: The API group for managing observability resources, such as logging.
+<4> resources: Specifies the resources this role can manage.
+<5> clusterlogforwarders: Refers to the log forwarding resources in OpenShift.
+<6> verbs: Specifies the actions allowed on the ClusterLogForwarders.
+<7> create: Grants permission to create new ClusterLogForwarders.
+<8> delete: Grants permission to delete existing ClusterLogForwarders.
+<9> get: Grants permission to retrieve information about specific ClusterLogForwarders.
+<10> list: Allows listing all ClusterLogForwarders.
+<11> patch: Grants permission to partially modify ClusterLogForwarders.
+<12> update: Grants permission to update existing ClusterLogForwarders.
+<13> watch: Grants permission to monitor changes to ClusterLogForwarders.
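+
+As an illustration of applying these roles, the following sketch binds the application log write role and the editor role to a hypothetical service account `my-collector` in the `my-logging` namespace. Both names are placeholders, not values from this document; the commands reuse the same `oc adm policy` pattern shown earlier in this module:
+
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-user cluster-logging-write-application-logs system:serviceaccount:my-logging:my-collector
+----
+
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-user clusterlogforwarder-editor-role system:serviceaccount:my-logging:my-collector
+----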
diff --git a/modules/unmanaged-operators.adoc b/modules/unmanaged-operators.adoc index 673db4424cc9..01b046fba5bd 100644 --- a/modules/unmanaged-operators.adoc +++ b/modules/unmanaged-operators.adoc @@ -1,9 +1,4 @@ -// Module included in the following assemblies: -// -// * architecture/architecture-installation.adoc -// * updating/updating-cluster-within-minor.adoc -// * observability/logging/cluster-logging-support.adoc - +:_mod-docs-content-type: CONCEPT [id="unmanaged-operators_{context}"] = Support policy for unmanaged Operators diff --git a/observability/logging/_attributes copy b/observability/logging/_attributes copy new file mode 120000 index 000000000000..f27fd275ea6b --- /dev/null +++ b/observability/logging/_attributes copy @@ -0,0 +1 @@ +../_attributes/ \ No newline at end of file diff --git a/observability/logging/about-logging.adoc b/observability/logging/about-logging.adoc new file mode 100644 index 000000000000..5f54f9c7ee6b --- /dev/null +++ b/observability/logging/about-logging.adoc @@ -0,0 +1,46 @@ +:_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] +[id="about-logging"] += {product-title} overview +:context: about-logging + +toc::[] + +The `ClusterLogForwarder` custom resource (CR) is the central configuration point for log collection and forwarding. + +[id="inputs-and-outputs_{context}"] +== Inputs and outputs + +Inputs specify the sources of logs to be forwarded. Logging provides the following built-in input types that select logs from different parts of your cluster: + +* `application` +* `receiver` +* `infrastructure` +* `audit` + +You can also define custom inputs based on namespaces or pod labels to fine-tune log selection. + +Outputs define the destinations where logs are sent. Each output type has its own set of configuration options, allowing you to customize the behavior and authentication settings. + +[id="receiver-input-type_{context}"] +== Receiver input type +The receiver input type enables the Logging system to accept logs from external sources. It supports two formats for receiving logs: `http` and `syslog`. + +The `ReceiverSpec` field defines the configuration for a receiver input. + +[id="pipelines-and-filters_{context}"] +== Pipelines and filters + +Pipelines determine the flow of logs from inputs to outputs. A pipeline consists of one or more input refs, output refs, and optional filter refs. You can use filters to transform or drop log messages within a pipeline. The order of filters matters, as they are applied sequentially, and earlier filters can prevent log messages from reaching later stages. + +[id="operator-behavior_{context}"] +== Operator behavior + +The Cluster Logging Operator manages the deployment and configuration of the collector based on the `managementState` field of the `ClusterLogForwarder` resource: + +- When set to `Managed` (default), the Operator actively manages the logging resources to match the configuration defined in the spec. +- When set to `Unmanaged`, the Operator does not take any action, allowing you to manually manage the logging components. + +[id="validation_{context}"] +== Validation +Logging includes extensive validation rules and default values to ensure a smooth and error-free configuration experience. The `ClusterLogForwarder` resource enforces validation checks on required fields, dependencies between fields, and the format of input values. Default values are provided for certain fields, reducing the need for explicit configuration in common scenarios. 
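+
+[id="example-configuration_{context}"]
+== Example configuration
+
+The following is a minimal sketch of a `ClusterLogForwarder` CR that ties the concepts above together: one built-in input, one output, and one pipeline connecting them. The output name, URL, and service account name are hypothetical placeholders, not values taken from this document:
+
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  name: collector
+  namespace: openshift-logging
+spec:
+  managementState: Managed # default; set to Unmanaged to manage components manually
+  serviceAccount:
+    name: logcollector # placeholder service account with log collection permissions
+  outputs:
+  - name: my-http-sink # hypothetical destination
+    type: http
+    http:
+      url: https://log-sink.example.com # the URL is set under the output type specification
+  pipelines:
+  - name: application-logs
+    inputRefs:
+    - application # built-in input type
+    outputRefs:
+    - my-http-sink
+----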
diff --git a/observability/logging/logging_release_notes/cluster-logging-collector.adoc b/observability/logging/cluster-logging-collector.adoc similarity index 100% rename from observability/logging/logging_release_notes/cluster-logging-collector.adoc rename to observability/logging/cluster-logging-collector.adoc diff --git a/observability/logging/cluster-logging-deploying.adoc b/observability/logging/cluster-logging-deploying.adoc index e2c7bc5f92ec..2501f6e1ae4e 100644 --- a/observability/logging/cluster-logging-deploying.adoc +++ b/observability/logging/cluster-logging-deploying.adoc @@ -7,16 +7,14 @@ include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] -{Product-title} Operators use custom resources (CR) to manage applications and their components. High-level configuration and settings are provided by the user within a CR. The Operator translates high-level directives into low-level actions, based on best practices embedded within the Operator’s logic. A custom resource definition (CRD) defines a CR and lists all the configurations available to users of the Operator. Installing an Operator creates the CRDs, which are then used to generate CRs. +{product-title} Operators use custom resources (CRs) to manage applications and their components. You provide high-level configuration and settings through the CR. The Operator translates high-level directives into low-level actions, based on best practices embedded within the logic of the Operator. A custom resource definition (CRD) defines a CR and lists all the configurations available to users of the Operator. Installing an Operator creates the CRDs to generate CRs. [IMPORTANT] ==== -You must install the {clo} *after* the log store Operator. +You must install the {clo} after the log store Operator. ==== -You deploy {logging} by installing the {loki-op} or {es-op} to manage your log store, followed by the {clo} to manage the components of logging. You can use either the {product-title} web console or the {product-title} CLI to install or configure {logging}. - -include::snippets/logging-elastic-dep-snip.adoc[leveloffset=+1] +You deploy {logging} by installing the {loki-op} to manage your log store, followed by the {clo} to manage the components of logging. You can use either the {product-title} web console or the {oc-first} to install or configure {logging}. [TIP] ==== @@ -24,17 +22,13 @@ You can alternatively apply all example objects. ==== ifdef::openshift-origin[] -[id="prerequisites_cluster-logging-deploying"] +[id="prerequisites_cluster-logging-deploying_{context}"] == Prerequisites -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. +* You have downloaded the {cluster-manager-url-pull} as shown in "Obtaining the installation program" in the installation documentation for your platform. + -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red{nbsp}Hat Operators_. +If you have the pull secret, add the `redhat-operators` catalog to the `OperatorHub` custom resource (CR) as shown in "Configuring {product-title} to use Red{nbsp}Hat Operators". 
endif::[] -include::modules/logging-es-deploy-console.adoc[leveloffset=+1] - -include::modules/logging-es-deploy-cli.adoc[leveloffset=+1] - -- include::snippets/logging-retention-period-snip.adoc[leveloffset=+1] -- diff --git a/observability/logging/cluster-logging-support.adoc b/observability/logging/cluster-logging-support.adoc index 755fef3f272c..ef38d1a7962f 100644 --- a/observability/logging/cluster-logging-support.adoc +++ b/observability/logging/cluster-logging-support.adoc @@ -1,16 +1,16 @@ :_mod-docs-content-type: ASSEMBLY [id="cluster-logging-support"] += Cluster logging support include::_attributes/common-attributes.adoc[] -= Support :context: cluster-logging-support toc::[] include::snippets/logging-supported-config-snip.adoc[] include::snippets/logging-compatibility-snip.adoc[] -include::snippets/log6x-loki-statement-snip.adoc[] +include::snippets/logging-loki-statement-snip.adoc[] -{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. You can use it to forward logs to various supported systems. +{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. It is intended to be used for forwarding logs to various supported systems. {logging-uc} is not: @@ -26,52 +26,18 @@ include::snippets/log6x-loki-statement-snip.adoc[] The following table describes the supported {logging-uc} APIs. -.Loki API support states -[cols="3",options="header"] -|=== -|CustomResourceDefinition (CRD) -|ApiVersion -|Support state +include::snippets/logging-api-support-states-snip.adoc[] -|LokiStack -|lokistack.loki.grafana.com/v1 -|Supported from 5.5 - -|RulerConfig -|rulerconfig.loki.grafana/v1 -|Supported from 5.7 - -|AlertingRule -|alertingrule.loki.grafana/v1 -|Supported from 5.7 - -|RecordingRule -|recordingrule.loki.grafana/v1 -|Supported from 5.7 - -|LogFileMetricExporter -|LogFileMetricExporter.logging.openshift.io/v1alpha1 -|Supported from 5.8 - -|ClusterLogForwarder -|clusterlogforwarder.logging.openshift.io/v1 -|Supported from 4.5. -|=== - -include::modules/cluster-logging-maintenance-support-list.adoc[leveloffset=+1] +include::modules/cluster-logging-maintenance-support-list-6x.adoc[leveloffset=+1] include::modules/unmanaged-operators.adoc[leveloffset=+1] -[id="support-exception-for-coo-logging-ui-plugin_{context}"] -== Support exception for the Logging UI Plugin - -Until the General Availability (GA) of the Cluster Observability Operator (COO), which is currently in link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview] (TP), Red{nbsp}Hat provides support to customers who are using Logging 6.0 or later with the COO for its Logging UI Plugin on {product-title} 4.14 or later. This support exception is temporary as the COO includes several independent features, some of which are still TP features, but the Logging UI Plugin is ready for GA. [id="cluster-logging-support-must-gather_{context}"] -== Collecting logging data for Red Hat Support +== Collecting {logging} data for Red Hat Support When opening a support case, it is helpful to provide debugging information about your cluster to Red{nbsp}Hat Support. -You can use the xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. 
+You can use the link:https://docs.openshift.com/container-platform/latest/support/gathering-cluster-data.html#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. For prompt support, supply diagnostic information for both {product-title} and {logging}. include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+2] diff --git a/observability/logging/cluster-logging.adoc b/observability/logging/cluster-logging.adoc index 9587e169c6db..0922fcd96a66 100644 --- a/observability/logging/cluster-logging.adoc +++ b/observability/logging/cluster-logging.adoc @@ -11,40 +11,33 @@ As a cluster administrator, you can deploy {logging} on an {product-title} clust include::snippets/logging-kibana-dep-snip.adoc[] -{product-title} cluster administrators can deploy {logging} by using Operators. For information, see xref :../../observability/logging/cluster-logging-deploying.adoc#cluster-logging-deploying[Installing {logging}]. +{product-title} cluster administrators can deploy {logging} by using Operators. For information, see xref:../../observability/logging/cluster-logging-deploying.adoc#cluster-logging-deploying[Installing {logging}]. The Operators are responsible for deploying, upgrading, and maintaining {logging}. After the Operators are installed, you can create a `ClusterLogging` custom resource (CR) to schedule {logging} pods and other resources necessary to support {logging}. You can also create a `ClusterLogForwarder` CR to specify which logs are collected, how they are transformed, and where they are forwarded to. -[NOTE] -==== -Because the internal {product-title} Elasticsearch log store does not provide secure storage for audit logs, audit logs are not stored in the internal Elasticsearch instance by default. If you want to send the audit logs to the default internal Elasticsearch log store, for example to view the audit logs in Kibana, you must use the Log Forwarding API as described in xref :../../observability/logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Forward audit logs to the log store]. -==== - include::modules/logging-architecture-overview.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref :../../observability/logging/log_visualization/log-visualization-ocp-console.adoc#log-visualization-ocp-console[Log visualization with the web console] +* xref:../../observability/logging/log_visualization/log-visualization-ocp-console.adoc#log-visualization-ocp-console[Log visualization with the web console] include::modules/cluster-logging-about.adoc[leveloffset=+1] ifdef::openshift-rosa,openshift-dedicated[] include::modules/cluster-logging-cloudwatch.adoc[leveloffset=+1] -For information, see xref :../../observability/logging/log_collection_forwarding/log-forwarding.adoc#about-log-collection_log-forwarding[About log collection and forwarding]. +For information, see xref:../../observability/logging/log_collection_forwarding/log-forwarding.adoc#about-log-collection_log-forwarding[About log collection and forwarding]. endif::[] include::modules/cluster-logging-json-logging-about.adoc[leveloffset=+2] include::modules/cluster-logging-collecting-storing-kubernetes-events.adoc[leveloffset=+2] -For information, see xref :../../observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[About collecting and storing Kubernetes events]. 
+For information, see xref:../../observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[About collecting and storing Kubernetes events]. include::modules/cluster-logging-troubleshoot-logging.adoc[leveloffset=+2] include::modules/cluster-logging-export-fields.adoc[leveloffset=+2] -For information, see xref :../../observability/logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields[About exporting fields]. - include::modules/cluster-logging-eventrouter-about.adoc[leveloffset=+2] -For information, see xref :../../observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[Collecting and storing Kubernetes events]. +For information, see xref:../../observability/logging/log_collection_forwarding/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[Collecting and storing Kubernetes events]. diff --git a/observability/logging/configuring-lokistack-storage.adoc b/observability/logging/configuring-lokistack-storage.adoc new file mode 100644 index 000000000000..3a4894a64f35 --- /dev/null +++ b/observability/logging/configuring-lokistack-storage.adoc @@ -0,0 +1,49 @@ +:_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] +[id="configuring-lokistack-storage"] += Configuring LokiStack storage +:context: configuring-lokistack-storage + +toc::[] + +You can configure a `LokiStack` custom resource (CR) to store application, audit, and infrastructure-related logs. + +include::snippets/loki-statement-snip.adoc[leveloffset=+1] + +include::modules/loki-sizing.adoc[leveloffset=+1] + +[id="prerequisites_{context}"] +== Prerequisites + +* You have installed the {loki-op} by using the command-line interface (CLI) or web console. +* You have created a `serviceAccount` CR in the same namespace as the `ClusterLogForwarder` CR. +* You have assigned the `collect-audit-logs`, `collect-application-logs`, and `collect-infrastructure-logs` cluster roles to the `serviceAccount` CR. + +[id="setup_{context}"] +== Core set up and configuration + +Use role-based access controls, basic monitoring, and pod placement to deploy Loki. + +include::modules/loki-rbac-rules-permissions.adoc[leveloffset=+2] +include::modules/enabling-loki-alerts.adoc[leveloffset=+2] +include::modules/loki-memberlist-ip.adoc[leveloffset=+2] +include::modules/loki-retention.adoc[leveloffset=+2] +include::modules/loki-pod-placement.adoc[leveloffset=+2] + +[id="performance_{context}"] +== Enhanced reliability and performance + +Use the following configurations to ensure reliability and efficiency of Loki in production. + +include::modules/identity-federation.adoc[leveloffset=+2] +include::modules/loki-reliability-hardening.adoc[leveloffset=+2] +include::modules/loki-restart-hardening.adoc[leveloffset=+2] + +[id="advanced_{context}"] +== Advanced deployment and scalability + +To configure high availability, scalability, and error handling, use the following information. 
+ +include::modules/loki-zone-aware-replication.adoc[leveloffset=+2] +include::modules/loki-zone-fail-recovery.adoc[leveloffset=+2] +include::modules/loki-rate-limit-errors.adoc[leveloffset=+2] diff --git a/observability/logging/log_visualization/logging-kibana.adoc b/observability/logging/log_visualization/logging-kibana.adoc index 27aaf807cabd..3a420b85f829 100644 --- a/observability/logging/log_visualization/logging-kibana.adoc +++ b/observability/logging/log_visualization/logging-kibana.adoc @@ -16,12 +16,12 @@ Using Kibana, you can do the following with your data: * Create and view custom dashboards using the *Dashboard* tab. Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information about using the interface, see the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Kibana documentation]. - +//// [NOTE] ==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the xref:../../../observability/logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Log Forwarding API] to configure a pipeline that uses the `default` output for audit logs. +The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the xref:../../observability/logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Log Forwarding API] to configure a pipeline that uses the `default` output for audit logs. ==== - +//// include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+1] include::modules/cluster-logging-visualizer-kibana.adoc[leveloffset=+1] diff --git a/observability/logging/logging-5-8-release-notes.adoc b/observability/logging/logging-5-8-release-notes.adoc new file mode 100644 index 000000000000..f9c035bd7574 --- /dev/null +++ b/observability/logging/logging-5-8-release-notes.adoc @@ -0,0 +1,21 @@ +:_mod-docs-content-type: ASSEMBLY +[id="logging-5-8-release-notes"] +include::_attributes/common-attributes.adoc[] += Logging 5.8 release notes +:context: logging-5-8-release-notes + +toc::[] + +include::snippets/logging-compatibility-snip.adoc[] + +include::snippets/logging-stable-updates-snip.adoc[] + +include::modules/logging-release-notes-5-8-4.adoc[leveloffset=+1] + +include::modules/logging-release-notes-5-8-3.adoc[leveloffset=+1] + +include::modules/logging-release-notes-5-8-2.adoc[leveloffset=+1] + +include::modules/logging-release-notes-5-8-1.adoc[leveloffset=+1] + +include::modules/logging-release-notes-5-8-0.adoc[leveloffset=+1] diff --git a/observability/logging/logging-visualization.adoc b/observability/logging/logging-visualization.adoc new file mode 100644 index 000000000000..f2d167034be9 --- /dev/null +++ b/observability/logging/logging-visualization.adoc @@ -0,0 +1,9 @@ +:_mod-docs-content-type: ASSEMBLY +[id="logging-visualization"] += Visualization for logging +include::_attributes/common-attributes.adoc[] +:context: logging-visualization + +toc::[] + +Visualization for logging is provided by deploying the link:https://docs.openshift.com/container-platform/latest/observability/cluster_observability_operator/ui_plugins/logging-ui-plugin.adoc#logging-ui-plugin[Logging UI Plugin] of the 
link:https://docs.openshift.com/container-platform/latest/observability/cluster_observability_operator/cluster-observability-operator-overview.adoc#cluster-observability-operator-overview[Cluster Observability Operator], which requires Operator installation. diff --git a/observability/logging/logging_release_notes/cluster-logging-deploying.adoc b/observability/logging/logging_release_notes/cluster-logging-deploying.adoc deleted file mode 100644 index 9a1fce7ddcb5..000000000000 --- a/observability/logging/logging_release_notes/cluster-logging-deploying.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: cluster-logging-deploying -[id="cluster-logging-deploying"] -= Installing Logging -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{product-title} Operators use custom resources (CRs) to manage applications and their components. You provide high-level configuration and settings through the CR. The Operator translates high-level directives into low-level actions, based on best practices embedded within the logic of the Operator. A custom resource definition (CRD) defines a CR and lists all the configurations available to users of the Operator. Installing an Operator creates the CRDs to generate CRs. - -[IMPORTANT] -==== -You must install the {clo} after the log store Operator. -==== - -You deploy {logging} by installing the {loki-op} to manage your log store, followed by the {clo} to manage the components of logging. You can use either the {product-title} web console or the {oc-first} to install or configure {logging}. - -[TIP] -==== -You can alternatively apply all example objects. -==== - -ifdef::openshift-origin[] -[id="prerequisites_cluster-logging-deploying_{context}"] -== Prerequisites -* You have downloaded the {cluster-manager-url-pull} as shown in "Obtaining the installation program" in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the `OperatorHub` custom resource (CR) as shown in "Configuring {product-title} to use Red{nbsp}Hat Operators". 
-endif::[] - --- -include::snippets/logging-retention-period-snip.adoc[leveloffset=+1] --- - -include::modules/logging-loki-cli-install.adoc[leveloffset=+1] - -include::modules/logging-loki-gui-install.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../../networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc#ovn-k-network-policy[About OVN-Kubernetes network policy] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.html[About the OVN-Kubernetes default Container Network Interface (CNI) network provider] -endif::[] diff --git a/observability/logging/logging_release_notes/cluster-logging-support.adoc b/observability/logging/logging_release_notes/cluster-logging-support.adoc deleted file mode 100644 index 2852d5f73c90..000000000000 --- a/observability/logging/logging_release_notes/cluster-logging-support.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cluster-logging-support"] -include::_attributes/common-attributes.adoc[] -= Support -:context: cluster-logging-support - -toc::[] - -include::snippets/logging-supported-config-snip.adoc[] -include::snippets/logging-compatibility-snip.adoc[] -include::snippets/log6x-loki-statement-snip.adoc[] - -{logging-uc} {for} is an opinionated collector and normalizer of application, infrastructure, and audit logs. It is intended to be used for forwarding logs to various supported systems. - -{logging-uc} is not: - -* A high scale log collection system -* Security Information and Event Monitoring (SIEM) compliant -* A "bring your own" (BYO) log collector configuration -* Historical or long term log retention or storage -* A guaranteed log sink -* Secure storage - audit logs are not stored by default - -[id="cluster-logging-support-CRDs_{context}"] -== Supported API custom resource definitions - -The following table describes the supported {logging-uc} APIs. - -.Loki API support states -[cols="3",options="header"] -|=== -|CustomResourceDefinition (CRD) -|ApiVersion -|Support state - -|LokiStack -|lokistack.loki.grafana.com/v1 -|Supported from 5.5 - -|RulerConfig -|rulerconfig.loki.grafana/v1 -|Supported from 5.7 - -|AlertingRule -|alertingrule.loki.grafana/v1 -|Supported from 5.7 - -|RecordingRule -|recordingrule.loki.grafana/v1 -|Supported from 5.7 - -|LogFileMetricExporter -|LogFileMetricExporter.logging.openshift.io/v1alpha1 -|Supported from 5.8 - -|ClusterLogForwarder -|clusterlogforwarder.logging.openshift.io/v1 -|Supported from 4.5. -|=== - -include::modules/cluster-logging-maintenance-support-list.adoc[leveloffset=+1] -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="support-exception-for-coo-logging-ui-plugin_{context}"] -== Support exception for the Logging UI Plugin - -Until the approaching General Availability (GA) release of the Cluster Observability Operator (COO), which is currently in link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview] (TP), Red{nbsp}Hat provides support to customers who are using Logging 6.0 or later with the COO for its Logging UI Plugin on {product-title} 4.14 or later. This support exception is temporary as the COO includes several independent features, some of which are still TP features, but the Logging UI Plugin is ready for GA. 
- -[id="cluster-logging-support-must-gather_{context}"] -== Collecting logging data for Red Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red{nbsp}Hat Support. - -You can use the xref:../../../support/gathering-cluster-data.adoc#gathering-cluster-data[must-gather tool] to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. -For prompt support, supply diagnostic information for both {product-title} and {logging}. - -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+2] -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+2] diff --git a/observability/logging/logging_release_notes/installing-logging.adoc b/observability/logging/logging_release_notes/installing-logging.adoc new file mode 100644 index 000000000000..7fdf553540ee --- /dev/null +++ b/observability/logging/logging_release_notes/installing-logging.adoc @@ -0,0 +1,53 @@ +:_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] +[id="installing-logging"] += Installing Logging +:context: installing-logging + +toc::[] + +{product-title} Operators use custom resources (CRs) to manage applications and their components. You provide high-level configuration and settings through the CR. The Operator translates high-level directives into low-level actions, based on best practices embedded within the logic of the Operator. A custom resource definition (CRD) defines a CR and lists all the configurations available to users of the Operator. Installing an Operator creates the CRDs to generate CRs. + + +To get started with {logging}, you must install the following Operators: + +* {loki-op} to manage your log store. +* {clo} to manage log collection and forwarding. +* {coo-first} to manage visualization. + +You can use either the {product-title} web console or the {product-title} CLI to install or configure {logging}. + +[IMPORTANT] +==== +You must configure the {clo} after the {loki-op}. +==== + + + +[id="prerequisites_cluster-logging-deploying_{context}"] +== Prerequisites +* If you are using OKD, you have downloaded the {cluster-manager-url-pull} as shown in "Obtaining the installation program" in the installation documentation for your platform. ++ +If you have the pull secret, add the `redhat-operators` catalog to the `OperatorHub` custom resource (CR) as shown in "Configuring {product-title} to use Red{nbsp}Hat Operators". + + +[id="installing-loki-and-logging-cli_{context}"] +== Installation by using the CLI + +The following sections describe installing the {loki-op} and the {clo} by using the CLI. + +include::modules/installing-loki-operator-cli.adoc[leveloffset=+2] +include::modules/installing-logging-operator-cli.adoc[leveloffset=+2] + +[id="installing-loki-and-logging-gui_{context}"] +== Installation by using the web console + +The following sections describe installing the {loki-op} and the {clo} by using the web console. 
+
+include::modules/installing-loki-operator-web-console.adoc[leveloffset=+2]
+include::modules/installing-logging-operator-web-console.adoc[leveloffset=+2]
+
+[role="_additional-resources"]
+.Additional resources
+
+* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.html[About the OVN-Kubernetes network policy]
diff --git a/observability/logging/logging_release_notes/upgrading/docinfo.xml b/observability/logging/logging_release_notes/upgrading/docinfo.xml
new file mode 100644
index 000000000000..44b523428092
--- /dev/null
+++ b/observability/logging/logging_release_notes/upgrading/docinfo.xml
@@ -0,0 +1,11 @@
+Upgrading logging
+{product-title}
+{product-version}
+Upgrading older versions and upgrade paths
+
+    This document includes information about how to upgrade older versions of logging, and what upgrade paths are supported.
+
+
+    Red Hat OpenShift Documentation Team
+
+
diff --git a/observability/logging/upgrading-to-logging-60.adoc b/observability/logging/upgrading-to-logging-60.adoc
new file mode 100644
index 000000000000..557434fceeda
--- /dev/null
+++ b/observability/logging/upgrading-to-logging-60.adoc
@@ -0,0 +1,477 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/common-attributes.adoc[]
+[id="upgrading-to-logging-60"]
+= Upgrading to Logging 6.0
+:context: upgrading-to-logging-60
+
+toc::[]
+
+Logging v6.0 is a significant upgrade from previous releases, achieving several longstanding goals of Cluster Logging:
+
+* Introduction of distinct operators to manage logging components (for example, collectors, storage, and visualization).
+* Removal of support for managed log storage and visualization based on Elastic products (Elasticsearch and Kibana).
+* Deprecation of the Fluentd log collector implementation.
+* Removal of support for `ClusterLogging.logging.openshift.io` and `ClusterLogForwarder.logging.openshift.io` resources.
+
+[NOTE]
+====
+The *cluster-logging-operator* does not provide an automated upgrade process.
+====
+
+Because there are many possible configurations for log collection, forwarding, and storage, no automated upgrade is provided by the *cluster-logging-operator*. This documentation assists administrators in converting existing `ClusterLogging.logging.openshift.io` and `ClusterLogForwarder.logging.openshift.io` specifications to the new API. Examples of migrated `ClusterLogForwarder.observability.openshift.io` resources for common use cases are included.
+
+include::modules/logging-oc-explain.adoc[leveloffset=+1]
+
+== Log Storage
+
+The only managed log storage solution available in this release is a LokiStack, managed by the *loki-operator*. This solution, previously available as the preferred alternative to the managed Elasticsearch offering, remains unchanged in its deployment process.
+
+[IMPORTANT]
+====
+To continue using an existing Red Hat managed Elasticsearch or Kibana deployment provided by the *elasticsearch-operator*, remove the owner references from the `Elasticsearch` resource named `elasticsearch`, and the `Kibana` resource named `kibana` in the `openshift-logging` namespace before removing the `ClusterLogging` resource named `instance` in the same namespace.
+====
+
+
+. Temporarily set *ClusterLogging* to state `Unmanaged`
++
+[source,terminal]
+----
+$ oc -n openshift-logging patch clusterlogging/instance -p '{"spec":{"managementState": "Unmanaged"}}' --type=merge
+----
+
+. 
+. Remove the *ClusterLogging* `ownerReferences` from the *Elasticsearch* resource:
++
+The following command ensures that *ClusterLogging* no longer owns the *Elasticsearch* resource. Updates to the `logStore` field of the *ClusterLogging* resource no longer affect the *Elasticsearch* resource.
++
+[source,terminal]
+----
+$ oc -n openshift-logging patch elasticsearch/elasticsearch -p '{"metadata":{"ownerReferences": []}}' --type=merge
+----
+
+. Remove the *ClusterLogging* `ownerReferences` from the *Kibana* resource:
++
+The following command ensures that *ClusterLogging* no longer owns the *Kibana* resource. Updates to the `visualization` field of the *ClusterLogging* resource no longer affect the *Kibana* resource.
++
+[source,terminal]
+----
+$ oc -n openshift-logging patch kibana/kibana -p '{"metadata":{"ownerReferences": []}}' --type=merge
+----
+
+. Set *ClusterLogging* back to the `Managed` state:
++
+[source,terminal]
+----
+$ oc -n openshift-logging patch clusterlogging/instance -p '{"spec":{"managementState": "Managed"}}' --type=merge
+----
+
+== Log Visualization
+
+The OpenShift console UI plugin for log visualization has moved from the *cluster-logging-operator* to the *cluster-observability-operator*.
+// Pending support statement.
+
+== Log Collection and Forwarding
+// Can't link to github, need to figure a workaround.
+
+Log collection and forwarding configurations are now specified under the new link:https://github.com/openshift/cluster-logging-operator/blob/master/docs/reference/operator/api_observability_v1.adoc[API], which is part of the `observability.openshift.io` API group. The following sections highlight the differences from the old API resources.
+
+[NOTE]
+====
+Vector is the only supported collector implementation.
+====
+
+== Management, Resource Allocation, and Workload Scheduling
+
+Configuration for the management state (for example, `Managed` or `Unmanaged`), resource requests and limits, tolerations, and node selection is now part of the new *ClusterLogForwarder* API.
+
+.Previous Configuration
+[source,yaml]
+----
+apiVersion: "logging.openshift.io/v1"
+kind: "ClusterLogging"
+spec:
+  managementState: "Managed"
+  collection:
+    resources:
+      limits: {}
+      requests: {}
+    nodeSelector: {}
+    tolerations: []
+----
+
+.Current Configuration
+[source,yaml]
+----
+apiVersion: "observability.openshift.io/v1"
+kind: ClusterLogForwarder
+spec:
+  managementState: Managed
+  collector:
+    resources:
+      limits: {}
+      requests: {}
+    nodeSelector: {}
+    tolerations: []
+----
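+
+For illustration, the following is a minimal sketch of these fields filled in under the new API. The resource values and the `collecting` taint key are illustrative assumptions, not recommendations:
+
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  name: instance
+  namespace: openshift-logging
+spec:
+  managementState: Managed
+  collector:
+    resources:
+      requests:
+        cpu: 500m
+        memory: 1Gi
+      limits:
+        memory: 2Gi
+    tolerations:
+    - key: collecting            # lets collector pods also run on nodes carrying this taint
+      operator: Exists
+      effect: NoSchedule
+----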
+
+== Input Specifications
+
+The input specification is an optional part of the *ClusterLogForwarder* specification. Administrators can continue to use the predefined values of *application*, *infrastructure*, and *audit* to collect these sources.
+
+=== Application Inputs
+
+Namespace and container inclusions and exclusions have been consolidated into a single field, as shown in the following comparison.
+
+.5.9 Application Input with Namespace and Container Includes and Excludes
+[source,yaml]
+----
+apiVersion: "logging.openshift.io/v1"
+kind: ClusterLogForwarder
+spec:
+  inputs:
+  - name: application-logs
+    type: application
+    application:
+      namespaces:
+      - foo
+      - bar
+      includes:
+      - namespace: my-important
+        container: main
+      excludes:
+      - container: too-verbose
+----
+
+.6.0 Application Input with Namespace and Container Includes and Excludes
+[source,yaml]
+----
+apiVersion: "observability.openshift.io/v1"
+kind: ClusterLogForwarder
+spec:
+  inputs:
+  - name: application-logs
+    type: application
+    application:
+      includes:
+      - namespace: foo
+      - namespace: bar
+      - namespace: my-important
+        container: main
+      excludes:
+      - container: too-verbose
+----
+
+[NOTE]
+====
+*application*, *infrastructure*, and *audit* are reserved words and cannot be used as names when defining an input.
+====
+
+=== Input Receivers
+
+Changes to input receivers include:
+
+* Explicit configuration of the type at the receiver level.
+* Port settings moved to the receiver level.
+
+.5.9 Input Receivers
+[source,yaml]
+----
+apiVersion: "logging.openshift.io/v1"
+kind: ClusterLogForwarder
+spec:
+  inputs:
+  - name: an-http
+    receiver:
+      http:
+        port: 8443
+        format: kubeAPIAudit
+  - name: a-syslog
+    receiver:
+      type: syslog
+      syslog:
+        port: 9442
+----
+
+.6.0 Input Receivers
+[source,yaml]
+----
+apiVersion: "observability.openshift.io/v1"
+kind: ClusterLogForwarder
+spec:
+  inputs:
+  - name: an-http
+    type: receiver
+    receiver:
+      type: http
+      port: 8443
+      http:
+        format: kubeAPIAudit
+  - name: a-syslog
+    type: receiver
+    receiver:
+      type: syslog
+      port: 9442
+----
+
+== Output Specifications
+
+High-level changes to output specifications include:
+
+* URL settings moved to each output type specification.
+* Tuning parameters moved to each output type specification.
+* Separation of TLS configuration from authentication.
+* Explicit configuration of keys and secret/configmap for TLS and authentication.
+
+== Secrets and TLS Configuration
+
+Secrets and TLS configurations are now separated into authentication and TLS configuration for each output. They must be explicitly defined in the specification rather than relying on administrators to define secrets with recognized keys. To continue using existing secrets after an upgrade, administrators must know which keys the old API recognized so that they can reference those keys explicitly in the new TLS and authentication configuration. The examples in the following sections provide details on how to configure *ClusterLogForwarder* secrets to forward to existing Red Hat managed log storage solutions.
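+
+For example, if an existing secret does not already contain the key names that the new configuration references, you can create one that does. The following is a minimal sketch; the secret name `collector` matches the examples that follow, but the local certificate file names are assumptions:
+
+[source,terminal]
+----
+$ oc -n openshift-logging create secret generic collector \
+    --from-file=ca-bundle.crt=ca.crt \
+    --from-file=tls.crt=client.crt \
+    --from-file=tls.key=client.key
+----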
+
+== Red Hat Managed Elasticsearch
+
+.v5.9 Forwarding to Red Hat Managed Elasticsearch
+[source,yaml]
+----
+apiVersion: logging.openshift.io/v1
+kind: ClusterLogging
+metadata:
+  name: instance
+  namespace: openshift-logging
+spec:
+  logStore:
+    type: elasticsearch
+----
+
+.v6.0 Forwarding to Red Hat Managed Elasticsearch
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  name: instance
+  namespace: openshift-logging
+spec:
+  serviceAccount:
+    name: <service_account_name>
+  managementState: Managed
+  outputs:
+  - name: audit-elasticsearch
+    type: elasticsearch
+    elasticsearch:
+      url: https://elasticsearch:9200
+      version: 6
+      index: audit-write
+    tls:
+      ca:
+        key: ca-bundle.crt
+        secretName: collector
+      certificate:
+        key: tls.crt
+        secretName: collector
+      key:
+        key: tls.key
+        secretName: collector
+  - name: app-elasticsearch
+    type: elasticsearch
+    elasticsearch:
+      url: https://elasticsearch:9200
+      version: 6
+      index: app-write
+    tls:
+      ca:
+        key: ca-bundle.crt
+        secretName: collector
+      certificate:
+        key: tls.crt
+        secretName: collector
+      key:
+        key: tls.key
+        secretName: collector
+  - name: infra-elasticsearch
+    type: elasticsearch
+    elasticsearch:
+      url: https://elasticsearch:9200
+      version: 6
+      index: infra-write
+    tls:
+      ca:
+        key: ca-bundle.crt
+        secretName: collector
+      certificate:
+        key: tls.crt
+        secretName: collector
+      key:
+        key: tls.key
+        secretName: collector
+  pipelines:
+  - name: app
+    inputRefs:
+    - application
+    outputRefs:
+    - app-elasticsearch
+  - name: audit
+    inputRefs:
+    - audit
+    outputRefs:
+    - audit-elasticsearch
+  - name: infra
+    inputRefs:
+    - infrastructure
+    outputRefs:
+    - infra-elasticsearch
+----
+
+== Red Hat Managed LokiStack
+
+.v5.9 Forwarding to Red Hat Managed LokiStack
+[source,yaml]
+----
+apiVersion: logging.openshift.io/v1
+kind: ClusterLogging
+metadata:
+  name: instance
+  namespace: openshift-logging
+spec:
+  logStore:
+    type: lokistack
+    lokistack:
+      name: logging-loki
+----
+
+.v6.0 Forwarding to Red Hat Managed LokiStack
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  name: instance
+  namespace: openshift-logging
+spec:
+  serviceAccount:
+    name: <service_account_name>
+  outputs:
+  - name: default-lokistack
+    type: lokiStack
+    lokiStack:
+      target:
+        name: logging-loki
+        namespace: openshift-logging
+      authentication:
+        token:
+          from: serviceAccount
+    tls:
+      ca:
+        key: service-ca.crt
+        configMapName: openshift-service-ca.crt
+  pipelines:
+  - name: default-lokistack
+    inputRefs:
+    - application
+    - infrastructure
+    outputRefs:
+    - default-lokistack
+----
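+
+The v6.0 examples assume that the service account referenced by `<service_account_name>` already exists and is authorized to collect and write logs. As a sketch, assuming an account named `collector`, you might create and bind it as follows; verify that the cluster roles used here are present on your cluster:
+
+[source,terminal]
+----
+$ oc create serviceaccount collector -n openshift-logging
+$ oc adm policy add-cluster-role-to-user logging-collector-logs-writer -z collector -n openshift-logging
+$ oc adm policy add-cluster-role-to-user collect-application-logs -z collector -n openshift-logging
+$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs -z collector -n openshift-logging
+----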
+
+== Filters and Pipeline Configuration
+
+Pipeline configurations now define only the routing of input sources to their output destinations, with any required transformations configured separately as filters. All attributes of pipelines from previous releases have been converted to filters in this release. Individual filters are defined in the `filters` specification and referenced by a pipeline.
+
+.5.9 Filters
+[source,yaml]
+----
+apiVersion: logging.openshift.io/v1
+kind: ClusterLogForwarder
+spec:
+  pipelines:
+  - name: application-logs
+    parse: json
+    labels:
+      foo: bar
+    detectMultilineErrors: true
+----
+
+.6.0 Filter Configuration
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+spec:
+  filters:
+  - name: detectexception
+    type: detectMultilineException
+  - name: parse-json
+    type: parse
+  - name: labels
+    type: openshiftLabels
+    openshiftLabels:
+      foo: bar
+  pipelines:
+  - name: application-logs
+    filterRefs:
+    - detectexception
+    - labels
+    - parse-json
+----
+
+== Validation and Status
+
+Most validations are enforced when a resource is created or updated, providing immediate feedback. This is a departure from previous releases, where validation occurred after creation and required inspecting the resource status. Some validation still occurs after creation for cases where validation at creation or update time is not possible.
+
+Instances of `ClusterLogForwarder.observability.openshift.io` must satisfy the `Authorized`, `Valid`, and `Ready` conditions before the Operator deploys the log collector. An example of these conditions is:
+
+.6.0 Status Conditions
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+status:
+  conditions:
+  - lastTransitionTime: "2024-09-13T03:28:44Z"
+    message: 'permitted to collect log types: [application]'
+    reason: ClusterRolesExist
+    status: "True"
+    type: observability.openshift.io/Authorized
+  - lastTransitionTime: "2024-09-13T12:16:45Z"
+    message: ""
+    reason: ValidationSuccess
+    status: "True"
+    type: observability.openshift.io/Valid
+  - lastTransitionTime: "2024-09-13T12:16:45Z"
+    message: ""
+    reason: ReconciliationComplete
+    status: "True"
+    type: Ready
+  filterConditions:
+  - lastTransitionTime: "2024-09-13T13:02:59Z"
+    message: filter "detectexception" is valid
+    reason: ValidationSuccess
+    status: "True"
+    type: observability.openshift.io/ValidFilter-detectexception
+  - lastTransitionTime: "2024-09-13T13:02:59Z"
+    message: filter "parse-json" is valid
+    reason: ValidationSuccess
+    status: "True"
+    type: observability.openshift.io/ValidFilter-parse-json
+  inputConditions:
+  - lastTransitionTime: "2024-09-13T12:23:03Z"
+    message: input "application1" is valid
+    reason: ValidationSuccess
+    status: "True"
+    type: observability.openshift.io/ValidInput-application1
+  outputConditions:
+  - lastTransitionTime: "2024-09-13T13:02:59Z"
+    message: output "default-lokistack-application1" is valid
+    reason: ValidationSuccess
+    status: "True"
+    type: observability.openshift.io/ValidOutput-default-lokistack-application1
+  pipelineConditions:
+  - lastTransitionTime: "2024-09-13T03:28:44Z"
+    message: pipeline "default-before" is valid
+    reason: ValidationSuccess
+    status: "True"
+    type: observability.openshift.io/ValidPipeline-default-before
+----
+
+[NOTE]
+====
+Conditions that are satisfied and applicable have a `status` value of `"True"`. Conditions with a status other than `"True"` provide a reason and a message explaining the issue.
+====
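+
+As a quick check from the CLI, you can wait for the `Ready` condition. This is a sketch that assumes the forwarder is named `instance` in the `openshift-logging` namespace:
+
+[source,terminal]
+----
+$ oc -n openshift-logging wait clusterlogforwarders.observability.openshift.io/instance \
+    --for=condition=Ready --timeout=300s
+----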
diff --git a/snippets/logging-api-support-states-snip.adoc b/snippets/logging-api-support-states-snip.adoc
new file mode 100644
index 000000000000..d770eee82bf3
--- /dev/null
+++ b/snippets/logging-api-support-states-snip.adoc
@@ -0,0 +1,33 @@
+:_mod-docs-content-type: SNIPPET
+
+.Logging API support states
+[cols="3",options="header"]
+|===
+|CustomResourceDefinition (CRD)
+|API version
+|Support state
+
+|LokiStack
+|lokistack.loki.grafana.com/v1
+|Supported from 5.5
+
+|RulerConfig
+|rulerconfig.loki.grafana.com/v1
+|Supported from 5.7
+
+|AlertingRule
+|alertingrule.loki.grafana.com/v1
+|Supported from 5.7
+
+|RecordingRule
+|recordingrule.loki.grafana.com/v1
+|Supported from 5.7
+
+|LogFileMetricExporter
+|logfilemetricexporter.logging.openshift.io/v1alpha1
+|Supported from 5.8
+
+|ClusterLogForwarder
+|clusterlogforwarder.observability.openshift.io/v1
+|Supported from 6.0
+|===
diff --git a/snippets/loki-statement-snip.adoc b/snippets/loki-statement-snip.adoc
new file mode 100644
index 000000000000..c0a1221f1e17
--- /dev/null
+++ b/snippets/loki-statement-snip.adoc
@@ -0,0 +1,8 @@
+:_mod-docs-content-type: SNIPPET
+
+Loki is a horizontally scalable, highly available, multi-tenant log aggregation system that is offered as a GA log store for {logging} {for} and that can be visualized with the OpenShift {ObservabilityShortName} UI. The Loki configuration provided by OpenShift {logging-uc} is a short-term log store designed to enable users to perform fast troubleshooting with the collected logs. For that purpose, the {logging} {for} configuration of Loki has short-term storage and is optimized for very recent queries.
+
+[IMPORTANT]
+====
+For long-term storage or queries over a long time period, users should look to log stores external to their cluster. Loki sizing is tested and supported only for short-term storage, for a maximum of 30 days.
+====
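+
+To keep a LokiStack within the supported window, you can set a retention limit on the `LokiStack` resource. The following is a minimal sketch, not a definitive configuration; the size, schema effective date, storage class, and the `logging-loki-s3` secret name are assumptions that depend on your environment:
+
+[source,yaml]
+----
+apiVersion: loki.grafana.com/v1
+kind: LokiStack
+metadata:
+  name: logging-loki
+  namespace: openshift-logging
+spec:
+  size: 1x.small
+  storage:
+    schemas:
+    - version: v13
+      effectiveDate: "2024-10-01"
+    secret:
+      name: logging-loki-s3  # object storage credentials created beforehand
+      type: s3
+  storageClassName: gp3-csi
+  tenants:
+    mode: openshift-logging
+  limits:
+    global:
+      retention:
+        days: 30  # stay within the supported 30-day maximum
+----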