From d296a65f58d75933799495985cb17b2d6cb1addb Mon Sep 17 00:00:00 2001 From: libander Date: Fri, 26 Apr 2024 10:49:41 -0500 Subject: [PATCH] OBSDOCS-845, OBSDOCS-762, API Reference correction / updates / creation. --- _topic_maps/_topic_map.yml | 8 +- .../api_reference/logging-5-6-reference.adoc | 1157 +++++++++++- .../api_reference/logging-5-7-reference.adoc | 1307 +++++++++++++- .../api_reference/logging-5-8-reference.adoc | 1566 ++++++++++++++++- 4 files changed, 4027 insertions(+), 11 deletions(-) diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index c563655c5d9e..a8442ebb0a19 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2535,10 +2535,10 @@ Topics: - Name: API reference Dir: api_reference Topics: - # - Name: 5.8 Logging API reference - # File: logging-5-8-reference - # - Name: 5.7 Logging API reference - # File: logging-5-7-reference + - Name: 5.8 Logging API reference + File: logging-5-8-reference + - Name: 5.7 Logging API reference + File: logging-5-7-reference - Name: 5.6 Logging API reference File: logging-5-6-reference - Name: Glossary diff --git a/observability/logging/api_reference/logging-5-6-reference.adoc b/observability/logging/api_reference/logging-5-6-reference.adoc index b28f99945c7c..5e5bd687c291 100644 --- a/observability/logging/api_reference/logging-5-6-reference.adoc +++ b/observability/logging/api_reference/logging-5-6-reference.adoc @@ -1,9 +1,1160 @@ :_mod-docs-content-type: ASSEMBLY -[id="logging-5-6-reference"] -= 5.6 Logging API reference include::_attributes/common-attributes.adoc[] +include::_attributes/attributes-openshift-dedicated.adoc[] +[id="logging-5-6-reference"] += 5.6 logging API reference :context: logging-5-6-reference toc::[] -include::modules/logging-5.6-api-ref.adoc[leveloffset=+1] + +//// +** These release notes are generated from the content in the openshift/cluster-logging-operator repository. 
+** Do not modify the content here manually except for the metadata and section IDs - changes to the content should be made in the source code. +//// + +[id="logging-5-6-reference-ClusterLogForwarder"] +== ClusterLogForwarder + +ClusterLogForwarder is an API to configure forwarding logs. + +You configure forwarding by specifying a list of `pipelines`, +which forward from a set of named inputs to a set of named outputs. + +There are built-in input names for common log categories, and you can +define custom inputs to do additional filtering. + +There is a built-in output name for the default openshift log store, but +you can define your own outputs with a URL and other connection information +to forward logs to other stores or processors, inside or outside the cluster. + +For more details see the documentation on the API fields. + +[options="header"] +|====================== +|Property|Type|Description + +|spec|object| Specification of the desired behavior of ClusterLogForwarder +|status|object| Status of the ClusterLogForwarder +|====================== + +=== .spec + +ClusterLogForwarderSpec defines how logs should be forwarded to remote targets. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|inputs|array| *(optional)* Inputs are named filters for log messages to be forwarded. +|outputDefaults|object| *(optional)* DEPRECATED OutputDefaults specify forwarder config explicitly for the +|outputs|array| *(optional)* Outputs are named destinations for log messages. +|pipelines|array| Pipelines forward the messages selected by a set of inputs to a set of outputs. +|====================== + +=== .spec.inputs[] + +InputSpec defines a selector of log messages. + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|application|object| *(optional)* Application, if present, enables named set of `application` logs that +|name|string| Name used to refer to the input of a `pipeline`. 
+|====================== + +=== .spec.inputs[].application + +Application log selector. +All conditions in the selector must be satisfied (logical AND) to select logs. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|namespaces|array| *(optional)* Namespaces from which to collect application logs. +|selector|object| *(optional)* Selector for logs from pods with matching labels. +|====================== + +=== .spec.inputs[].application.namespaces[] + +Type:: array + +=== .spec.inputs[].application.selector + +A label selector is a label query over a set of resources. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|matchLabels|object| *(optional)* matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels +|====================== + +=== .spec.inputs[].application.selector.matchLabels + +Type:: object + +=== .spec.outputDefaults + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|elasticsearch|object| *(optional)* Elasticsearch OutputSpec default values +|====================== + +=== .spec.outputDefaults.elasticsearch + +ElasticsearchStructuredSpec is spec related to structured log changes to determine the elasticsearch index + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|enableStructuredContainerLogs|bool| *(optional)* EnableStructuredContainerLogs enables multi-container structured logs to allow +|structuredTypeKey|string| *(optional)* StructuredTypeKey specifies the metadata key to be used as name of elasticsearch index +|structuredTypeName|string| *(optional)* StructuredTypeName specifies the name of elasticsearch schema +|====================== + +=== .spec.outputs[] + +Output defines a destination for log messages. 
+ +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|syslog|object| *(optional)* +|fluentdForward|object| *(optional)* +|elasticsearch|object| *(optional)* +|kafka|object| *(optional)* +|cloudwatch|object| *(optional)* +|loki|object| *(optional)* +|googleCloudLogging|object| *(optional)* +|splunk|object| *(optional)* +|name|string| Name used to refer to the output from a `pipeline`. +|secret|object| *(optional)* Secret for authentication. +|tls|object| TLS contains settings for controlling options on TLS client connections. +|type|string| Type of output plugin. +|url|string| *(optional)* URL to send log records to. +|====================== + +=== .spec.outputs[].secret + +OutputSecretSpec is a secret reference containing name only, no namespace. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name of a secret in the namespace configured for log forwarder secrets. +|====================== + +=== .spec.outputs[].tls + +OutputTLSSpec contains options for TLS connections that are agnostic to the output type. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|insecureSkipVerify|bool| If InsecureSkipVerify is true, then the TLS client will be configured to ignore errors with certificates. +|====================== + +=== .spec.pipelines[] + +PipelinesSpec link a set of inputs to a set of outputs. + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|detectMultilineErrors|bool| *(optional)* DetectMultilineErrors enables multiline error detection of container logs +|inputRefs|array| InputRefs lists the names (`input.name`) of inputs to this pipeline. +|labels|object| *(optional)* Labels applied to log records passing through this pipeline. +|name|string| *(optional)* Name is optional, but must be unique in the `pipelines` list if provided. 
+|outputRefs|array| OutputRefs lists the names (`output.name`) of outputs from this pipeline. +|parse|string| *(optional)* Parse enables parsing of log entries into structured logs +|====================== + +=== .spec.pipelines[].inputRefs[] + +Type:: array + +=== .spec.pipelines[].labels + +Type:: object + +=== .spec.pipelines[].outputRefs[] + +Type:: array + +=== .status + +ClusterLogForwarderStatus defines the observed state of ClusterLogForwarder + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|conditions|object| Conditions of the log forwarder. +|inputs|Conditions| Inputs maps input name to condition of the input. +|outputs|Conditions| Outputs maps output name to condition of the output. +|pipelines|Conditions| Pipelines maps pipeline name to condition of the pipeline. +|====================== + +=== .status.conditions + +Type:: object + +=== .status.inputs + +Type:: Conditions + +=== .status.outputs + +Type:: Conditions + +=== .status.pipelines + +Type:: Conditions + +[id="logging-5-6-reference-ClusterLogging"] +== ClusterLogging + +A Red Hat OpenShift Logging instance. ClusterLogging is the Schema for the clusterlogging API. + +[options="header"] +|====================== +|Property|Type|Description + +|spec|object| Specification of the desired behavior of ClusterLogging +|status|object| Status defines the observed state of ClusterLogging +|====================== + +=== .spec + +ClusterLoggingSpec defines the desired state of ClusterLogging + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|collection|object| Specification of the Collection component for the cluster +|curation|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of the Curation component for the cluster +|forwarder|object| **(DEPRECATED)** *(optional)* Deprecated. 
Specification for Forwarder component for the cluster +|logStore|object| *(optional)* Specification of the Log Storage component for the cluster +|managementState|string| *(optional)* Indicator if the resource is 'Managed' or 'Unmanaged' by the operator +|visualization|object| *(optional)* Specification of the Visualization component for the cluster +|====================== + +=== .spec.collection + +This is the struct that will contain information pertinent to Log and event collection + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| *(optional)* The resource requirements for the collector +|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|fluentd|object| *(optional)* Fluentd represents the configuration for forwarders of type fluentd. +|logs|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of Log Collection for the cluster +|type|string| *(optional)* The type of Log Collection to configure +|====================== + +=== .spec.collection.fluentd + +FluentdForwarderSpec represents the configuration for forwarders of type fluentd. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|buffer|object| +|inFile|object| +|====================== + +=== .spec.collection.fluentd.buffer + +FluentdBufferSpec represents a subset of fluentd buffer parameters to tune +the buffer configuration for all fluentd outputs. It supports a subset of +parameters to configure buffer and queue sizing, flush operations and retry +flushing. 
+
+For general parameters refer to:
+https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
+
+For flush parameters refer to:
+https://docs.fluentd.org/configuration/buffer-section#flushing-parameters
+
+For retry parameters refer to:
+https://docs.fluentd.org/configuration/buffer-section#retries-parameters
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be
+|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush
+|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. The mode
+|flushThreadCount|int| *(optional)* FlushThreadCount represents the number of threads used by the fluentd buffer
+|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to
+|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff
+|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up
+|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can
+|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush
+|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd
+|======================
+
+=== .spec.collection.fluentd.inFile
+
+FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters
+to tune the configuration for all fluentd in-tail inputs. 
+ +For general parameters refer to: +https://docs.fluentd.org/input/tail#parameters + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation +|====================== + +=== .spec.collection.logs + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentd|object| Specification of the Fluentd Log Collection component +|type|string| The type of Log Collection to configure +|====================== + +=== .spec.collection.logs.fluentd + +CollectorSpec is spec to define scheduling and resources for a collector + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. +|resources|object| *(optional)* The resource requirements for the collector +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|====================== + +=== .spec.collection.logs.fluentd.nodeSelector + +Type:: object + +=== .spec.collection.logs.fluentd.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
+|====================== + +=== .spec.collection.logs.fluentd.resources.limits + +Type:: object + +=== .spec.collection.logs.fluentd.resources.requests + +Type:: object + +=== .spec.collection.logs.fluentd.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.collection.logs.fluentd.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.curation + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +This is the struct that will contain information pertinent to Log curation (Curator) + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|curator|object| The specification of curation to configure +|type|string| The kind of curation to configure +|====================== + +=== .spec.curation.curator + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|resources|object| *(optional)* The resource requirements for Curator +|schedule|string| The cron schedule that the Curator job is run. 
Defaults to "30 3 * * *" +|tolerations|array| +|====================== + +=== .spec.curation.curator.nodeSelector + +Type:: object + +=== .spec.curation.curator.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.curation.curator.resources.limits + +Type:: object + +=== .spec.curation.curator.resources.requests + +Type:: object + +=== .spec.curation.curator.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.curation.curator.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.forwarder + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +ForwarderSpec contains global tuning parameters for specific forwarder implementations. +This field is not required for general use, it allows performance tuning by users +familiar with the underlying forwarder technology. +Currently supported: `fluentd`. 
+ +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentd|object| +|====================== + +=== .spec.forwarder.fluentd + +FluentdForwarderSpec represents the configuration for forwarders of type fluentd. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|buffer|object| +|inFile|object| +|====================== + +=== .spec.forwarder.fluentd.buffer + +FluentdBufferSpec represents a subset of fluentd buffer parameters to tune +the buffer configuration for all fluentd outputs. It supports a subset of +parameters to configure buffer and queue sizing, flush operations and retry +flushing. + +For general parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#buffering-parameters + +For flush parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#flushing-parameters + +For retry parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#retries-parameters + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be +|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush +|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. 
The mode
+|flushThreadCount|int| *(optional)* FlushThreadCount represents the number of threads used by the fluentd buffer
+|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to
+|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff
+|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up
+|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can
+|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush
+|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd
+|======================
+
+=== .spec.forwarder.fluentd.inFile
+
+FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters
+to tune the configuration for all fluentd in-tail inputs.
+
+For general parameters refer to:
+https://docs.fluentd.org/input/tail#parameters
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation
+|======================
+
+=== .spec.logStore
+
+The LogStoreSpec contains information about how logs are stored.
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|elasticsearch|object| Specification of the Elasticsearch Log Store component
+|lokistack|object| LokiStack contains information about which LokiStack to use for log storage if Type is set to LogStoreTypeLokiStack.
+|retentionPolicy|object| *(optional)* Retention policy defines the maximum age for an index after which it should be deleted
+|type|string| The Type of Log Storage to configure. 
The operator currently supports either using ElasticSearch +|====================== + +=== .spec.logStore.elasticsearch + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeCount|int| Number of nodes to deploy for Elasticsearch +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|proxy|object| Specification of the Elasticsearch Proxy component +|redundancyPolicy|string| *(optional)* +|resources|object| *(optional)* The resource requirements for Elasticsearch +|storage|object| *(optional)* The storage specification for Elasticsearch data nodes +|tolerations|array| +|====================== + +=== .spec.logStore.elasticsearch.nodeSelector + +Type:: object + +=== .spec.logStore.elasticsearch.proxy + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| +|====================== + +=== .spec.logStore.elasticsearch.proxy.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.logStore.elasticsearch.proxy.resources.limits + +Type:: object + +=== .spec.logStore.elasticsearch.proxy.resources.requests + +Type:: object + +=== .spec.logStore.elasticsearch.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
+|====================== + +=== .spec.logStore.elasticsearch.resources.limits + +Type:: object + +=== .spec.logStore.elasticsearch.resources.requests + +Type:: object + +=== .spec.logStore.elasticsearch.storage + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|size|object| The max storage capacity for the node to provision. +|storageClassName|string| *(optional)* The name of the storage class to use with creating the node's PVC. +|====================== + +=== .spec.logStore.elasticsearch.storage.size + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|Format|string| Change Format at will. See the comment for Canonicalize for +|d|object| d is the quantity in inf.Dec form if d.Dec != nil +|i|int| i is the quantity in int64 scaled form, if d.Dec == nil +|s|string| s is the generated value of this quantity to avoid recalculation +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|Dec|object| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|scale|int| +|unscaled|object| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|abs|Word| sign +|neg|bool| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled.abs + +Type:: Word + +=== .spec.logStore.elasticsearch.storage.size.i + +Type:: int + +[options="header"] +|====================== +|Property|Type|Description + +|scale|int| +|value|int| +|====================== + +=== .spec.logStore.elasticsearch.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| 
*(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.logStore.elasticsearch.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.logStore.lokistack + +LokiStackStoreSpec is used to set up cluster-logging to use a LokiStack as logging storage. +It points to an existing LokiStack in the same namespace. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name of the LokiStack resource. +|====================== + +=== .spec.logStore.retentionPolicy + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|application|object| +|audit|object| +|infra|object| +|====================== + +=== .spec.logStore.retentionPolicy.application + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.application.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 
1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.logStore.retentionPolicy.audit + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.audit.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.logStore.retentionPolicy.infra + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.infra.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 
1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.visualization + +This is the struct that will contain information pertinent to Log visualization (Kibana) + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|kibana|object| Specification of the Kibana Visualization component +|type|string| The type of Visualization to configure +|====================== + +=== .spec.visualization.kibana + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|proxy|object| Specification of the Kibana Proxy component +|replicas|int| Number of instances to deploy for a Kibana deployment +|resources|object| *(optional)* The resource requirements for Kibana +|tolerations|array| +|====================== + +=== .spec.visualization.kibana.nodeSelector + +Type:: object + +=== .spec.visualization.kibana.proxy + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| +|====================== + +=== .spec.visualization.kibana.proxy.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.visualization.kibana.proxy.resources.limits + +Type:: object + +=== .spec.visualization.kibana.proxy.resources.requests + +Type:: object + +=== .spec.visualization.kibana.replicas + +Type:: int + +=== .spec.visualization.kibana.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. 
+|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.visualization.kibana.resources.limits + +Type:: object + +=== .spec.visualization.kibana.resources.requests + +Type:: object + +=== .spec.visualization.kibana.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.visualization.kibana.tolerations[].tolerationSeconds + +Type:: int + +=== .status + +ClusterLoggingStatus defines the observed state of ClusterLogging + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|collection|object| *(optional)* +|conditions|object| *(optional)* +|curation|object| *(optional)* +|logStore|object| *(optional)* +|visualization|object| *(optional)* +|====================== + +=== .status.collection + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|logs|object| *(optional)* +|====================== + +=== .status.collection.logs + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentdStatus|object| *(optional)* +|====================== + +=== .status.collection.logs.fluentdStatus + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|clusterCondition|object| *(optional)* +|daemonSet|string| *(optional)* +|nodes|object| *(optional)* 
+|pods|string| *(optional)* +|====================== + +=== .status.collection.logs.fluentdStatus.clusterCondition + +`operator-sdk generate crds` does not allow map-of-slice, must use a named type. + +Type:: object + +=== .status.collection.logs.fluentdStatus.nodes + +Type:: object + +=== .status.conditions + +Type:: object + +=== .status.curation + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|curatorStatus|array| *(optional)* +|====================== + +=== .status.curation.curatorStatus[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|clusterCondition|object| *(optional)* +|cronJobs|string| *(optional)* +|schedules|string| *(optional)* +|suspended|bool| *(optional)* +|====================== + +=== .status.curation.curatorStatus[].clusterCondition + +`operator-sdk generate crds` does not allow map-of-slice, must use a named type. + +Type:: object + +=== .status.logStore + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|elasticsearchStatus|array| *(optional)* +|====================== + +=== .status.logStore.elasticsearchStatus[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|cluster|object| *(optional)* +|clusterConditions|object| *(optional)* +|clusterHealth|string| *(optional)* +|clusterName|string| *(optional)* +|deployments|array| *(optional)* +|nodeConditions|object| *(optional)* +|nodeCount|int| *(optional)* +|pods|object| *(optional)* +|replicaSets|array| *(optional)* +|shardAllocationEnabled|string| *(optional)* +|statefulSets|array| *(optional)* +|====================== + +=== .status.logStore.elasticsearchStatus[].cluster + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|activePrimaryShards|int| The number of Active Primary Shards for the Elasticsearch Cluster +|activeShards|int| The number of Active Shards for the 
Elasticsearch Cluster +|initializingShards|int| The number of Initializing Shards for the Elasticsearch Cluster +|numDataNodes|int| The number of Data Nodes for the Elasticsearch Cluster +|numNodes|int| The number of Nodes for the Elasticsearch Cluster +|pendingTasks|int| +|relocatingShards|int| The number of Relocating Shards for the Elasticsearch Cluster +|status|string| The current Status of the Elasticsearch Cluster +|unassignedShards|int| The number of Unassigned Shards for the Elasticsearch Cluster +|====================== + +=== .status.logStore.elasticsearchStatus[].clusterConditions + +Type:: object + +=== .status.logStore.elasticsearchStatus[].deployments[] + +Type:: array + +=== .status.logStore.elasticsearchStatus[].nodeConditions + +Type:: object + +=== .status.logStore.elasticsearchStatus[].pods + +Type:: object + +=== .status.logStore.elasticsearchStatus[].replicaSets[] + +Type:: array diff --git a/observability/logging/api_reference/logging-5-7-reference.adoc b/observability/logging/api_reference/logging-5-7-reference.adoc index 2c1be2346c72..be03c018f50c 100644 --- a/observability/logging/api_reference/logging-5-7-reference.adoc +++ b/observability/logging/api_reference/logging-5-7-reference.adoc @@ -1,7 +1,1310 @@ :_mod-docs-content-type: ASSEMBLY -[id="logging-5-7-reference"] -= 5.7 Logging API reference include::_attributes/common-attributes.adoc[] +include::_attributes/attributes-openshift-dedicated.adoc[] +[id="logging-5-7-reference"] += 5.7 logging API reference :context: logging-5-7-reference toc::[] + + +//// +** These release notes are generated from the content in the openshift/cluster-logging-operator repository. +** Do not modify the content here manually except for the metadata and section IDs - changes to the content should be made in the source code. +//// + +[id="logging-5-7-reference-ClusterLogForwarder"] +== ClusterLogForwarder + +ClusterLogForwarder is an API to configure forwarding logs. 
+ +You configure forwarding by specifying a list of `pipelines`, +which forward from a set of named inputs to a set of named outputs. + +There are built-in input names for common log categories, and you can +define custom inputs to do additional filtering. + +There is a built-in output name for the default openshift log store, but +you can define your own outputs with a URL and other connection information +to forward logs to other stores or processors, inside or outside the cluster. + +For more details see the documentation on the API fields. + +[options="header"] +|====================== +|Property|Type|Description + +|spec|object| Specification of the desired behavior of ClusterLogForwarder +|status|object| Status of the ClusterLogForwarder +|====================== + +=== .spec + +ClusterLogForwarderSpec defines how logs should be forwarded to remote targets. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|inputs|array| *(optional)* Inputs are named filters for log messages to be forwarded. +|outputDefaults|object| *(optional)* DEPRECATED OutputDefaults specify forwarder config explicitly for the +|outputs|array| *(optional)* Outputs are named destinations for log messages. +|pipelines|array| Pipelines forward the messages selected by a set of inputs to a set of outputs. +|====================== + +=== .spec.inputs[] + +InputSpec defines a selector of log messages. + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|application|object| *(optional)* Application, if present, enables named set of `application` logs that +|name|string| Name used to refer to the input of a `pipeline`. +|====================== + +=== .spec.inputs[].application + +Application log selector. +All conditions in the selector must be satisfied (logical AND) to select logs. 
+ +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|namespaces|array| *(optional)* Namespaces from which to collect application logs. +|selector|object| *(optional)* Selector for logs from pods with matching labels. +|====================== + +=== .spec.inputs[].application.namespaces[] + +Type:: array + +=== .spec.inputs[].application.selector + +A label selector is a label query over a set of resources. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|matchLabels|object| *(optional)* matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels +|====================== + +=== .spec.inputs[].application.selector.matchLabels + +Type:: object + +=== .spec.outputDefaults + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|elasticsearch|object| *(optional)* Elasticsearch OutputSpec default values +|====================== + +=== .spec.outputDefaults.elasticsearch + +ElasticsearchStructuredSpec is spec related to structured log changes to determine the elasticsearch index + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|enableStructuredContainerLogs|bool| *(optional)* EnableStructuredContainerLogs enables multi-container structured logs to allow +|structuredTypeKey|string| *(optional)* StructuredTypeKey specifies the metadata key to be used as name of elasticsearch index +|structuredTypeName|string| *(optional)* StructuredTypeName specifies the name of elasticsearch schema +|====================== + +=== .spec.outputs[] + +Output defines a destination for log messages. 
+ +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|syslog|object| *(optional)* +|fluentdForward|object| *(optional)* +|elasticsearch|object| *(optional)* +|kafka|object| *(optional)* +|cloudwatch|object| *(optional)* +|loki|object| *(optional)* +|googleCloudLogging|object| *(optional)* +|splunk|object| *(optional)* +|http|object| *(optional)* +|name|string| Name used to refer to the output from a `pipeline`. +|secret|object| *(optional)* Secret for authentication. +|tls|object| TLS contains settings for controlling options on TLS client connections. +|type|string| Type of output plugin. +|url|string| *(optional)* URL to send log records to. +|====================== + +=== .spec.outputs[].secret + +OutputSecretSpec is a secret reference containing name only, no namespace. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name of a secret in the namespace configured for log forwarder secrets. +|====================== + +=== .spec.outputs[].tls + +OutputTLSSpec contains options for TLS connections that are agnostic to the output type. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|insecureSkipVerify|bool| If InsecureSkipVerify is true, then the TLS client will be configured to ignore errors with certificates. +|securityProfile|object| TLSSecurityProfile is the security profile to apply to the output connection +|====================== + +=== .spec.outputs[].tls.securityProfile + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|custom|object| *(optional)* custom is a user-defined TLS security profile. 
Be extremely careful using a custom
+|intermediate|object| *(optional)* intermediate is a TLS security profile based on:
+|modern|object| *(optional)* modern is a TLS security profile based on:
+|old|object| *(optional)* old is a TLS security profile based on:
+|type|string| *(optional)* type is one of Old, Intermediate, Modern or Custom. Custom provides
+|======================
+
+=== .spec.outputs[].tls.securityProfile.custom
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|ciphers|array| ciphers is used to specify the cipher algorithms that are negotiated
+|minTLSVersion|string| minTLSVersion is used to specify the minimal version of the TLS protocol
+|======================
+
+=== .spec.outputs[].tls.securityProfile.intermediate
+
+Type:: object
+
+=== .spec.outputs[].tls.securityProfile.modern
+
+Type:: object
+
+=== .spec.outputs[].tls.securityProfile.old
+
+Type:: object
+
+=== .spec.pipelines[]
+
+PipelinesSpec links a set of inputs to a set of outputs.
+
+Type:: array
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|detectMultilineErrors|bool| *(optional)* DetectMultilineErrors enables multiline error detection of container logs
+|inputRefs|array| InputRefs lists the names (`input.name`) of inputs to this pipeline.
+|labels|object| *(optional)* Labels applied to log records passing through this pipeline.
+|name|string| *(optional)* Name is optional, but must be unique in the `pipelines` list if provided.
+|outputRefs|array| OutputRefs lists the names (`output.name`) of outputs from this pipeline. 
+|parse|string| *(optional)* Parse enables parsing of log entries into structured logs +|====================== + +=== .spec.pipelines[].inputRefs[] + +Type:: array + +=== .spec.pipelines[].labels + +Type:: object + +=== .spec.pipelines[].outputRefs[] + +Type:: array + +=== .status + +ClusterLogForwarderStatus defines the observed state of ClusterLogForwarder + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|conditions|object| Conditions of the log forwarder. +|inputs|Conditions| Inputs maps input name to condition of the input. +|outputs|Conditions| Outputs maps output name to condition of the output. +|pipelines|Conditions| Pipelines maps pipeline name to condition of the pipeline. +|====================== + +=== .status.conditions + +Type:: object + +=== .status.inputs + +Type:: Conditions + +=== .status.outputs + +Type:: Conditions + +=== .status.pipelines + +Type:: Conditions + +[id="logging-5-7-reference-ClusterLogging"] +== ClusterLogging + +A Red Hat OpenShift Logging instance. ClusterLogging is the Schema for the clusterlogging API. + +[options="header"] +|====================== +|Property|Type|Description + +|spec|object| Specification of the desired behavior of ClusterLogging +|status|object| Status defines the observed state of ClusterLogging +|====================== + +=== .spec + +ClusterLoggingSpec defines the desired state of ClusterLogging + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|collection|object| Specification of the Collection component for the cluster +|curation|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of the Curation component for the cluster +|forwarder|object| **(DEPRECATED)** *(optional)* Deprecated. 
Specification for Forwarder component for the cluster +|logStore|object| *(optional)* Specification of the Log Storage component for the cluster +|managementState|string| *(optional)* Indicator if the resource is 'Managed' or 'Unmanaged' by the operator +|visualization|object| *(optional)* Specification of the Visualization component for the cluster +|====================== + +=== .spec.collection + +This is the struct that will contain information pertinent to Log and event collection + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| *(optional)* The resource requirements for the collector +|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|fluentd|object| *(optional)* Fluentd represents the configuration for forwarders of type fluentd. +|logs|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of Log Collection for the cluster +|type|string| The type of Log Collection to configure +|====================== + +=== .spec.collection.fluentd + +FluentdForwarderSpec represents the configuration for forwarders of type fluentd. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|buffer|object| +|inFile|object| +|====================== + +=== .spec.collection.fluentd.buffer + +FluentdBufferSpec represents a subset of fluentd buffer parameters to tune +the buffer configuration for all fluentd outputs. It supports a subset of +parameters to configure buffer and queue sizing, flush operations and retry +flushing. 
+
+For general parameters refer to:
+https://docs.fluentd.org/configuration/buffer-section#buffering-parameters
+
+For flush parameters refer to:
+https://docs.fluentd.org/configuration/buffer-section#flushing-parameters
+
+For retry parameters refer to:
+https://docs.fluentd.org/configuration/buffer-section#retries-parameters
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be
+|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush
+|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. The mode
+|flushThreadCount|int| *(optional)* FlushThreadCount represents the number of threads used by the fluentd buffer
+|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to
+|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff
+|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up
+|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can
+|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush
+|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd
+|======================
+
+=== .spec.collection.fluentd.inFile
+
+FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters
+to tune the configuration for all fluentd in-tail inputs. 
+ +For general parameters refer to: +https://docs.fluentd.org/input/tail#parameters + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation +|====================== + +=== .spec.collection.logs + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Specification of Log Collection for the cluster +See spec.collection + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentd|object| Specification of the Fluentd Log Collection component +|type|string| The type of Log Collection to configure +|====================== + +=== .spec.collection.logs.fluentd + +CollectorSpec is spec to define scheduling and resources for a collector + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. +|resources|object| *(optional)* The resource requirements for the collector +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|====================== + +=== .spec.collection.logs.fluentd.nodeSelector + +Type:: object + +=== .spec.collection.logs.fluentd.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
+|====================== + +=== .spec.collection.logs.fluentd.resources.limits + +Type:: object + +=== .spec.collection.logs.fluentd.resources.requests + +Type:: object + +=== .spec.collection.logs.fluentd.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.collection.logs.fluentd.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.curation + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +This is the struct that will contain information pertinent to Log curation (Curator) + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|curator|object| The specification of curation to configure +|type|string| The kind of curation to configure +|====================== + +=== .spec.curation.curator + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|resources|object| *(optional)* The resource requirements for Curator +|schedule|string| The cron schedule that the Curator job is run. 
Defaults to "30 3 * * *" +|tolerations|array| +|====================== + +=== .spec.curation.curator.nodeSelector + +Type:: object + +=== .spec.curation.curator.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.curation.curator.resources.limits + +Type:: object + +=== .spec.curation.curator.resources.requests + +Type:: object + +=== .spec.curation.curator.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.curation.curator.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.forwarder + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +ForwarderSpec contains global tuning parameters for specific forwarder implementations. +This field is not required for general use, it allows performance tuning by users +familiar with the underlying forwarder technology. +Currently supported: `fluentd`. 
+ +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentd|object| +|====================== + +=== .spec.forwarder.fluentd + +FluentdForwarderSpec represents the configuration for forwarders of type fluentd. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|buffer|object| +|inFile|object| +|====================== + +=== .spec.forwarder.fluentd.buffer + +FluentdBufferSpec represents a subset of fluentd buffer parameters to tune +the buffer configuration for all fluentd outputs. It supports a subset of +parameters to configure buffer and queue sizing, flush operations and retry +flushing. + +For general parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#buffering-parameters + +For flush parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#flushing-parameters + +For retry parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#retries-parameters + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be +|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush +|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. 
The mode
+|flushThreadCount|int| *(optional)* FlushThreadCount represents the number of threads used by the fluentd buffer
+|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to
+|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff
+|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up
+|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can
+|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush
+|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd
+|======================
+
+=== .spec.forwarder.fluentd.inFile
+
+FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters
+to tune the configuration for all fluentd in-tail inputs.
+
+For general parameters refer to:
+https://docs.fluentd.org/input/tail#parameters
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation
+|======================
+
+=== .spec.logStore
+
+The LogStoreSpec contains information about how logs are stored.
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|elasticsearch|object| **(DEPRECATED)** Specification of the Elasticsearch Log Store component
+|lokistack|object| LokiStack contains information about which LokiStack to use for log storage if Type is set to LogStoreTypeLokiStack.
+|retentionPolicy|object| **(DEPRECATED)** *(optional)* Retention policy defines the maximum age for an Elasticsearch index after which it should be deleted
+|type|string| The Type of Log Storage to configure. 
The operator currently supports either using ElasticSearch +|====================== + +=== .spec.logStore.elasticsearch + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeCount|int| Number of nodes to deploy for Elasticsearch +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|proxy|object| Specification of the Elasticsearch Proxy component +|redundancyPolicy|string| *(optional)* +|resources|object| *(optional)* The resource requirements for Elasticsearch +|storage|object| *(optional)* The storage specification for Elasticsearch data nodes +|tolerations|array| +|====================== + +=== .spec.logStore.elasticsearch.nodeSelector + +Type:: object + +=== .spec.logStore.elasticsearch.proxy + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| +|====================== + +=== .spec.logStore.elasticsearch.proxy.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.logStore.elasticsearch.proxy.resources.limits + +Type:: object + +=== .spec.logStore.elasticsearch.proxy.resources.requests + +Type:: object + +=== .spec.logStore.elasticsearch.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
+|====================== + +=== .spec.logStore.elasticsearch.resources.limits + +Type:: object + +=== .spec.logStore.elasticsearch.resources.requests + +Type:: object + +=== .spec.logStore.elasticsearch.storage + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|size|object| The max storage capacity for the node to provision. +|storageClassName|string| *(optional)* The name of the storage class to use with creating the node's PVC. +|====================== + +=== .spec.logStore.elasticsearch.storage.size + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|Format|string| Change Format at will. See the comment for Canonicalize for +|d|object| d is the quantity in inf.Dec form if d.Dec != nil +|i|int| i is the quantity in int64 scaled form, if d.Dec == nil +|s|string| s is the generated value of this quantity to avoid recalculation +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|Dec|object| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|scale|int| +|unscaled|object| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|abs|Word| sign +|neg|bool| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled.abs + +Type:: Word + +=== .spec.logStore.elasticsearch.storage.size.i + +Type:: int + +[options="header"] +|====================== +|Property|Type|Description + +|scale|int| +|value|int| +|====================== + +=== .spec.logStore.elasticsearch.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| 
*(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.logStore.elasticsearch.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.logStore.lokistack + +LokiStackStoreSpec is used to set up cluster-logging to use a LokiStack as logging storage. +It points to an existing LokiStack in the same namespace. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name of the LokiStack resource. +|====================== + +=== .spec.logStore.retentionPolicy + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|application|object| +|audit|object| +|infra|object| +|====================== + +=== .spec.logStore.retentionPolicy.application + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 
75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.application.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.logStore.retentionPolicy.audit + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.audit.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.logStore.retentionPolicy.infra + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 
75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.infra.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.visualization + +This is the struct that will contain information pertinent to Log visualization (Kibana) + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|kibana|object| **(DEPRECATED)** *(optional)* Specification of the Kibana Visualization component +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|ocpConsole|object| *(optional)* OCPConsole is the specification for the OCP console plugin +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|type|string| The type of Visualization to configure +|====================== + +=== .spec.visualization.kibana + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| **(DEPRECATED)** Define which Nodes the Pods are scheduled on. 
+|proxy|object| Specification of the Kibana Proxy component +|replicas|int| *(optional)* Number of instances to deploy for a Kibana deployment +|resources|object| *(optional)* The resource requirements for Kibana +|tolerations|array| **(DEPRECATED)** Define the tolerations the Pods will accept +|====================== + +=== .spec.visualization.kibana.nodeSelector + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +=== .spec.visualization.kibana.proxy + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| +|====================== + +=== .spec.visualization.kibana.proxy.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.visualization.kibana.proxy.resources.limits + +Type:: object + +=== .spec.visualization.kibana.proxy.resources.requests + +Type:: object + +=== .spec.visualization.kibana.replicas + +Type:: int + +=== .spec.visualization.kibana.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.visualization.kibana.resources.limits + +Type:: object + +=== .spec.visualization.kibana.resources.requests + +Type:: object + +=== .spec.visualization.kibana.tolerations[] + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. 
For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.visualization.kibana.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.visualization.nodeSelector + +Type:: object + +=== .spec.visualization.ocpConsole + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|logsLimit|int| *(optional)* LogsLimit is the max number of entries returned for a query. +|timeout|string| *(optional)* Timeout is the max duration before a query timeout +|====================== + +=== .spec.visualization.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. 
+|====================== + +=== .spec.visualization.tolerations[].tolerationSeconds + +Type:: int + +=== .status + +ClusterLoggingStatus defines the observed state of ClusterLogging + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|collection|object| **(DEPRECATED)** *(optional)* Deprecated. +|conditions|object| *(optional)* +|curation|object| *(optional)* +|logStore|object| *(optional)* +|visualization|object| *(optional)* +|====================== + +=== .status.collection + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|logs|object| *(optional)* +|====================== + +=== .status.collection.logs + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentdStatus|object| *(optional)* +|====================== + +=== .status.collection.logs.fluentdStatus + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|clusterCondition|object| *(optional)* +|daemonSet|string| *(optional)* +|nodes|object| *(optional)* +|pods|string| *(optional)* +|====================== + +=== .status.collection.logs.fluentdStatus.clusterCondition + +`operator-sdk generate crds` does not allow map-of-slice, must use a named type. 
+ +Type:: object + +=== .status.collection.logs.fluentdStatus.nodes + +Type:: object + +=== .status.conditions + +Type:: object + +=== .status.curation + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|curatorStatus|array| *(optional)* +|====================== + +=== .status.curation.curatorStatus[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|clusterCondition|object| *(optional)* +|cronJobs|string| *(optional)* +|schedules|string| *(optional)* +|suspended|bool| *(optional)* +|====================== + +=== .status.curation.curatorStatus[].clusterCondition + +`operator-sdk generate crds` does not allow map-of-slice, must use a named type. + +Type:: object + +=== .status.logStore + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|elasticsearchStatus|array| *(optional)* +|====================== + +=== .status.logStore.elasticsearchStatus[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|cluster|object| *(optional)* +|clusterConditions|object| *(optional)* +|clusterHealth|string| *(optional)* +|clusterName|string| *(optional)* +|deployments|array| *(optional)* +|nodeConditions|object| *(optional)* +|nodeCount|int| *(optional)* +|pods|object| *(optional)* +|replicaSets|array| *(optional)* +|shardAllocationEnabled|string| *(optional)* +|statefulSets|array| *(optional)* +|====================== + +=== .status.logStore.elasticsearchStatus[].cluster + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|activePrimaryShards|int| The number of Active Primary Shards for the Elasticsearch Cluster +|activeShards|int| The number of Active Shards for the Elasticsearch Cluster +|initializingShards|int| The number of Initializing Shards for the Elasticsearch Cluster +|numDataNodes|int| The number of Data Nodes for the Elasticsearch Cluster +|numNodes|int| 
The number of Nodes for the Elasticsearch Cluster +|pendingTasks|int| +|relocatingShards|int| The number of Relocating Shards for the Elasticsearch Cluster +|status|string| The current Status of the Elasticsearch Cluster +|unassignedShards|int| The number of Unassigned Shards for the Elasticsearch Cluster +|====================== + +=== .status.logStore.elasticsearchStatus[].clusterConditions + +Type:: object + +=== .status.logStore.elasticsearchStatus[].deployments[] + +Type:: array + +=== .status.logStore.elasticsearchStatus[].nodeConditions + +Type:: object + +=== .status.logStore.elasticsearchStatus[].pods + +Type:: object + +=== .status.logStore.elasticsearchStatus[].replicaSets[] + +Type:: array + +=== .status.logStore.elasticsearchStatus[].statefulSets[] + +Type:: array + +=== .status.visualization + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|kibanaStatus|array| *(optional)* +|====================== + +=== .status.visualization.kibanaStatus[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|clusterCondition|object| *(optional)* +|deployment|string| *(optional)* +|pods|string| *(optional)* The status for each of the Kibana pods for the Visualization component +|replicaSets|array| *(optional)* +|replicas|int| *(optional)* +|====================== + +=== .status.visualization.kibanaStatus[].clusterCondition + +Type:: object + +=== .status.visualization.kibanaStatus[].replicaSets[] + +Type:: array diff --git a/observability/logging/api_reference/logging-5-8-reference.adoc b/observability/logging/api_reference/logging-5-8-reference.adoc index 8fb45f49a6c9..0bfe5b5763b7 100644 --- a/observability/logging/api_reference/logging-5-8-reference.adoc +++ b/observability/logging/api_reference/logging-5-8-reference.adoc @@ -1,7 +1,1569 @@ :_mod-docs-content-type: ASSEMBLY -[id="logging-5-8-reference"] -= 5.8 Logging API reference include::_attributes/common-attributes.adoc[] 
+include::_attributes/attributes-openshift-dedicated.adoc[] +[id="logging-5-8-reference"] += 5.8 logging API reference :context: logging-5-8-reference toc::[] + + +//// +** These release notes are generated from the content in the openshift/cluster-logging-operator repository. +** Do not modify the content here manually except for the metadata and section IDs - changes to the content should be made in the source code. +//// + +[id="logging-5-8-reference-ClusterLogForwarder"] +== ClusterLogForwarder + +ClusterLogForwarder is an API to configure forwarding logs. + +You configure forwarding by specifying a list of `pipelines`, +which forward from a set of named inputs to a set of named outputs. + +There are built-in input names for common log categories, and you can +define custom inputs to do additional filtering. + +There is a built-in output name for the default openshift log store, but +you can define your own outputs with a URL and other connection information +to forward logs to other stores or processors, inside or outside the cluster. + +For more details see the documentation on the API fields. + +[options="header"] +|====================== +|Property|Type|Description + +|spec|object| Specification of the desired behavior of ClusterLogForwarder +|status|object| Status of the ClusterLogForwarder +|====================== + +=== .spec + +ClusterLogForwarderSpec defines how logs should be forwarded to remote targets. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|filters|array| Filters are applied to log records passing through a pipeline. +|inputs|array| *(optional)* Inputs are named filters for log messages to be forwarded. +|outputDefaults|object| *(optional)* DEPRECATED OutputDefaults specify forwarder config explicitly for the +|outputs|array| *(optional)* Outputs are named destinations for log messages. +|pipelines|array| Pipelines forward the messages selected by a set of inputs to a set of outputs. 
+|serviceAccountName|string| *(optional)* ServiceAccountName is the serviceaccount associated with the clusterlogforwarder +|====================== + +=== .spec.filters[] + +Filter defines a filter for log messages. +See [FilterTypeSpec] for a list of filter types. + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|kubeAPIAudit|object| *(optional)* +|name|string| Name used to refer to the filter from a `pipeline`. +|type|string| Type of filter. +|====================== + +=== .spec.inputs[] + +InputSpec defines a selector of log messages. + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|application|object| *(optional)* Application, if present, enables named set of `application` logs that +|name|string| Name used to refer to the input of a `pipeline`. +|receiver|object| Receiver to receive logs from non-cluster sources. +|====================== + +=== .spec.inputs[].application + +Application log selector. +All conditions in the selector must be satisfied (logical AND) to select logs. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|containerLimit|object| *(optional)* Container limit applied to each container of the pod(s) selected +|namespaces|array| *(optional)* Namespaces from which to collect application logs. +|selector|object| *(optional)* Selector for logs from pods with matching labels. +|====================== + +=== .spec.inputs[].application.containerLimit + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|maxRecordsPerSecond|int| MaxRecordsPerSecond is the maximum number of log records +|====================== + +=== .spec.inputs[].application.namespaces[] + +Type:: array + +=== .spec.inputs[].application.selector + +A label selector is a label query over a set of resources. 
+ +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|matchLabels|object| *(optional)* matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels +|====================== + +=== .spec.inputs[].application.selector.matchLabels + +Type:: object + +=== .spec.inputs[].receiver + +ReceiverSpec is a union of input Receiver types. + +The fields of this struct define the set of known Receiver types. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|http|object| +|====================== + +=== .spec.inputs[].receiver.http + +HTTPReceiver receives encoded logs as a HTTP endpoint. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|format|string| Format is the format of incoming log data. +|port|int| *(optional)* Port the Service and the HTTP listener listen on. +|====================== + +=== .spec.outputDefaults + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|elasticsearch|object| *(optional)* Elasticsearch OutputSpec default values +|====================== + +=== .spec.outputDefaults.elasticsearch + +ElasticsearchStructuredSpec is spec related to structured log changes to determine the elasticsearch index + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|enableStructuredContainerLogs|bool| *(optional)* EnableStructuredContainerLogs enables multi-container structured logs to allow +|structuredTypeKey|string| *(optional)* StructuredTypeKey specifies the metadata key to be used as name of elasticsearch index +|structuredTypeName|string| *(optional)* StructuredTypeName specifies the name of elasticsearch schema +|====================== + +=== .spec.outputs[] + +Output defines a destination for log messages. 
+ +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|syslog|object| *(optional)* +|fluentdForward|object| *(optional)* +|elasticsearch|object| *(optional)* +|kafka|object| *(optional)* +|cloudwatch|object| *(optional)* +|loki|object| *(optional)* +|googleCloudLogging|object| *(optional)* +|splunk|object| *(optional)* +|http|object| *(optional)* +|limit|object| *(optional)* Limit of the aggregated logs to this output from any given +|name|string| Name used to refer to the output from a `pipeline`. +|secret|object| *(optional)* Secret for authentication. +|tls|object| TLS contains settings for controlling options on TLS client connections. +|type|string| Type of output plugin. +|url|string| *(optional)* URL to send log records to. +|====================== + +=== .spec.outputs[].limit + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|maxRecordsPerSecond|int| MaxRecordsPerSecond is the maximum number of log records +|====================== + +=== .spec.outputs[].secret + +OutputSecretSpec is a secret reference containing name only, no namespace. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name of a secret in the namespace configured for log forwarder secrets. +|====================== + +=== .spec.outputs[].tls + +OutputTLSSpec contains options for TLS connections that are agnostic to the output type. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|insecureSkipVerify|bool| If InsecureSkipVerify is true, then the TLS client will be configured to ignore errors with certificates. 
+|securityProfile|object| TLSSecurityProfile is the security profile to apply to the output connection +|====================== + +=== .spec.outputs[].tls.securityProfile + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|custom|object| *(optional)* custom is a user-defined TLS security profile. Be extremely careful using a custom +|intermediate|object| *(optional)* intermediate is a TLS security profile based on: +|modern|object| *(optional)* modern is a TLS security profile based on: +|old|object| *(optional)* old is a TLS security profile based on: +|type|string| *(optional)* type is one of Old, Intermediate, Modern or Custom. Custom provides +|====================== + +=== .spec.outputs[].tls.securityProfile.custom + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|ciphers|array| ciphers is used to specify the cipher algorithms that are negotiated +|minTLSVersion|string| minTLSVersion is used to specify the minimal version of the TLS protocol +|====================== + +=== .spec.outputs[].tls.securityProfile.intermediate + +Type:: object + +=== .spec.outputs[].tls.securityProfile.modern + +Type:: object + +=== .spec.outputs[].tls.securityProfile.old + +Type:: object + +=== .spec.pipelines[] + +PipelinesSpec link a set of inputs to a set of outputs. + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|detectMultilineErrors|bool| *(optional)* DetectMultilineErrors enables multiline error detection of container logs +|filterRefs|array| *(optional)* Filters lists the names of filters to be applied to records going through this pipeline. +|inputRefs|array| InputRefs lists the names (`input.name`) of inputs to this pipeline. +|labels|object| *(optional)* Labels applied to log records passing through this pipeline. +|name|string| *(optional)* Name is optional, but must be unique in the `pipelines` list if provided. 
+|outputRefs|array| OutputRefs lists the names (`output.name`) of outputs from this pipeline. +|parse|string| *(optional)* Parse enables parsing of log entries into structured logs +|====================== + +=== .spec.pipelines[].filterRefs[] + +Type:: array + +=== .spec.pipelines[].inputRefs[] + +Type:: array + +=== .spec.pipelines[].labels + +Type:: object + +=== .spec.pipelines[].outputRefs[] + +Type:: array + +=== .status + +ClusterLogForwarderStatus defines the observed state of ClusterLogForwarder + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|conditions|object| Conditions of the log forwarder. +|filters|Conditions| Filters maps filter name to condition of the filter. +|inputs|Conditions| Inputs maps input name to condition of the input. +|outputs|Conditions| Outputs maps output name to condition of the output. +|pipelines|Conditions| Pipelines maps pipeline name to condition of the pipeline. +|====================== + +=== .status.conditions + +Type:: object + +=== .status.filters + +Type:: Conditions + +=== .status.inputs + +Type:: Conditions + +=== .status.outputs + +Type:: Conditions + +=== .status.pipelines + +Type:: Conditions + +[id="logging-5-8-reference-ClusterLogging"] +== ClusterLogging + +A Red Hat OpenShift Logging instance. ClusterLogging is the Schema for the clusterloggings API + +[options="header"] +|====================== +|Property|Type|Description + +|spec|object| Specification of the desired behavior of ClusterLogging +|status|object| Status defines the observed state of ClusterLogging +|====================== + +=== .spec + +ClusterLoggingSpec defines the desired state of ClusterLogging + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|collection|object| Specification of the Collection component for the cluster +|curation|object| **(DEPRECATED)** *(optional)* Deprecated. 
Specification of the Curation component for the cluster +|forwarder|object| **(DEPRECATED)** *(optional)* Deprecated. Specification for Forwarder component for the cluster +|logStore|object| *(optional)* Specification of the Log Storage component for the cluster +|managementState|string| *(optional)* Indicator if the resource is 'Managed' or 'Unmanaged' by the operator +|visualization|object| *(optional)* Specification of the Visualization component for the cluster +|====================== + +=== .spec.collection + +This is the struct that will contain information pertinent to Log and event collection + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| *(optional)* The resource requirements for the collector +|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|fluentd|object| *(optional)* Fluentd represents the configuration for forwarders of type fluentd. +|logs|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of Log Collection for the cluster +|type|string| The type of Log Collection to configure +|====================== + +=== .spec.collection.fluentd + +FluentdForwarderSpec represents the configuration for forwarders of type fluentd. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|buffer|object| +|inFile|object| +|====================== + +=== .spec.collection.fluentd.buffer + +FluentdBufferSpec represents a subset of fluentd buffer parameters to tune +the buffer configuration for all fluentd outputs. It supports a subset of +parameters to configure buffer and queue sizing, flush operations and retry +flushing. 
+ +For general parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#buffering-parameters + +For flush parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#flushing-parameters + +For retry parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#retries-parameters + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be +|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush +|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. The mode +|flushThreadCount|int| *(optional)* FlushThreadCount represents the number of threads used by the fluentd buffer +|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to +|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff +|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up +|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can +|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush +|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd +|====================== + +=== .spec.collection.fluentd.inFile + +FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters +to tune the configuration for all fluentd in-tail inputs. 
+ +For general parameters refer to: +https://docs.fluentd.org/input/tail#parameters + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation +|====================== + +=== .spec.collection.logs + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Specification of Log Collection for the cluster +See spec.collection + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentd|object| Specification of the Fluentd Log Collection component +|type|string| The type of Log Collection to configure +|====================== + +=== .spec.collection.logs.fluentd + +CollectorSpec is spec to define scheduling and resources for a collector + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. +|resources|object| *(optional)* The resource requirements for the collector +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|====================== + +=== .spec.collection.logs.fluentd.nodeSelector + +Type:: object + +=== .spec.collection.logs.fluentd.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|claims|array| *(optional)* Claims lists the names of resources, defined in spec.resourceClaims, +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
+|====================== + +=== .spec.collection.logs.fluentd.resources.claims[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name must match the name of one entry in pod.spec.resourceClaims of +|====================== + +=== .spec.collection.logs.fluentd.resources.limits + +Type:: object + +=== .spec.collection.logs.fluentd.resources.requests + +Type:: object + +=== .spec.collection.logs.fluentd.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.collection.logs.fluentd.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.curation + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +This is the struct that will contain information pertinent to Log curation (Curator) + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|curator|object| The specification of curation to configure +|type|string| The kind of curation to configure +|====================== + +=== .spec.curation.curator + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| Define which Nodes the Pods are scheduled on. 
+|resources|object| *(optional)* The resource requirements for Curator +|schedule|string| The cron schedule that the Curator job is run. Defaults to "30 3 * * *" +|tolerations|array| +|====================== + +=== .spec.curation.curator.nodeSelector + +Type:: object + +=== .spec.curation.curator.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|claims|array| *(optional)* Claims lists the names of resources, defined in spec.resourceClaims, +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.curation.curator.resources.claims[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name must match the name of one entry in pod.spec.resourceClaims of +|====================== + +=== .spec.curation.curator.resources.limits + +Type:: object + +=== .spec.curation.curator.resources.requests + +Type:: object + +=== .spec.curation.curator.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.curation.curator.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.forwarder + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. 
For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +ForwarderSpec contains global tuning parameters for specific forwarder implementations. +This field is not required for general use, it allows performance tuning by users +familiar with the underlying forwarder technology. +Currently supported: `fluentd`. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentd|object| +|====================== + +=== .spec.forwarder.fluentd + +FluentdForwarderSpec represents the configuration for forwarders of type fluentd. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|buffer|object| +|inFile|object| +|====================== + +=== .spec.forwarder.fluentd.buffer + +FluentdBufferSpec represents a subset of fluentd buffer parameters to tune +the buffer configuration for all fluentd outputs. It supports a subset of +parameters to configure buffer and queue sizing, flush operations and retry +flushing. + +For general parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#buffering-parameters + +For flush parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#flushing-parameters + +For retry parameters refer to: +https://docs.fluentd.org/configuration/buffer-section#retries-parameters + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be +|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush +|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. 
The mode
+|flushThreadCount|int| *(optional)* FlushThreadCount represents the number of threads used by the fluentd buffer
+|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to
+|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff
+|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up
+|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can
+|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush
+|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd
+|======================
+
+=== .spec.forwarder.fluentd.inFile
+
+FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters
+to tune the configuration for all fluentd in-tail inputs.
+
+For general parameters refer to:
+https://docs.fluentd.org/input/tail#parameters
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation
+|======================
+
+=== .spec.logStore
+
+The LogStoreSpec contains information about how logs are stored.
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|elasticsearch|object| **(DEPRECATED)** Specification of the Elasticsearch Log Store component
+|lokistack|object| LokiStack contains information about which LokiStack to use for log storage if Type is set to LogStoreTypeLokiStack.
+|retentionPolicy|object| **(DEPRECATED)** *(optional)* Retention policy defines the maximum age for an Elasticsearch index after which it should be deleted
+|type|string| The Type of Log Storage to configure. 
The operator currently supports either using ElasticSearch +|====================== + +=== .spec.logStore.elasticsearch + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeCount|int| Number of nodes to deploy for Elasticsearch +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|proxy|object| Specification of the Elasticsearch Proxy component +|redundancyPolicy|string| *(optional)* +|resources|object| *(optional)* The resource requirements for Elasticsearch +|storage|object| *(optional)* The storage specification for Elasticsearch data nodes +|tolerations|array| +|====================== + +=== .spec.logStore.elasticsearch.nodeSelector + +Type:: object + +=== .spec.logStore.elasticsearch.proxy + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| +|====================== + +=== .spec.logStore.elasticsearch.proxy.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|claims|array| *(optional)* Claims lists the names of resources, defined in spec.resourceClaims, +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
+|====================== + +=== .spec.logStore.elasticsearch.proxy.resources.claims[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name must match the name of one entry in pod.spec.resourceClaims of +|====================== + +=== .spec.logStore.elasticsearch.proxy.resources.limits + +Type:: object + +=== .spec.logStore.elasticsearch.proxy.resources.requests + +Type:: object + +=== .spec.logStore.elasticsearch.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|claims|array| *(optional)* Claims lists the names of resources, defined in spec.resourceClaims, +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.logStore.elasticsearch.resources.claims[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name must match the name of one entry in pod.spec.resourceClaims of +|====================== + +=== .spec.logStore.elasticsearch.resources.limits + +Type:: object + +=== .spec.logStore.elasticsearch.resources.requests + +Type:: object + +=== .spec.logStore.elasticsearch.storage + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|size|object| The max storage capacity for the node to provision. +|storageClassName|string| *(optional)* The name of the storage class to use with creating the node's PVC. +|====================== + +=== .spec.logStore.elasticsearch.storage.size + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|Format|string| Change Format at will. 
See the comment for Canonicalize for +|d|object| d is the quantity in inf.Dec form if d.Dec != nil +|i|int| i is the quantity in int64 scaled form, if d.Dec == nil +|s|string| s is the generated value of this quantity to avoid recalculation +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|Dec|object| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|scale|int| +|unscaled|object| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|abs|Word| sign +|neg|bool| +|====================== + +=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled.abs + +Type:: Word + +=== .spec.logStore.elasticsearch.storage.size.i + +Type:: int + +[options="header"] +|====================== +|Property|Type|Description + +|scale|int| +|value|int| +|====================== + +=== .spec.logStore.elasticsearch.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. 
+|====================== + +=== .spec.logStore.elasticsearch.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.logStore.lokistack + +LokiStackStoreSpec is used to set up cluster-logging to use a LokiStack as logging storage. +It points to an existing LokiStack in the same namespace. + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name of the LokiStack resource. +|====================== + +=== .spec.logStore.retentionPolicy + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|application|object| +|audit|object| +|infra|object| +|====================== + +=== .spec.logStore.retentionPolicy.application + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.application.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 
1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.logStore.retentionPolicy.audit + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.audit.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.logStore.retentionPolicy.infra + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) +|maxAge|string| *(optional)* +|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age +|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job +|====================== + +=== .spec.logStore.retentionPolicy.infra.namespaceSpec[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 
1d) +|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) +|====================== + +=== .spec.visualization + +This is the struct that will contain information pertinent to Log visualization (Kibana) + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|kibana|object| **(DEPRECATED)** *(optional)* Specification of the Kibana Visualization component +|nodeSelector|object| Define which Nodes the Pods are scheduled on. +|ocpConsole|object| *(optional)* OCPConsole is the specification for the OCP console plugin +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|type|string| The type of Visualization to configure +|====================== + +=== .spec.visualization.kibana + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| **(DEPRECATED)** Define which Nodes the Pods are scheduled on. +|proxy|object| Specification of the Kibana Proxy component +|replicas|int| *(optional)* Number of instances to deploy for a Kibana deployment +|resources|object| *(optional)* The resource requirements for Kibana +|tolerations|array| **(DEPRECATED)** Define the tolerations the Pods will accept +|====================== + +=== .spec.visualization.kibana.nodeSelector + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. 
+==== + +Type:: object + +=== .spec.visualization.kibana.proxy + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|resources|object| +|====================== + +=== .spec.visualization.kibana.proxy.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|claims|array| *(optional)* Claims lists the names of resources, defined in spec.resourceClaims, +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.visualization.kibana.proxy.resources.claims[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name must match the name of one entry in pod.spec.resourceClaims of +|====================== + +=== .spec.visualization.kibana.proxy.resources.limits + +Type:: object + +=== .spec.visualization.kibana.proxy.resources.requests + +Type:: object + +=== .spec.visualization.kibana.replicas + +Type:: int + +=== .spec.visualization.kibana.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|claims|array| *(optional)* Claims lists the names of resources, defined in spec.resourceClaims, +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
+|====================== + +=== .spec.visualization.kibana.resources.claims[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name must match the name of one entry in pod.spec.resourceClaims of +|====================== + +=== .spec.visualization.kibana.resources.limits + +Type:: object + +=== .spec.visualization.kibana.resources.requests + +Type:: object + +=== .spec.visualization.kibana.tolerations[] + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.visualization.kibana.tolerations[].tolerationSeconds + +Type:: int + +=== .spec.visualization.nodeSelector + +Type:: object + +=== .spec.visualization.ocpConsole + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|logsLimit|int| *(optional)* LogsLimit is the max number of entries returned for a query. +|timeout|string| *(optional)* Timeout is the max duration before a query timeout +|====================== + +=== .spec.visualization.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. 
+|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. +|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.visualization.tolerations[].tolerationSeconds + +Type:: int + +=== .status + +ClusterLoggingStatus defines the observed state of ClusterLogging + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|collection|object| **(DEPRECATED)** *(optional)* Deprecated. +|conditions|object| *(optional)* +|curation|object| **(DEPRECATED)** *(optional)* +|logStore|object| *(optional)* +|visualization|object| *(optional)* +|====================== + +=== .status.collection + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|logs|object| *(optional)* +|====================== + +=== .status.collection.logs + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|fluentdStatus|object| *(optional)* +|====================== + +=== .status.collection.logs.fluentdStatus + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|clusterCondition|object| *(optional)* +|daemonSet|string| *(optional)* +|nodes|object| *(optional)* +|pods|string| *(optional)* +|====================== + +=== .status.collection.logs.fluentdStatus.clusterCondition + +`operator-sdk generate crds` does not allow map-of-slice, must use a named type. 
+ +Type:: object + +=== .status.collection.logs.fluentdStatus.nodes + +Type:: object + +=== .status.conditions + +Type:: object + +=== .status.curation + +[IMPORTANT] +==== +This API key has been deprecated and is planned for removal in a future release. For more information, see the release notes for logging on Red{nbsp}Hat OpenShift. +==== + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|curatorStatus|array| *(optional)* +|====================== + +=== .status.curation.curatorStatus[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|clusterCondition|object| *(optional)* +|cronJobs|string| *(optional)* +|schedules|string| *(optional)* +|suspended|bool| *(optional)* +|====================== + +=== .status.curation.curatorStatus[].clusterCondition + +`operator-sdk generate crds` does not allow map-of-slice, must use a named type. + +Type:: object + +=== .status.logStore + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|elasticsearchStatus|array| *(optional)* +|====================== + +=== .status.logStore.elasticsearchStatus[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|cluster|object| *(optional)* +|clusterConditions|object| *(optional)* +|clusterHealth|string| *(optional)* +|clusterName|string| *(optional)* +|deployments|array| *(optional)* +|nodeConditions|object| *(optional)* +|nodeCount|int| *(optional)* +|pods|object| *(optional)* +|replicaSets|array| *(optional)* +|shardAllocationEnabled|string| *(optional)* +|statefulSets|array| *(optional)* +|====================== + +=== .status.logStore.elasticsearchStatus[].cluster + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|activePrimaryShards|int| The number of Active Primary Shards for the Elasticsearch Cluster +|activeShards|int| The number of Active Shards for the Elasticsearch 
Cluster
+|initializingShards|int| The number of Initializing Shards for the Elasticsearch Cluster
+|numDataNodes|int| The number of Data Nodes for the Elasticsearch Cluster
+|numNodes|int| The number of Nodes for the Elasticsearch Cluster
+|pendingTasks|int| 
+|relocatingShards|int| The number of Relocating Shards for the Elasticsearch Cluster
+|status|string| The current Status of the Elasticsearch Cluster
+|unassignedShards|int| The number of Unassigned Shards for the Elasticsearch Cluster
+|======================
+
+=== .status.logStore.elasticsearchStatus[].clusterConditions
+
+Type:: object
+
+=== .status.logStore.elasticsearchStatus[].deployments[]
+
+Type:: array
+
+=== .status.logStore.elasticsearchStatus[].nodeConditions
+
+Type:: object
+
+=== .status.logStore.elasticsearchStatus[].pods
+
+Type:: object
+
+=== .status.logStore.elasticsearchStatus[].replicaSets[]
+
+Type:: array
+
+=== .status.logStore.elasticsearchStatus[].statefulSets[]
+
+Type:: array
+
+=== .status.visualization
+
+Type:: object
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|kibanaStatus|array| *(optional)*
+|======================
+
+=== .status.visualization.kibanaStatus[]
+
+Type:: array
+
+[options="header"]
+|======================
+|Property|Type|Description
+
+|clusterCondition|object| *(optional)*
+|deployment|string| *(optional)*
+|pods|string| *(optional)* The status for each of the Kibana pods for the Visualization component
+|replicaSets|array| *(optional)*
+|replicas|int| *(optional)*
+|======================
+
+=== .status.visualization.kibanaStatus[].clusterCondition
+
+Type:: object
+
+=== .status.visualization.kibanaStatus[].replicaSets[]
+
+Type:: array
+
+[id="logging-5-6-reference-LogFileMetricExporter"]
+== LogFileMetricExporter
+
+A Log File Metric Exporter instance. 
LogFileMetricExporter is the Schema for the logFileMetricExporters API + +[options="header"] +|====================== +|Property|Type|Description + +|spec|object| +|status|object| +|====================== + +=== .spec + +LogFileMetricExporterSpec defines the desired state of LogFileMetricExporter + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. +|resources|object| *(optional)* The resource requirements for the LogFileMetricExporter +|tolerations|array| *(optional)* Define the tolerations the Pods will accept +|====================== + +=== .spec.nodeSelector + +Type:: object + +=== .spec.resources + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|claims|array| *(optional)* Claims lists the names of resources, defined in spec.resourceClaims, +|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. +|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. +|====================== + +=== .spec.resources.claims[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|name|string| Name must match the name of one entry in pod.spec.resourceClaims of +|====================== + +=== .spec.resources.limits + +Type:: object + +=== .spec.resources.requests + +Type:: object + +=== .spec.tolerations[] + +Type:: array + +[options="header"] +|====================== +|Property|Type|Description + +|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. +|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. +|operator|string| *(optional)* Operator represents a key's relationship to the value. 
+|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be +|value|string| *(optional)* Value is the taint value the toleration matches to. +|====================== + +=== .spec.tolerations[].tolerationSeconds + +Type:: int + +=== .status + +LogFileMetricExporterStatus defines the observed state of LogFileMetricExporter + +Type:: object + +[options="header"] +|====================== +|Property|Type|Description + +|conditions|object| Conditions of the Log File Metrics Exporter. +|====================== + +=== .status.conditions + +Type:: object