diff --git a/.github/workflows/publish-version-4.5.yaml b/.github/workflows/publish-version-4.5.yaml new file mode 100644 index 000000000..fd8cb46e5 --- /dev/null +++ b/.github/workflows/publish-version-4.5.yaml @@ -0,0 +1,86 @@ +name: Publish version 4.5 + +env: + doc_versionnumber: "4.5" + +on: + push: + branches: + - release-4.5 + workflow_dispatch: + +jobs: + build: + name: Build + runs-on: ubuntu-latest + + permissions: + contents: write + pages: write + id-token: write + + concurrency: + group: "pages" + cancel-in-progress: false + + environment: + name: github-pages-test + url: ${{ steps.deployment.outputs.page_url }} + + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: release-4.5 + submodules: 'recursive' + + - name: Set up Pages + id: pages + uses: actions/configure-pages@1f0c5cde4bc74cd7e1254d0cb4de8d49e9068c7d # v4.0.0 + + - name: Set up Hugo + uses: peaceiris/actions-hugo@16361eb4acea8698b220b76c0d4e84e1fd22c61d # v2.6.0 + with: + hugo-version: '0.110.0' + extended: true + + - name: Set up Node + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version: 18 + + - name: Install dependencies + run: | + cd themes/docsy + npm install + + - name: Set up PostCSS + run: npm install --save-dev autoprefixer postcss-cli postcss + + - name: Build + run: hugo --environment production --baseURL ${{ steps.pages.outputs.base_url }}/${{ env.doc_versionnumber }}/ + + # - name: Upload artifact + # uses: actions/upload-pages-artifact@64bcae551a7b18bcb9a09042ddf1960979799187 # v1.0.8 + # with: + # path: ./public/ + + - name: Checkout code to update + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: 'gh-pages-test' + path: 'tmp/gh-pages' + # - name: Display file structure + # run: ls -R + - name: Copy built site to GH pages + run: | + rm -rf tmp/gh-pages/${{ env.doc_versionnumber }} + mkdir -p tmp/gh-pages/${{ 
env.doc_versionnumber }} + mv public/* tmp/gh-pages/${{ env.doc_versionnumber }} + - name: Commit & Push changes + uses: actions-js/push@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + message: 'Publish updated docs for ${{ env.doc_versionnumber }}, ${{ github.event.repository.pushed_at}}' + branch: 'gh-pages-test' + directory: 'tmp/gh-pages' diff --git a/config/_default/config.toml b/config/_default/config.toml index acaf7ac67..02edd83cd 100644 --- a/config/_default/config.toml +++ b/config/_default/config.toml @@ -69,7 +69,7 @@ twitter = "calisti12" # Used in the "version-banner" partial to display a version number for the # current doc set. - version = "4.5.1" + version = "4.6.0" version_menu = "Releases" version_menu_canonicallinks = true version_menu_pagelinks = true @@ -169,9 +169,13 @@ twitter = "calisti12" ####################### # Add your release versions here [[params.versions]] - version = "latest (4.5.1)" + version = "latest (4.6.0)" githubbranch = "master" url = "" +[[params.versions]] + version = "4.5" + githubbranch = "release-4.5" + url = "/4.5/" [[params.versions]] version = "4.4" githubbranch = "release-4.4" @@ -192,7 +196,7 @@ twitter = "calisti12" # Cascade version number to every doc page (needed to create sections for pagefind search) # Update this parameter when creating a new version [[cascade]] -body_attribute = 'data-pagefind-filter="section:4.5"' +body_attribute = 'data-pagefind-filter="section:4.6"' [cascade._target] path = '/docs/**' diff --git a/content/docs/configuration/crds/v1beta1/common_types.md b/content/docs/configuration/crds/v1beta1/common_types.md index e5b479732..7b6847464 100644 --- a/content/docs/configuration/crds/v1beta1/common_types.md +++ b/content/docs/configuration/crds/v1beta1/common_types.md @@ -93,6 +93,7 @@ Security defines Fluentd, FluentbitAgent deployment security properties ### podSecurityPolicyCreate (bool, optional) {#security-podsecuritypolicycreate} +Warning: this is not supported anymore 
and does nothing ### roleBasedAccessControlCreate (*bool, optional) {#security-rolebasedaccesscontrolcreate} diff --git a/content/docs/configuration/crds/v1beta1/fluentbit_types.md b/content/docs/configuration/crds/v1beta1/fluentbit_types.md index da188bf65..449231203 100644 --- a/content/docs/configuration/crds/v1beta1/fluentbit_types.md +++ b/content/docs/configuration/crds/v1beta1/fluentbit_types.md @@ -65,6 +65,9 @@ FluentbitSpec defines the desired state of FluentbitAgent ### bufferVolumeResources (corev1.ResourceRequirements, optional) {#fluentbitspec-buffervolumeresources} +### configHotReload (*HotReload, optional) {#fluentbitspec-confighotreload} + + ### coroStackSize (int32, optional) {#fluentbitspec-corostacksize} Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set too small value (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. (default: 24576) @@ -179,7 +182,7 @@ Available in Logging operator version 4.4 and later. ### logLevel (string, optional) {#fluentbitspec-loglevel} -Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if 'debug' is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled. (default: info) +Set the logging verbosity level. Allowed values are: error, warn, info, debug and trace. Values are accumulative, e.g: if 'debug' is set, it will include error, warning, info and debug. Note that trace mode is only available if Fluent Bit was built with the WITH_TRACE option enabled. 
Default: info @@ -290,57 +293,57 @@ FluentbitNetwork defines network configuration for fluentbit ### connectTimeout (*uint32, optional) {#fluentbitnetwork-connecttimeout} -Sets the timeout for connecting to an upstream +Sets the timeout for connecting to an upstream -Default: 10 +Default: 10 ### connectTimeoutLogError (*bool, optional) {#fluentbitnetwork-connecttimeoutlogerror} -On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message +On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message -Default: true +Default: true ### dnsMode (string, optional) {#fluentbitnetwork-dnsmode} -Sets the primary transport layer protocol used by the asynchronous DNS resolver for connections established +Sets the primary transport layer protocol used by the asynchronous DNS resolver for connections established -Default: UDP, UDP or TCP +Default: UDP, UDP or TCP ### dnsPreferIpv4 (*bool, optional) {#fluentbitnetwork-dnspreferipv4} -Prioritize IPv4 DNS results when trying to establish a connection +Prioritize IPv4 DNS results when trying to establish a connection -Default: false +Default: false ### dnsResolver (string, optional) {#fluentbitnetwork-dnsresolver} -Select the primary DNS resolver type +Select the primary DNS resolver type -Default: ASYNC, LEGACY or ASYNC +Default: ASYNC, LEGACY or ASYNC ### keepalive (*bool, optional) {#fluentbitnetwork-keepalive} -Whether or not TCP keepalive is used for the upstream connection +Whether or not TCP keepalive is used for the upstream connection -Default: true +Default: true ### keepaliveIdleTimeout (*uint32, optional) {#fluentbitnetwork-keepaliveidletimeout} -How long in seconds a TCP keepalive connection can be idle before being recycled +How long in seconds a TCP keepalive connection can be idle before being recycled -Default: 30 +Default: 30 ### keepaliveMaxRecycle (*uint32, optional) {#fluentbitnetwork-keepalivemaxrecycle} 
-How many times a TCP keepalive connection can be used before being recycled +How many times a TCP keepalive connection can be used before being recycled -Default: 0, disabled +Default: 0, disabled ### sourceAddress (string, optional) {#fluentbitnetwork-sourceaddress} -Specify network address (interface) to use for connection and data traffic. +Specify network address (interface) to use for connection and data traffic. -Default: disabled +Default: disabled ## BufferStorage @@ -349,13 +352,19 @@ BufferStorage is the Service Section Configuration of fluent-bit ### storage.backlog.mem_limit (string, optional) {#bufferstorage-storage.backlog.mem_limit} -If storage.path is set, Fluent Bit will look for data chunks that were not delivered and are still in the storage layer, these are called backlog data. This option configure a hint of maximum value of memory to use when processing these records. +If storage.path is set, Fluent Bit will look for data chunks that were not delivered and are still in the storage layer, these are called backlog data. This option configure a hint of maximum value of memory to use when processing these records. Default: 5M ### storage.checksum (string, optional) {#bufferstorage-storage.checksum} -Enable the data integrity check when writing and reading data from the filesystem. The storage layer uses the CRC32 algorithm. +Enable the data integrity check when writing and reading data from the filesystem. The storage layer uses the CRC32 algorithm. + +Default: Off + +### storage.delete_irrecoverable_chunks (string, optional) {#bufferstorage-storage.delete_irrecoverable_chunks} + +When enabled, irrecoverable chunks will be deleted during runtime, and any other irrecoverable chunk located in the configured storage path directory will be deleted when Fluent Bit starts. Default: Off @@ -372,7 +381,7 @@ Set an optional location in the file system to store streams and chunks of data. 
### storage.sync (string, optional) {#bufferstorage-storage.sync} -Configure the synchronization mode used to store the data into the file system. It can take the values normal or full. +Configure the synchronization mode used to store the data into the file system. It can take the values normal or full. Default: normal @@ -400,19 +409,30 @@ The retry failure count to meet the unhealthy requirement, this is a sum for all Default: 5 +## HotReload + +HotReload configuration + +### image (ImageSpec, optional) {#hotreload-image} + + +### resources (corev1.ResourceRequirements, optional) {#hotreload-resources} + + + ## InputTail InputTail defines FluentbitAgent tail input configuration The tail input plugin allows to monitor one or several text files. It has a similar behavior like tail -f shell command. ### Buffer_Chunk_Size (string, optional) {#inputtail-buffer_chunk_size} -Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification. +Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification. Default: 32k ### Buffer_Max_Size (string, optional) {#inputtail-buffer_max_size} -Set the limit of the buffer size per monitored file. When a buffer needs to be increased (e.g: very long lines), this value is used to restrict how much the memory buffer can grow. If reading a file exceed this limit, the file is removed from the monitored file list. The value must be according to the Unit Size specification. +Set the limit of the buffer size per monitored file. When a buffer needs to be increased (e.g: very long lines), this value is used to restrict how much the memory buffer can grow. If reading a file exceed this limit, the file is removed from the monitored file list. The value must be according to the Unit Size specification. 
Default: Buffer_Chunk_Size @@ -423,31 +443,31 @@ Specify the database file to keep track of monitored files and offsets. ### DB.journal_mode (string, optional) {#inputtail-db.journal_mode} -sets the journal mode for databases (WAL). Enabling WAL provides higher performance. Note that WAL is not compatible with shared network file systems. +sets the journal mode for databases (WAL). Enabling WAL provides higher performance. Note that WAL is not compatible with shared network file systems. -Default: WAL +Default: WAL ### DB.locking (*bool, optional) {#inputtail-db.locking} -Specify that the database will be accessed only by Fluent Bit. Enabling this feature helps to increase performance when accessing the database but it restrict any external tool to query the content. +Specify that the database will be accessed only by Fluent Bit. Enabling this feature helps to increase performance when accessing the database but it restrict any external tool to query the content. -Default: true +Default: true ### DB_Sync (string, optional) {#inputtail-db_sync} -Set a default synchronization (I/O) method. Values: Extra, Full, Normal, Off. This flag affects how the internal SQLite engine do synchronization to disk, for more details about each option please refer to this section. +Set a default synchronization (I/O) method. Values: Extra, Full, Normal, Off. This flag affects how the internal SQLite engine do synchronization to disk, for more details about each option please refer to this section. Default: Full ### Docker_Mode (string, optional) {#inputtail-docker_mode} -If enabled, the plugin will recombine split Docker log lines before passing them to any parser as configured above. This mode cannot be used at the same time as Multiline. +If enabled, the plugin will recombine split Docker log lines before passing them to any parser as configured above. This mode cannot be used at the same time as Multiline. 
Default: Off ### Docker_Mode_Flush (string, optional) {#inputtail-docker_mode_flush} -Wait period time in seconds to flush queued unfinished split lines. +Wait period time in seconds to flush queued unfinished split lines. Default: 4 @@ -468,7 +488,7 @@ Ignores files that have been last modified before this time in seconds. Supports ### Key (string, optional) {#inputtail-key} -When a message is unstructured (no parser applied), it's appended as a string under the key name log. This option allows to define an alternative name for that key. +When a message is unstructured (no parser applied), it's appended as a string under the key name log. This option allows to define an alternative name for that key. Default: log @@ -479,21 +499,21 @@ Set a limit of memory that Tail plugin can use when appending data to the Engine ### Multiline (string, optional) {#inputtail-multiline} -If enabled, the plugin will try to discover multiline messages and use the proper parsers to compose the outgoing messages. Note that when this option is enabled the Parser option is not used. +If enabled, the plugin will try to discover multiline messages and use the proper parsers to compose the outgoing messages. Note that when this option is enabled the Parser option is not used. Default: Off ### Multiline_Flush (string, optional) {#inputtail-multiline_flush} -Wait period time in seconds to process queued multiline messages +Wait period time in seconds to process queued multiline messages Default: 4 ### multiline.parser ([]string, optional) {#inputtail-multiline.parser} -Specify one or multiple parser definitions to apply to the content. Part of the new Multiline Core support in 1.8 +Specify one or multiple parser definitions to apply to the content. 
Part of the new Multiline Core support in 1.8 -Default: "" +Default: "" ### Parser (string, optional) {#inputtail-parser} @@ -527,19 +547,19 @@ For new discovered files on start (without a database offset/position), read the ### Refresh_Interval (string, optional) {#inputtail-refresh_interval} -The interval of refreshing the list of watched files in seconds. +The interval of refreshing the list of watched files in seconds. Default: 60 ### Rotate_Wait (string, optional) {#inputtail-rotate_wait} -Specify the number of extra time in seconds to monitor a file once is rotated in case some pending data is flushed. +Specify the number of extra time in seconds to monitor a file once is rotated in case some pending data is flushed. Default: 5 ### Skip_Long_Lines (string, optional) {#inputtail-skip_long_lines} -When a monitored file reach it buffer capacity due to a very long line (Buffer_Max_Size), the default behavior is to stop monitoring that file. Skip_Long_Lines alter that behavior and instruct Fluent Bit to skip long lines and continue processing other lines that fits into the buffer size. +When a monitored file reach it buffer capacity due to a very long line (Buffer_Max_Size), the default behavior is to stop monitoring that file. Skip_Long_Lines alter that behavior and instruct Fluent Bit to skip long lines and continue processing other lines that fits into the buffer size. Default: Off @@ -566,55 +586,55 @@ FilterKubernetes Fluent Bit Kubernetes Filter allows to enrich your log files wi ### Annotations (string, optional) {#filterkubernetes-annotations} -Include Kubernetes resource annotations in the extra metadata. +Include Kubernetes resource annotations in the extra metadata. Default: On ### Buffer_Size (string, optional) {#filterkubernetes-buffer_size} -Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification. 
A value of 0 results in no limit, and the buffer will expand as-needed. Note that if pod specifications exceed the buffer limit, the API response will be discarded when retrieving metadata, and some kubernetes metadata will fail to be injected to the logs. If this value is empty we will set it "0". +Set the buffer size for HTTP client when reading responses from Kubernetes API server. The value must be according to the Unit Size specification. A value of 0 results in no limit, and the buffer will expand as-needed. Note that if pod specifications exceed the buffer limit, the API response will be discarded when retrieving metadata, and some kubernetes metadata will fail to be injected to the logs. If this value is empty we will set it "0". Default: "0" ### Cache_Use_Docker_Id (string, optional) {#filterkubernetes-cache_use_docker_id} -When enabled, metadata will be fetched from K8s when docker_id is changed. +When enabled, metadata will be fetched from K8s when docker_id is changed. Default: Off ### DNS_Retries (string, optional) {#filterkubernetes-dns_retries} -DNS lookup retries N times until the network start working +DNS lookup retries N times until the network start working Default: 6 ### DNS_Wait_Time (string, optional) {#filterkubernetes-dns_wait_time} -DNS lookup interval between network status checks +DNS lookup interval between network status checks Default: 30 ### Dummy_Meta (string, optional) {#filterkubernetes-dummy_meta} -If set, use dummy-meta data (for test/dev purposes) +If set, use dummy-meta data (for test/dev purposes) Default: Off ### K8S-Logging.Exclude (string, optional) {#filterkubernetes-k8s-logging.exclude} -Allow Kubernetes Pods to exclude their logs from the log processor (read more about it in Kubernetes Annotations section). +Allow Kubernetes Pods to exclude their logs from the log processor (read more about it in Kubernetes Annotations section). 
Default: On ### K8S-Logging.Parser (string, optional) {#filterkubernetes-k8s-logging.parser} -Allow Kubernetes Pods to suggest a pre-defined Parser (read more about it in Kubernetes Annotations section) +Allow Kubernetes Pods to suggest a pre-defined Parser (read more about it in Kubernetes Annotations section) Default: Off ### Keep_Log (string, optional) {#filterkubernetes-keep_log} -When Keep_Log is disabled, the log field is removed from the incoming message once it has been successfully merged (Merge_Log must be enabled as well). +When Keep_Log is disabled, the log field is removed from the incoming message once it has been successfully merged (Merge_Log must be enabled as well). Default: On @@ -631,7 +651,7 @@ Absolute path to scan for certificate files ### Kube_Meta_Cache_TTL (string, optional) {#filterkubernetes-kube_meta_cache_ttl} -Configurable TTL for K8s cached metadata. By default, it is set to 0 which means TTL for cache entries is disabled and cache entries are evicted at random when capacity is reached. In order to enable this option, you should set the number to a time interval. For example, set this value to 60 or 60s and cache entries which have been created more than 60s will be evicted. +Configurable TTL for K8s cached metadata. By default, it is set to 0 which means TTL for cache entries is disabled and cache entries are evicted at random when capacity is reached. In order to enable this option, you should set the number to a time interval. For example, set this value to 60 or 60s and cache entries which have been created more than 60s will be evicted. Default: 0 @@ -660,19 +680,19 @@ Default: 600 ### Kube_URL (string, optional) {#filterkubernetes-kube_url} -API Server end-point. +API Server end-point. 
Default: `https://kubernetes.default.svc:443` ### Kubelet_Port (string, optional) {#filterkubernetes-kubelet_port} -kubelet port using for HTTP request, this only works when Use_Kubelet set to On +kubelet port using for HTTP request, this only works when Use_Kubelet set to On Default: 10250 ### Labels (string, optional) {#filterkubernetes-labels} -Include Kubernetes resource labels in the extra metadata. +Include Kubernetes resource labels in the extra metadata. Default: On @@ -695,7 +715,7 @@ When Merge_Log is enabled, the filter tries to assume the log field from the inc ### Merge_Log_Trim (string, optional) {#filterkubernetes-merge_log_trim} -When Merge_Log is enabled, trim (remove possible \n or \r) field values. +When Merge_Log is enabled, trim (remove possible \n or \r) field values. Default: On @@ -711,25 +731,25 @@ Set an alternative Parser to process record Tag and extract pod_name, namespace_ ### tls.debug (string, optional) {#filterkubernetes-tls.debug} -Debug level between 0 (nothing) and 4 (every detail). +Debug level between 0 (nothing) and 4 (every detail). Default: -1 ### tls.verify (string, optional) {#filterkubernetes-tls.verify} -When enabled, turns on certificate validation when connecting to the Kubernetes API server. +When enabled, turns on certificate validation when connecting to the Kubernetes API server. Default: On ### Use_Journal (string, optional) {#filterkubernetes-use_journal} -When enabled, the filter reads logs coming in Journald format. +When enabled, the filter reads logs coming in Journald format. Default: Off ### Use_Kubelet (string, optional) {#filterkubernetes-use_kubelet} -This is an optional feature flag to get metadata information from kubelet instead of calling Kube Server API to enhance the log. +This is an optional feature flag to get metadata information from kubelet instead of calling Kube Server API to enhance the log. 
Default: Off diff --git a/content/docs/configuration/crds/v1beta1/fluentd_config_types.md b/content/docs/configuration/crds/v1beta1/fluentd_config_types.md new file mode 100644 index 000000000..322573904 --- /dev/null +++ b/content/docs/configuration/crds/v1beta1/fluentd_config_types.md @@ -0,0 +1,48 @@ +--- +title: FluentdConfig +weight: 200 +generated_file: true +--- + +## FluentdConfig + +### (metav1.TypeMeta, required) {#fluentdconfig-} + + +### metadata (metav1.ObjectMeta, optional) {#fluentdconfig-metadata} + + +### spec (FluentdSpec, optional) {#fluentdconfig-spec} + + +### status (FluentdConfigStatus, optional) {#fluentdconfig-status} + + + +## FluentdConfigStatus + +### active (*bool, optional) {#fluentdconfigstatus-active} + + +### logging (string, optional) {#fluentdconfigstatus-logging} + + +### problems ([]string, optional) {#fluentdconfigstatus-problems} + + +### problemsCount (int, optional) {#fluentdconfigstatus-problemscount} + + + +## FluentdConfigList + +### (metav1.TypeMeta, required) {#fluentdconfiglist-} + + +### metadata (metav1.ListMeta, optional) {#fluentdconfiglist-metadata} + + +### items ([]FluentdConfig, required) {#fluentdconfiglist-items} + + + diff --git a/content/docs/configuration/crds/v1beta1/fluentd_types.md b/content/docs/configuration/crds/v1beta1/fluentd_types.md index 376d51d62..7816d8b7c 100644 --- a/content/docs/configuration/crds/v1beta1/fluentd_types.md +++ b/content/docs/configuration/crds/v1beta1/fluentd_types.md @@ -116,6 +116,9 @@ Ignore same log lines [more info]( https://docs.fluentd.org/deployment/logging#i ### nodeSelector (map[string]string, optional) {#fluentdspec-nodeselector} +### pdb (*PdbInput, optional) {#fluentdspec-pdb} + + ### podPriorityClassName (string, optional) {#fluentdspec-podpriorityclassname} @@ -172,10 +175,6 @@ Available in Logging operator version 4.5 and later. 
Configure sidecar container - - - - ## FluentOutLogrotate ### age (string, optional) {#fluentoutlogrotate-age} @@ -279,3 +278,15 @@ Available in Logging operator version 4.4 and later. Configurable security conte +## PdbInput + +### maxUnavailable (*intstr.IntOrString, optional) {#pdbinput-maxunavailable} + + +### minAvailable (*intstr.IntOrString, optional) {#pdbinput-minavailable} + + +### unhealthyPodEvictionPolicy (*policyv1.UnhealthyPodEvictionPolicyType, optional) {#pdbinput-unhealthypodevictionpolicy} + + + diff --git a/content/docs/configuration/crds/v1beta1/logging_types.md b/content/docs/configuration/crds/v1beta1/logging_types.md index 4c615bdda..94e71d9cd 100644 --- a/content/docs/configuration/crds/v1beta1/logging_types.md +++ b/content/docs/configuration/crds/v1beta1/logging_types.md @@ -15,9 +15,9 @@ Allow configuration of cluster resources from any namespace. Mutually exclusive ### clusterDomain (*string, optional) {#loggingspec-clusterdomain} -Cluster domain name to be used when templating URLs to services . +Cluster domain name to be used when templating URLs to services . -Default: "cluster.local." +Default: "cluster.local." 
### configCheck (ConfigCheck, optional) {#loggingspec-configcheck} diff --git a/content/docs/configuration/crds/v1beta1/output_types.md b/content/docs/configuration/crds/v1beta1/output_types.md index 40b55136b..8f3bad24c 100644 --- a/content/docs/configuration/crds/v1beta1/output_types.md +++ b/content/docs/configuration/crds/v1beta1/output_types.md @@ -95,6 +95,12 @@ OutputSpec defines the desired state of Output ### syslog (*output.SyslogOutputConfig, optional) {#outputspec-syslog} +### vmwareLogInsight (*output.VMwareLogInsightOutput, optional) {#outputspec-vmwareloginsight} + + +### vmwareLogIntelligence (*output.VMwareLogIntelligenceOutputConfig, optional) {#outputspec-vmwarelogintelligence} + + ## OutputStatus diff --git a/content/docs/configuration/crds/v1beta1/syslogng_config_types.md b/content/docs/configuration/crds/v1beta1/syslogng_config_types.md new file mode 100644 index 000000000..a3c1e611b --- /dev/null +++ b/content/docs/configuration/crds/v1beta1/syslogng_config_types.md @@ -0,0 +1,48 @@ +--- +title: SyslogNGConfig +weight: 200 +generated_file: true +--- + +## SyslogNGConfig + +### (metav1.TypeMeta, required) {#syslogngconfig-} + + +### metadata (metav1.ObjectMeta, optional) {#syslogngconfig-metadata} + + +### spec (SyslogNGSpec, optional) {#syslogngconfig-spec} + + +### status (SyslogNGConfigStatus, optional) {#syslogngconfig-status} + + + +## SyslogNGConfigStatus + +### active (*bool, optional) {#syslogngconfigstatus-active} + + +### logging (string, optional) {#syslogngconfigstatus-logging} + + +### problems ([]string, optional) {#syslogngconfigstatus-problems} + + +### problemsCount (int, optional) {#syslogngconfigstatus-problemscount} + + + +## SyslogNGConfigList + +### (metav1.TypeMeta, required) {#syslogngconfiglist-} + + +### metadata (metav1.ListMeta, optional) {#syslogngconfiglist-metadata} + + +### items ([]SyslogNGConfig, required) {#syslogngconfiglist-items} + + + diff --git a/content/docs/configuration/crds/v1beta1/syslogng_types.md 
b/content/docs/configuration/crds/v1beta1/syslogng_types.md index dea5bab57..018d4f8fd 100644 --- a/content/docs/configuration/crds/v1beta1/syslogng_types.md +++ b/content/docs/configuration/crds/v1beta1/syslogng_types.md @@ -72,6 +72,7 @@ Available in Logging operator version 4.5 and later. Create [custom log metrics ## SourceDateParser + Available in Logging operator version 4.5 and later. Parses date automatically from the timestamp registered by the container runtime. diff --git a/content/docs/configuration/plugins/filters/concat.md b/content/docs/configuration/plugins/filters/concat.md index 867b1625d..f8caf55d9 100644 --- a/content/docs/configuration/plugins/filters/concat.md +++ b/content/docs/configuration/plugins/filters/concat.md @@ -23,7 +23,7 @@ The number of seconds after which the last received event log is flushed. If set ### keep_partial_key (bool, optional) {#concat-keep_partial_key} -If true, keep partial_key in concatenated records +If true, keep partial_key in concatenated records Default: False @@ -95,9 +95,9 @@ The label name to handle events caused by timeout. ### use_first_timestamp (bool, optional) {#concat-use_first_timestamp} -Use timestamp of first record when buffer is flushed. +Use timestamp of first record when buffer is flushed. -Default: False +Default: False ### use_partial_cri_logtag (bool, optional) {#concat-use_partial_cri_logtag} @@ -111,6 +111,7 @@ Use partial metadata to concatenate multiple records + ## Example `Concat` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/dedot.md b/content/docs/configuration/plugins/filters/dedot.md index f5fa597f3..253edb590 100644 --- a/content/docs/configuration/plugins/filters/dedot.md +++ b/content/docs/configuration/plugins/filters/dedot.md @@ -15,16 +15,17 @@ generated_file: true Will cause the plugin to recourse through nested structures (hashes and arrays), and remove dots in those key-names too. 
-Default: false +Default: false ### de_dot_separator (string, optional) {#dedotfilterconfig-de_dot_separator} -Separator +Separator Default: _ + ## Example `Dedot` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/detect_exceptions.md b/content/docs/configuration/plugins/filters/detect_exceptions.md index d8e118c0f..1b33217f5 100644 --- a/content/docs/configuration/plugins/filters/detect_exceptions.md +++ b/content/docs/configuration/plugins/filters/detect_exceptions.md @@ -9,7 +9,7 @@ generated_file: true This filter plugin consumes a log stream of JSON objects which contain single-line log messages. If a consecutive sequence of log messages form an exception stack trace, they forwarded as a single, combined JSON object. Otherwise, the input log data is forwarded as is. More info at https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions - > Note: As Tag management is not supported yet, this Plugin is **mutually exclusive** with [Tag normaliser](../tagnormaliser) +> Note: As Tag management is not supported yet, this Plugin is **mutually exclusive** with [Tag normaliser](../tagnormaliser) ## Example output configurations @@ -26,57 +26,58 @@ filters: ### force_line_breaks (bool, optional) {#detectexceptions-force_line_breaks} -Force line breaks between each lines when comibining exception stacks. +Force line breaks between each lines when combining exception stacks. -Default: false +Default: false ### languages ([]string, optional) {#detectexceptions-languages} -Programming languages for which to detect exceptions. +Programming languages for which to detect exceptions. Default: `[]` ### match_tag (string, optional) {#detectexceptions-match_tag} -Tag used in match directive. +Tag used in match directive. 
Default: `kubernetes.**` ### max_bytes (int, optional) {#detectexceptions-max_bytes} -Maximum number of bytes to flush (0 means no limit) +Maximum number of bytes to flush (0 means no limit) -Default: 0 +Default: 0 ### max_lines (int, optional) {#detectexceptions-max_lines} -Maximum number of lines to flush (0 means no limit) +Maximum number of lines to flush (0 means no limit) -Default: 1000 +Default: 1000 ### message (string, optional) {#detectexceptions-message} -The field which contains the raw message text in the input JSON data. +The field which contains the raw message text in the input JSON data. -Default: "" +Default: "" ### multiline_flush_interval (string, optional) {#detectexceptions-multiline_flush_interval} -The interval of flushing the buffer for multiline format. +The interval of flushing the buffer for multiline format. -Default: nil +Default: nil ### remove_tag_prefix (string, optional) {#detectexceptions-remove_tag_prefix} -The prefix to be removed from the input tag when outputting a record. +The prefix to be removed from the input tag when outputting a record. -Default: kubernetes +Default: kubernetes ### stream (string, optional) {#detectexceptions-stream} -Separate log streams by this field in the input JSON data. +Separate log streams by this field in the input JSON data. 
+ +Default: "" -Default: "" diff --git a/content/docs/configuration/plugins/filters/enhance_k8s.md b/content/docs/configuration/plugins/filters/enhance_k8s.md index 96e2d1626..b309be09d 100644 --- a/content/docs/configuration/plugins/filters/enhance_k8s.md +++ b/content/docs/configuration/plugins/filters/enhance_k8s.md @@ -4,82 +4,81 @@ weight: 200 generated_file: true --- -# [Enhance K8s Metadata](https://github.com/SumoLogic/sumologic-kubernetes-collection/tree/main/fluent-plugin-enhance-k8s-metadata) -## Overview - Fluentd Filter plugin to fetch several metadata for a Pod +## [Enhance K8s Metadata](https://github.com/SumoLogic/sumologic-kubernetes-fluentd/tree/main/fluent-plugin-enhance-k8s-metadata) +Fluentd Filter plugin to fetch several metadata for a Pod ## Configuration ## EnhanceK8s ### api_groups ([]string, optional) {#enhancek8s-api_groups} -Kubernetes resources api groups +Kubernetes resources api groups Default: `["apps/v1", "extensions/v1beta1"]` ### bearer_token_file (string, optional) {#enhancek8s-bearer_token_file} -Bearer token path +Bearer token path -Default: nil +Default: nil ### ca_file (secret.Secret, optional) {#enhancek8s-ca_file} -Kubernetes API CA file +Kubernetes API CA file -Default: nil +Default: nil ### cache_refresh (int, optional) {#enhancek8s-cache_refresh} -Cache refresh +Cache refresh -Default: 60*60 +Default: 60*60 ### cache_refresh_variation (int, optional) {#enhancek8s-cache_refresh_variation} -Cache refresh variation +Cache refresh variation -Default: 60*15 +Default: 60*15 ### cache_size (int, optional) {#enhancek8s-cache_size} -Cache size +Cache size -Default: 1000 +Default: 1000 ### cache_ttl (int, optional) {#enhancek8s-cache_ttl} -Cache TTL +Cache TTL -Default: 60*60*2 +Default: 60*60*2 ### client_cert (secret.Secret, optional) {#enhancek8s-client_cert} -Kubernetes API Client certificate +Kubernetes API Client certificate -Default: nil +Default: nil ### client_key (secret.Secret, optional) {#enhancek8s-client_key} -// 
Kubernetes API Client certificate key +Kubernetes API Client certificate key -Default: nil +Default: nil ### core_api_versions ([]string, optional) {#enhancek8s-core_api_versions} -Kubernetes core API version (for different Kubernetes versions) +Kubernetes core API version (for different Kubernetes versions) -Default: ['v1'] +Default: ['v1'] ### data_type (string, optional) {#enhancek8s-data_type} -Sumologic data type +Sumo Logic data type -Default: metrics +Default: metrics ### in_namespace_path ([]string, optional) {#enhancek8s-in_namespace_path} -parameters for read/write record +parameters for read/write record Default: `['$.namespace']` @@ -89,27 +88,28 @@ Default: `['$.pod','$.pod_name']` ### kubernetes_url (string, optional) {#enhancek8s-kubernetes_url} -Kubernetes API URL +Kubernetes API URL -Default: nil +Default: nil ### ssl_partial_chain (*bool, optional) {#enhancek8s-ssl_partial_chain} If `ca_file` is for an intermediate CA, or otherwise we do not have the root CA and want to trust the intermediate CA certs we do have, set this to `true` - this corresponds to the openssl s_client -partial_chain flag and X509_V_FLAG_PARTIAL_CHAIN -Default: false +Default: false ### secret_dir (string, optional) {#enhancek8s-secret_dir} -Service account directory +Service account directory -Default: /var/run/secrets/kubernetes.io/serviceaccount +Default: /var/run/secrets/kubernetes.io/serviceaccount ### verify_ssl (*bool, optional) {#enhancek8s-verify_ssl} -Verify SSL +Verify SSL + +Default: true -Default: true diff --git a/content/docs/configuration/plugins/filters/geoip.md b/content/docs/configuration/plugins/filters/geoip.md index 23729fdc0..63e6874c3 100644 --- a/content/docs/configuration/plugins/filters/geoip.md +++ b/content/docs/configuration/plugins/filters/geoip.md @@ -29,9 +29,9 @@ Specify optional geoip database (using bundled GeoLiteCity databse by default) ### geoip_lookup_keys (string, optional) {#geoip-geoip_lookup_keys} -Specify one or more geoip lookup 
field which has ip address +Specify one or more geoip lookup field which has ip address -Default: host +Default: host ### records ([]Record, optional) {#geoip-records} @@ -46,6 +46,7 @@ Default: true + ## Example `GeoIP` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/grep.md b/content/docs/configuration/plugins/filters/grep.md index 105ab54e0..19f0564db 100644 --- a/content/docs/configuration/plugins/filters/grep.md +++ b/content/docs/configuration/plugins/filters/grep.md @@ -50,6 +50,7 @@ Pattern expression to evaluate + ## Example `Regexp` filter configurations {{< highlight yaml >}} @@ -99,6 +100,7 @@ Pattern expression to evaluate + ## Example `Exclude` filter configurations {{< highlight yaml >}} @@ -173,16 +175,16 @@ spec: Fluentd config result: {{< highlight xml >}} - - - key first - pattern /^5\d\d$/ - - - key second - pattern /\.css$/ - - + + + key first + pattern /^5\d\d$/ + + + key second + pattern /\.css$/ + + {{}} @@ -228,16 +230,16 @@ spec: Fluentd config result: {{< highlight xml >}} - - - key first - pattern /^5\d\d$/ - - - key second - pattern /\.css$/ - - + + + key first + pattern /^5\d\d$/ + + + key second + pattern /\.css$/ + + {{}} diff --git a/content/docs/configuration/plugins/filters/kube_events_timestamp.md b/content/docs/configuration/plugins/filters/kube_events_timestamp.md index 656e62da7..0d7db994a 100644 --- a/content/docs/configuration/plugins/filters/kube_events_timestamp.md +++ b/content/docs/configuration/plugins/filters/kube_events_timestamp.md @@ -13,15 +13,16 @@ generated_file: true ### mapped_time_key (string, optional) {#kubeeventstimestampconfig-mapped_time_key} -Added time field name +Added time field name -Default: triggerts +Default: triggerts ### timestamp_fields ([]string, optional) {#kubeeventstimestampconfig-timestamp_fields} -Time field names in order of relevance +Time field names in order of relevance + +Default: event.eventTime, event.lastTimestamp, 
event.firstTimestamp -Default: event.eventTime, event.lastTimestamp, event.firstTimestamp diff --git a/content/docs/configuration/plugins/filters/parser.md b/content/docs/configuration/plugins/filters/parser.md index d01a51357..da5ae093c 100644 --- a/content/docs/configuration/plugins/filters/parser.md +++ b/content/docs/configuration/plugins/filters/parser.md @@ -71,9 +71,9 @@ Only available when using type: grok, multiline_grok. File that includes custom ### delimiter (string, optional) {#parse-section-delimiter} -Only available when using type: ltsv +Only available when using type: ltsv -Default: "\t" +Default: "\t" ### delimiter_pattern (string, optional) {#parse-section-delimiter_pattern} @@ -132,15 +132,15 @@ Names for fields on each line. (seperated by coma) ### label_delimiter (string, optional) {#parse-section-label_delimiter} -Only available when using type: ltsv +Only available when using type: ltsv -Default: ":" +Default: ":" ### local_time (bool, optional) {#parse-section-local_time} -If true, use local time. Otherwise, UTC is used. This is exclusive with utc. +If true, use local time. Otherwise, UTC is used. This is exclusive with utc. -Default: true +Default: true ### multiline ([]string, optional) {#parse-section-multiline} @@ -179,15 +179,15 @@ Specify time field for event time. If the event doesn't have this field, current ### time_type (string, optional) {#parse-section-time_type} -Parse/format value according to this type available values: float, unixtime, string +Parse/format value according to this type available values: float, unixtime, string -Default: string +Default: string ### timezone (string, optional) {#parse-section-timezone} -Use specified timezone. one can parse/format the time value in the specified timezone. +Use specified timezone. one can parse/format the time value in the specified timezone. 
-Default: nil +Default: nil ### type (string, optional) {#parse-section-type} @@ -201,9 +201,9 @@ Types casting the fields to proper types example: field1:type, field2:type ### utc (bool, optional) {#parse-section-utc} -If true, use UTC. Otherwise, local time is used. This is exclusive with localtime +If true, use UTC. Otherwise, local time is used. This is exclusive with localtime -Default: false +Default: false ## Parse Section (single) @@ -284,15 +284,15 @@ Specify time field for event time. If the event doesn't have this field, current ### time_type (string, optional) {#parse-section-time_type} -Parse/format value according to this type available values: float, unixtime, string +Parse/format value according to this type available values: float, unixtime, string -Default: string +Default: string ### timezone (string, optional) {#parse-section-timezone} -Use specified timezone. one can parse/format the time value in the specified timezone. +Use specified timezone. one can parse/format the time value in the specified timezone. -Default: nil +Default: nil ### type (string, optional) {#parse-section-type} @@ -306,9 +306,9 @@ Types casting the fields to proper types example: field1:type, field2:type ### utc (bool, optional) {#parse-section-utc} -If true, use UTC. Otherwise, local time is used. This is exclusive with localtime +If true, use UTC. Otherwise, local time is used. This is exclusive with localtime -Default: false +Default: false ## Grok Section @@ -346,6 +346,7 @@ Use specified timezone. 
one can parse/format the time value in the specified tim + ## Example `Parser` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/prometheus.md b/content/docs/configuration/plugins/filters/prometheus.md index 60e7838b7..490e19ac5 100644 --- a/content/docs/configuration/plugins/filters/prometheus.md +++ b/content/docs/configuration/plugins/filters/prometheus.md @@ -53,6 +53,8 @@ Metrics type [counter](https://github.com/fluent/fluent-plugin-prometheus#counte + + ## Example `Prometheus` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/record_modifier.md b/content/docs/configuration/plugins/filters/record_modifier.md index 6a6cfe886..7e36a1893 100644 --- a/content/docs/configuration/plugins/filters/record_modifier.md +++ b/content/docs/configuration/plugins/filters/record_modifier.md @@ -8,6 +8,14 @@ generated_file: true ## Overview Modify each event record. + + ## Configuration ## RecordModifier @@ -23,7 +31,7 @@ Prepare values for filtering in configure phase. Prepared values can be used in ### records ([]Record, optional) {#recordmodifier-records} -Add records docs at: https://github.com/repeatedly/fluent-plugin-record-modifier Records are represented as maps: `key: value` +Add records. Records are represented as maps: `key: value`. For details, see [https://github.com/repeatedly/fluent-plugin-record-modifier](https://github.com/repeatedly/fluent-plugin-record-modifier). 
### remove_keys (string, optional) {#recordmodifier-remove_keys} @@ -43,6 +51,7 @@ This is exclusive with remove_keys + ## Example `Record Modifier` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/record_transformer.md b/content/docs/configuration/plugins/filters/record_transformer.md index 3c500052c..cb74d5d7c 100644 --- a/content/docs/configuration/plugins/filters/record_transformer.md +++ b/content/docs/configuration/plugins/filters/record_transformer.md @@ -13,15 +13,15 @@ generated_file: true ### auto_typecast (bool, optional) {#recordtransformer-auto_typecast} -Use original value type. +Use original value type. -Default: true +Default: true ### enable_ruby (bool, optional) {#recordtransformer-enable_ruby} When set to true, the full Ruby syntax is enabled in the `${...}` expression. -Default: false +Default: false ### keep_keys (string, optional) {#recordtransformer-keep_keys} @@ -40,9 +40,9 @@ A comma-delimited list of keys to delete ### renew_record (bool, optional) {#recordtransformer-renew_record} -Create new Hash to transform incoming data +Create new Hash to transform incoming data -Default: false +Default: false ### renew_time_key (string, optional) {#recordtransformer-renew_time_key} @@ -51,6 +51,7 @@ Specify field name of the record to overwrite the time of events. Its value must + ## Example `Record Transformer` filter configurations {{< highlight yaml >}} @@ -68,8 +69,6 @@ spec: - demo-output {{}} - - Fluentd config result: {{< highlight xml >}} diff --git a/content/docs/configuration/plugins/filters/stdout.md b/content/docs/configuration/plugins/filters/stdout.md index 618f3cace..b18206696 100644 --- a/content/docs/configuration/plugins/filters/stdout.md +++ b/content/docs/configuration/plugins/filters/stdout.md @@ -18,6 +18,7 @@ This is the option of stdout format. 
+ ## Example `StdOut` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/sumologic.md b/content/docs/configuration/plugins/filters/sumologic.md index 9f2803026..7c74ec032 100644 --- a/content/docs/configuration/plugins/filters/sumologic.md +++ b/content/docs/configuration/plugins/filters/sumologic.md @@ -13,159 +13,160 @@ More info at https://github.com/SumoLogic/sumologic-kubernetes-collection ### collector_key_name (string, optional) {#sumologic-collector_key_name} -CollectorKey Name +CollectorKey Name Default: `_collector` ### collector_value (string, optional) {#sumologic-collector_value} -Collector Value +Collector Value -Default: "undefined" +Default: "undefined" ### exclude_container_regex (string, optional) {#sumologic-exclude_container_regex} -Exclude Container Regex +Exclude Container Regex -Default: "" +Default: "" ### exclude_facility_regex (string, optional) {#sumologic-exclude_facility_regex} -Exclude Facility Regex +Exclude Facility Regex -Default: "" +Default: "" ### exclude_host_regex (string, optional) {#sumologic-exclude_host_regex} -Exclude Host Regex +Exclude Host Regex -Default: "" +Default: "" ### exclude_namespace_regex (string, optional) {#sumologic-exclude_namespace_regex} -Exclude Namespace Regex +Exclude Namespace Regex -Default: "" +Default: "" ### exclude_pod_regex (string, optional) {#sumologic-exclude_pod_regex} -Exclude Pod Regex +Exclude Pod Regex -Default: "" +Default: "" ### exclude_priority_regex (string, optional) {#sumologic-exclude_priority_regex} -Exclude Priority Regex +Exclude Priority Regex -Default: "" +Default: "" ### exclude_unit_regex (string, optional) {#sumologic-exclude_unit_regex} -Exclude Unit Regex +Exclude Unit Regex -Default: "" +Default: "" ### log_format (string, optional) {#sumologic-log_format} -Log Format +Log Format -Default: json +Default: json ### source_category (string, optional) {#sumologic-source_category} -Source Category +Source Category Default: 
`%{namespace}/%{pod_name}` ### source_category_key_name (string, optional) {#sumologic-source_category_key_name} -Source CategoryKey Name +Source CategoryKey Name Default: `_sourceCategory` ### source_category_prefix (string, optional) {#sumologic-source_category_prefix} -Source Category Prefix +Source Category Prefix -Default: kubernetes/ +Default: kubernetes/ ### source_category_replace_dash (string, optional) {#sumologic-source_category_replace_dash} -Source Category Replace Dash +Source Category Replace Dash -Default: "/" +Default: "/" ### source_host (string, optional) {#sumologic-source_host} -Source Host +Source Host -Default: "" +Default: "" ### source_host_key_name (string, optional) {#sumologic-source_host_key_name} -Source HostKey Name +Source HostKey Name Default: `_sourceHost` ### source_name (string, optional) {#sumologic-source_name} -Source Name +Source Name Default: `%{namespace}.%{pod}.%{container}` ### source_name_key_name (string, optional) {#sumologic-source_name_key_name} -Source NameKey Name +Source NameKey Name Default: `_sourceName` ### tracing_annotation_prefix (string, optional) {#sumologic-tracing_annotation_prefix} -Tracing Annotation Prefix +Tracing Annotation Prefix Default: `pod_annotation_` ### tracing_container_name (string, optional) {#sumologic-tracing_container_name} -Tracing Container Name +Tracing Container Name -Default: "container_name" +Default: "container_name" ### tracing_format (*bool, optional) {#sumologic-tracing_format} -Tracing Format +Tracing Format -Default: false +Default: false ### tracing_host (string, optional) {#sumologic-tracing_host} -Tracing Host +Tracing Host -Default: "hostname" +Default: "hostname" ### tracing_label_prefix (string, optional) {#sumologic-tracing_label_prefix} -Tracing Label Prefix +Tracing Label Prefix Default: `pod_label_` ### tracing_namespace (string, optional) {#sumologic-tracing_namespace} -Tracing Namespace +Tracing Namespace -Default: "namespace" +Default: "namespace" ### 
tracing_pod (string, optional) {#sumologic-tracing_pod} -Tracing Pod +Tracing Pod -Default: "pod" +Default: "pod" ### tracing_pod_id (string, optional) {#sumologic-tracing_pod_id} -Tracing Pod ID +Tracing Pod ID + +Default: "pod_id" -Default: "pod_id" diff --git a/content/docs/configuration/plugins/filters/tagnormaliser.md b/content/docs/configuration/plugins/filters/tagnormaliser.md index 897c117f7..8ec51577b 100644 --- a/content/docs/configuration/plugins/filters/tagnormaliser.md +++ b/content/docs/configuration/plugins/filters/tagnormaliser.md @@ -31,12 +31,13 @@ Default: ${namespace_name}.${pod_name}.${container_name} ### match_tag (string, optional) {#tag-normaliser-parameters-match_tag} -Tag used in match directive. +Tag used in match directive. Default: `kubernetes.**` + ## Example `Parser` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/filters/throttle.md b/content/docs/configuration/plugins/filters/throttle.md index 97a125376..4d01a9aac 100644 --- a/content/docs/configuration/plugins/filters/throttle.md +++ b/content/docs/configuration/plugins/filters/throttle.md @@ -13,39 +13,40 @@ generated_file: true ### group_bucket_limit (int, optional) {#throttle-group_bucket_limit} -Maximum number logs allowed per groups over the period of group_bucket_period_s +Maximum number of logs allowed per group over the period of group_bucket_period_s -Default: 6000 +Default: 6000 ### group_bucket_period_s (int, optional) {#throttle-group_bucket_period_s} -This is the period of of time over which group_bucket_limit applies +This is the period of time over which group_bucket_limit applies -Default: 60 +Default: 60 ### group_drop_logs (bool, optional) {#throttle-group_drop_logs} -When a group reaches its limit, logs will be dropped from further processing if this value is true +When a group reaches its limit, logs will be dropped from further processing if this value is true -Default: true +Default: true ### group_key (string, 
optional) {#throttle-group_key} -Used to group logs. Groups are rate limited independently +Used to group logs. Groups are rate limited independently -Default: kubernetes.container_name +Default: kubernetes.container_name ### group_reset_rate_s (int, optional) {#throttle-group_reset_rate_s} -After a group has exceeded its bucket limit, logs are dropped until the rate per second falls below or equal to group_reset_rate_s. +After a group has exceeded its bucket limit, logs are dropped until the rate per second falls below or equal to group_reset_rate_s. -Default: group_bucket_limit/group_bucket_period_s +Default: group_bucket_limit/group_bucket_period_s ### group_warning_delay_s (int, optional) {#throttle-group_warning_delay_s} -When a group reaches its limit and as long as it is not reset, a warning message with the current log rate of the group is emitted repeatedly. This is the delay between every repetition. +When a group reaches its limit and as long as it is not reset, a warning message with the current log rate of the group is emitted repeatedly. This is the delay between every repetition. 
+ +Default: 10 seconds -Default: 10 seconds diff --git a/content/docs/configuration/plugins/filters/useragent.md b/content/docs/configuration/plugins/filters/useragent.md index c1c25b9e9..0fcf3502a 100644 --- a/content/docs/configuration/plugins/filters/useragent.md +++ b/content/docs/configuration/plugins/filters/useragent.md @@ -38,6 +38,7 @@ Default: ua + ## Example `UserAgent` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/outputs/azurestore.md b/content/docs/configuration/plugins/outputs/azurestore.md index 37390f26a..d34037c57 100644 --- a/content/docs/configuration/plugins/outputs/azurestore.md +++ b/content/docs/configuration/plugins/outputs/azurestore.md @@ -14,13 +14,13 @@ More info at https://github.com/microsoft/fluent-plugin-azure-storage-append-blo ### auto_create_container (bool, optional) {#output-config-auto_create_container} -Automatically create container if not exists +Automatically create container if not exists -Default: true +Default: true ### azure_cloud (string, optional) {#output-config-azure_cloud} -Available in Logging operator version 4.5 and later. Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase) This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts +Available in Logging operator version 4.5 and later. Azure Cloud to use, for example, AzurePublicCloud, AzureChinaCloud, AzureGermanCloud, AzureUSGovernmentCloud, AZURESTACKCLOUD (in uppercase). 
This field is supported only if the fluentd plugin honors it, for example, https://github.com/elsesiy/fluent-plugin-azure-storage-append-blob-lts ### azure_container (string, required) {#output-config-azure_container} @@ -35,9 +35,9 @@ Azure Instance Metadata Service API Version ### azure_object_key_format (string, optional) {#output-config-azure_object_key_format} -Object key format +Object key format -Default: %{path}%{time_slice}_%{index}.%{file_extension} +Default: %{path}%{time_slice}_%{index}.%{file_extension} ### azure_storage_access_key (*secret.Secret, optional) {#output-config-azure_storage_access_key} diff --git a/content/docs/configuration/plugins/outputs/buffer.md b/content/docs/configuration/plugins/outputs/buffer.md index c05fe445a..2a93cbec9 100644 --- a/content/docs/configuration/plugins/outputs/buffer.md +++ b/content/docs/configuration/plugins/outputs/buffer.md @@ -80,9 +80,9 @@ How output plugin behaves when its buffer queue is full throw_exception: raise e ### path (string, optional) {#buffer-path} -The path where buffer chunks are stored. The '*' is replaced with random characters. It's highly recommended to leave this default. +The path where buffer chunks are stored. The '*' is replaced with random characters. It's highly recommended to leave this default. -Default: operator generated +Default: operator generated ### queue_limit_length (int, optional) {#buffer-queue_limit_length} @@ -142,9 +142,9 @@ Seconds to wait before next retry to flush, or constant factor of exponential ba ### tags (*string, optional) {#buffer-tags} -When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags. +When tag is specified as buffer chunk key, output plugin writes events into chunks separately per tags. 
-Default: tag,time +Default: tag,time ### timekey (string, required) {#buffer-timekey} diff --git a/content/docs/configuration/plugins/outputs/cloudwatch.md b/content/docs/configuration/plugins/outputs/cloudwatch.md index 580d69cb7..eb281eedd 100644 --- a/content/docs/configuration/plugins/outputs/cloudwatch.md +++ b/content/docs/configuration/plugins/outputs/cloudwatch.md @@ -39,9 +39,9 @@ cloudwatch: ### auto_create_stream (bool, optional) {#output-config-auto_create_stream} -Create log group and stream automatically. +Create log group and stream automatically. -Default: false +Default: false ### aws_key_id (*secret.Secret, optional) {#output-config-aws_key_id} @@ -50,9 +50,9 @@ AWS access key id [Secret](../secret/) ### aws_instance_profile_credentials_retries (int, optional) {#output-config-aws_instance_profile_credentials_retries} -Instance Profile Credentials call retries +Instance Profile Credentials call retries -Default: nil +Default: nil ### aws_sec_key (*secret.Secret, optional) {#output-config-aws_sec_key} @@ -66,9 +66,9 @@ The role ARN to assume when using cross-account sts authentication ### aws_sts_session_name (string, optional) {#output-config-aws_sts_session_name} -The session name to use with sts authentication +The session name to use with sts authentication -Default: 'fluentd' +Default: 'fluentd' ### aws_use_sts (bool, optional) {#output-config-aws_use_sts} @@ -82,9 +82,9 @@ Enable AssumeRoleCredentials to authenticate, rather than the default credential ### concurrency (int, optional) {#output-config-concurrency} -Use to set the number of threads pushing data to CloudWatch. +Use to set the number of threads pushing data to CloudWatch. 
-Default: 1 +Default: 1 ### endpoint (string, optional) {#output-config-endpoint} @@ -103,9 +103,9 @@ Use to set an optional HTTP proxy ### include_time_key (bool, optional) {#output-config-include_time_key} -Include time key as part of the log entry +Include time key as part of the log entry -Default: UTC +Default: UTC ### json_handler (string, optional) {#output-config-json_handler} @@ -139,9 +139,9 @@ Specified field of records as log group name ### log_rejected_request (string, optional) {#output-config-log_rejected_request} -Output rejected_log_events_info request log. +Output rejected_log_events_info request log. -Default: false +Default: false ### log_stream_name (string, optional) {#output-config-log_stream_name} @@ -155,9 +155,9 @@ Specified field of records as log stream name ### max_events_per_batch (int, optional) {#output-config-max_events_per_batch} -Maximum number of events to send at once +Maximum number of events to send at once -Default: 10000 +Default: 10000 ### max_message_length (int, optional) {#output-config-max_message_length} diff --git a/content/docs/configuration/plugins/outputs/datadog.md b/content/docs/configuration/plugins/outputs/datadog.md index 033796903..617919c67 100644 --- a/content/docs/configuration/plugins/outputs/datadog.md +++ b/content/docs/configuration/plugins/outputs/datadog.md @@ -27,9 +27,9 @@ spec: ### api_key (*secret.Secret, required) {#output-config-api_key} -This parameter is required in order to authenticate your fluent agent. +docLink:"Secret,../secret/" +This parameter is required in order to authenticate your fluent agent. 
-Default: nil +Default: nil ### buffer (*Buffer, optional) {#output-config-buffer} @@ -38,75 +38,75 @@ Default: nil ### compression_level (string, optional) {#output-config-compression_level} -Set the log compression level for HTTP (1 to 9, 9 being the best ratio) +Set the log compression level for HTTP (1 to 9, 9 being the best ratio) -Default: "6" +Default: "6" ### dd_hostname (string, optional) {#output-config-dd_hostname} -Used by Datadog to identify the host submitting the logs. +Used by Datadog to identify the host submitting the logs. -Default: "hostname -f" +Default: "hostname -f" ### dd_source (string, optional) {#output-config-dd_source} -This tells Datadog what integration it is +This tells Datadog what integration it is -Default: nil +Default: nil ### dd_sourcecategory (string, optional) {#output-config-dd_sourcecategory} -Multiple value attribute. Can be used to refine the source attribute +Multiple value attribute. Can be used to refine the source attribute -Default: nil +Default: nil ### dd_tags (string, optional) {#output-config-dd_tags} -Custom tags with the following format "key1:value1, key2:value2" +Custom tags with the following format "key1:value1, key2:value2" -Default: nil +Default: nil ### host (string, optional) {#output-config-host} -Proxy endpoint when logs are not directly forwarded to Datadog +Proxy endpoint when logs are not directly forwarded to Datadog -Default: "http-intake.logs.datadoghq.com" +Default: "http-intake.logs.datadoghq.com" ### include_tag_key (bool, optional) {#output-config-include_tag_key} -Automatically include the Fluentd tag in the record. +Automatically include the Fluentd tag in the record. 
-Default: false +Default: false ### max_backoff (string, optional) {#output-config-max_backoff} -The maximum time waited between each retry in seconds +The maximum time waited between each retry in seconds -Default: "30" +Default: "30" ### max_retries (string, optional) {#output-config-max_retries} -The number of retries before the output plugin stops. Set to -1 for unlimited retries +The number of retries before the output plugin stops. Set to -1 for unlimited retries -Default: "-1" +Default: "-1" ### no_ssl_validation (bool, optional) {#output-config-no_ssl_validation} -Disable SSL validation (useful for proxy forwarding) +Disable SSL validation (useful for proxy forwarding) -Default: false +Default: false ### port (string, optional) {#output-config-port} -Proxy port when logs are not directly forwarded to Datadog and ssl is not used +Proxy port when logs are not directly forwarded to Datadog and ssl is not used -Default: "80" +Default: "80" ### service (string, optional) {#output-config-service} -Used by Datadog to correlate between logs, traces and metrics. +Used by Datadog to correlate between logs, traces and metrics. -Default: nil +Default: nil ### slow_flush_log_threshold (string, optional) {#output-config-slow_flush_log_threshold} @@ -115,44 +115,44 @@ The threshold for chunk flush performance check. Parameter type is float, not ti ### ssl_port (string, optional) {#output-config-ssl_port} -Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region. +Port used to send logs over a SSL encrypted connection to Datadog. If use_http is disabled, use 10516 for the US region and 443 for the EU region. -Default: "443" +Default: "443" ### tag_key (string, optional) {#output-config-tag_key} -Where to store the Fluentd tag. +Where to store the Fluentd tag. 
-Default: "tag" +Default: "tag" ### timestamp_key (string, optional) {#output-config-timestamp_key} -Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added. +Name of the attribute which will contain timestamp of the log event. If nil, timestamp attribute is not added. -Default: "@timestamp" +Default: "@timestamp" ### use_compression (bool, optional) {#output-config-use_compression} -Enable log compression for HTTP +Enable log compression for HTTP -Default: true +Default: true ### use_http (bool, optional) {#output-config-use_http} -Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516 +Enable HTTP forwarding. If you disable it, make sure to change the port to 10514 or ssl_port to 10516 -Default: true +Default: true ### use_json (bool, optional) {#output-config-use_json} -Event format, if true, the event is sent in json format. Othwerwise, in plain text. +Event format, if true, the event is sent in json format. Otherwise, in plain text. -Default: true +Default: true ### use_ssl (bool, optional) {#output-config-use_ssl} -If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise. +If true, the agent initializes a secure connection to Datadog. In clear TCP otherwise. 
-Default: true +Default: true diff --git a/content/docs/configuration/plugins/outputs/elasticsearch.md b/content/docs/configuration/plugins/outputs/elasticsearch.md index 3ec9692ad..963662486 100644 --- a/content/docs/configuration/plugins/outputs/elasticsearch.md +++ b/content/docs/configuration/plugins/outputs/elasticsearch.md @@ -10,21 +10,21 @@ generated_file: true Example Deployment: [Save all logs to Elasticsearch](../../../../quickstarts/es-nginx/) - ## Example output configurations - - ```yaml - spec: - elasticsearch: - host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local - port: 9200 - scheme: https - ssl_verify: false - ssl_version: TLSv1_2 - buffer: - timekey: 1m - timekey_wait: 30s - timekey_use_utc: true - ``` +## Example output configurations + +```yaml +spec: + elasticsearch: + host: elasticsearch-elasticsearch-cluster.default.svc.cluster.local + port: 9200 + scheme: https + ssl_verify: false + ssl_version: TLSv1_2 + buffer: + timekey: 1m + timekey_wait: 30s + timekey_use_utc: true +``` ## Configuration @@ -39,9 +39,9 @@ api_key parameter adds authentication header. ### application_name (*string, optional) {#elasticsearch-application_name} -Specify the application name for the rollover index to be created. +Specify the application name for the rollover index to be created. -Default: default +Default: default ### buffer (*Buffer, optional) {#elasticsearch-buffer} @@ -50,37 +50,21 @@ Default: default ### bulk_message_request_threshold (string, optional) {#elasticsearch-bulk_message_request_threshold} -Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled. +Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled. 
-Default: 20MB - -### ca_file (*secret.Secret, optional) {#elasticsearch-ca_file} - -CA certificate - -### client_cert (*secret.Secret, optional) {#elasticsearch-client_cert} - -Client certificate - -### client_key (*secret.Secret, optional) {#elasticsearch-client_key} - -Client certificate key - -### client_key_pass (*secret.Secret, optional) {#elasticsearch-client_key_pass} - -Client key password +Default: 20MB ### content_type (string, optional) {#elasticsearch-content_type} -With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload. +With content_type application/x-ndjson, elasticsearch plugin adds application/x-ndjson as Content-Profile in payload. -Default: application/json +Default: application/json ### custom_headers (string, optional) {#elasticsearch-custom_headers} -This parameter adds additional headers to request. Example: {"token":"secret"} +This parameter adds additional headers to request. Example: {"token":"secret"} -Default: {} +Default: {} ### customize_template (string, optional) {#elasticsearch-customize_template} @@ -94,9 +78,9 @@ Use @type elasticsearch_data_stream ### data_stream_ilm_name (string, optional) {#elasticsearch-data_stream_ilm_name} -Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template's or a new ILM default policy is applied. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream +Specify an existing ILM policy to be applied to the data stream. If not present, either the specified template's or a new ILM default policy is applied. 
Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream -Default: data_stream_name +Default: data_stream_name ### data_stream_ilm_policy (string, optional) {#elasticsearch-data_stream_ilm_policy} @@ -115,15 +99,15 @@ You can specify Elasticsearch data stream name by this parameter. This parameter ### data_stream_template_name (string, optional) {#elasticsearch-data_stream_template_name} -Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream +Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. Further details here https://github.com/uken/fluent-plugin-elasticsearch#configuration---elasticsearch-output-data-stream -Default: data_stream_name +Default: data_stream_name ### default_elasticsearch_version (string, optional) {#elasticsearch-default_elasticsearch_version} -This parameter changes that ES plugin assumes default Elasticsearch version. +This parameter changes that ES plugin assumes default Elasticsearch version. -Default: 5 +Default: 5 ### deflector_alias (string, optional) {#elasticsearch-deflector_alias} @@ -176,9 +160,9 @@ You can specify multiple Elasticsearch hosts with separator ",". If you specify ### http_backend (string, optional) {#elasticsearch-http_backend} -With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. +With http_backend typhoeus, elasticsearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. 
-Default: excon +Default: excon ### id_key (string, optional) {#elasticsearch-id_key} @@ -212,75 +196,75 @@ With this option set to true, Fluentd manifests the index name in the request UR ### include_tag_key (bool, optional) {#elasticsearch-include_tag_key} -This will add the Fluentd tag in the JSON record. +This will add the Fluentd tag in the JSON record. -Default: false +Default: false ### include_timestamp (bool, optional) {#elasticsearch-include_timestamp} -Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API. +Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in Elasticsearch and utilize the rollover API. -Default: false +Default: false ### index_date_pattern (*string, optional) {#elasticsearch-index_date_pattern} -Specify this to override the index date pattern for creating a rollover index. +Specify this to override the index date pattern for creating a rollover index. -Default: now/d +Default: now/d ### index_name (string, optional) {#elasticsearch-index_name} -The index name to write events to +The index name to write events to Default: fluentd ### index_prefix (string, optional) {#elasticsearch-index_prefix} -Specify the index prefix for the rollover index to be created. +Specify the index prefix for the rollover index to be created. -Default: logstash +Default: logstash ### log_es_400_reason (bool, optional) {#elasticsearch-log_es_400_reason} -By default, the error logger won't record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn't desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs. 
+By default, the error logger won't record the reason for a 400 error from the Elasticsearch API unless you set log_level to debug. However, this results in a lot of log spam, which isn't desirable if all you want is the 400 error reasons. You can set this true to capture the 400 error reasons without all the other debug logs. -Default: false +Default: false ### logstash_dateformat (string, optional) {#elasticsearch-logstash_dateformat} -Set the Logstash date format. +Set the Logstash date format. -Default: %Y.%m.%d +Default: %Y.%m.%d ### logstash_format (bool, optional) {#elasticsearch-logstash_format} -Enable Logstash log format. +Enable Logstash log format. -Default: false +Default: false ### logstash_prefix (string, optional) {#elasticsearch-logstash_prefix} -Set the Logstash prefix. +Set the Logstash prefix. -Default: logstash +Default: logstash ### logstash_prefix_separator (string, optional) {#elasticsearch-logstash_prefix_separator} -Set the Logstash prefix separator. +Set the Logstash prefix separator. -Default: - +Default: - ### max_retry_get_es_version (string, optional) {#elasticsearch-max_retry_get_es_version} -You can specify the number of times to retry fetching the Elasticsearch version.(default: 15) +You can specify the number of times to retry fetching the Elasticsearch version. -Default: 15 +Default: 15 ### max_retry_putting_template (string, optional) {#elasticsearch-max_retry_putting_template} -You can specify times of retry putting template. +You can specify times of retry putting template. -Default: 10 +Default: 10 ### password (*secret.Secret, optional) {#elasticsearch-password} @@ -301,19 +285,19 @@ This param is to set a pipeline id of your elasticsearch to be added into the re You can specify the Elasticsearch port using this parameter. -Default: 9200 +Default: 9200 ### prefer_oj_serializer (bool, optional) {#elasticsearch-prefer_oj_serializer} -With default behavior, Elasticsearch client uses Yajl as JSON encoder/decoder. 
Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, Elasticsearch client uses Oj as JSON encoder/decoder. +With default behavior, Elasticsearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, Elasticsearch client uses Oj as JSON encoder/decoder. -Default: false +Default: false ### reconnect_on_error (bool, optional) {#elasticsearch-reconnect_on_error} -Indicates that the plugin should reset connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommended to set this true in the presence of elasticsearch shield. +Indicates that the plugin should reset connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommended to set this true in the presence of elasticsearch shield. -Default: false +Default: false ### reload_after (string, optional) {#elasticsearch-reload_after} @@ -330,7 +314,7 @@ Default: true Indicates that the elasticsearch-transport will try to reload the nodes addresses if there is a failure while making the request. This can be useful to quickly remove a dead node from the list of addresses. -Default: false +Default: false ### remove_keys (string, optional) {#elasticsearch-remove_keys} @@ -349,15 +333,15 @@ This setting allows remove_keys_on_update to be configured with a key in each re ### request_timeout (string, optional) {#elasticsearch-request_timeout} -You can specify HTTP request timeout. +You can specify HTTP request timeout. -Default: 5s +Default: 5s ### resurrect_after (string, optional) {#elasticsearch-resurrect_after} -You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport's pool will be resurrected. +You can set in the elasticsearch-transport how often dead connections from the elasticsearch-transport's pool will be resurrected. 
-Default: 60s +Default: 60s ### retry_tag (string, optional) {#elasticsearch-retry_tag} @@ -366,20 +350,40 @@ This setting allows custom routing of messages in response to bulk request failu ### rollover_index (bool, optional) {#elasticsearch-rollover_index} -Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index +Specify this as true when an index with rollover capability needs to be created. https://github.com/uken/fluent-plugin-elasticsearch#rollover_index -Default: false +Default: false ### routing_key (string, optional) {#elasticsearch-routing_key} Similar to parent_key config, will add _routing into elasticsearch command if routing_key is set and the field does exist in input event. +### ca_file (*secret.Secret, optional) {#elasticsearch-ca_file} + +CA certificate + + +### client_cert (*secret.Secret, optional) {#elasticsearch-client_cert} + +Client certificate + + +### client_key (*secret.Secret, optional) {#elasticsearch-client_key} + +Client certificate key + + +### client_key_pass (*secret.Secret, optional) {#elasticsearch-client_key_pass} + +Client key password + + ### scheme (string, optional) {#elasticsearch-scheme} -Connection scheme +Connection scheme -Default: http +Default: http ### slow_flush_log_threshold (string, optional) {#elasticsearch-slow_flush_log_threshold} @@ -412,9 +416,9 @@ If you want to configure SSL/TLS version, you can specify ssl_version parameter. ### suppress_doc_wrap (bool, optional) {#elasticsearch-suppress_doc_wrap} -By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched. +By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched. 
-Default: false +Default: false ### suppress_type_name (*bool, optional) {#elasticsearch-suppress_type_name} @@ -423,9 +427,9 @@ Suppress type name to avoid warnings in Elasticsearch 7.x ### tag_key (string, optional) {#elasticsearch-tag_key} -This will add the Fluentd tag in the JSON record. +This will add the Fluentd tag in the JSON record. -Default: tag +Default: tag ### target_index_key (string, optional) {#elasticsearch-target_index_key} @@ -434,9 +438,9 @@ Tell this plugin to find the index name to write to in the record under this key ### target_type_key (string, optional) {#elasticsearch-target_type_key} -Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name. +Similar to target_index_key config, find the type name to write to in the record under this key (or nested record). If key not found in record - fallback to type_name. -Default: fluentd +Default: fluentd ### template_file (*secret.Secret, optional) {#elasticsearch-template_file} @@ -450,9 +454,9 @@ The name of the template to define. If a template by the name given is already p ### template_overwrite (bool, optional) {#elasticsearch-template_overwrite} -Always update the template, even if it already exists. +Always update the template, even if it already exists. -Default: false +Default: false ### templates (string, optional) {#elasticsearch-templates} @@ -481,9 +485,9 @@ Should the record not include a time_key, define the degree of sub-second time p ### type_name (string, optional) {#elasticsearch-type_name} -Set the index type for elasticsearch. This is the fallback if `target_type_key` is missing. +Set the index type for elasticsearch. This is the fallback if `target_type_key` is missing. 
-Default: fluentd +Default: fluentd ### unrecoverable_error_types (string, optional) {#elasticsearch-unrecoverable_error_types} @@ -492,7 +496,9 @@ Default unrecoverable_error_types parameter is set up strictly. Because es_rejec ### use_legacy_template (*bool, optional) {#elasticsearch-use_legacy_template} -If set to true, the output uses the [legacy index template format](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/indices-templates-v1.html). Otherwise, it uses the [composable index template](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/index-templates.html) format. (default: true) +If set to true, the output uses the [legacy index template format](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/indices-templates-v1.html). Otherwise, it uses the [composable index template](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/index-templates.html) format. + +Default: true ### user (string, optional) {#elasticsearch-user} @@ -507,9 +513,9 @@ Default: true ### validate_client_version (bool, optional) {#elasticsearch-validate_client_version} -When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. +When you use mismatched Elasticsearch server and client libraries, fluent-plugin-elasticsearch cannot send data into Elasticsearch. -Default: false +Default: false ### verify_es_version_at_startup (*bool, optional) {#elasticsearch-verify_es_version_at_startup} @@ -519,14 +525,14 @@ Default: true ### with_transporter_log (bool, optional) {#elasticsearch-with_transporter_log} -This is debugging purpose option to enable to obtain transporter layer log. +This is debugging purpose option to enable to obtain transporter layer log. 
-Default: false +Default: false ### write_operation (string, optional) {#elasticsearch-write_operation} -The write_operation can be any of: (index,create,update,upsert) +The write_operation can be any of: (index,create,update,upsert) -Default: index +Default: index diff --git a/content/docs/configuration/plugins/outputs/file.md b/content/docs/configuration/plugins/outputs/file.md index bf8d7aa8a..95b91125f 100644 --- a/content/docs/configuration/plugins/outputs/file.md +++ b/content/docs/configuration/plugins/outputs/file.md @@ -44,15 +44,15 @@ The Path of the file. The actual path is path + time + ".log" by default. ### path_suffix (string, optional) {#fileoutputconfig-path_suffix} -The suffix of output result. +The suffix of output result. -Default: ".log" +Default: ".log" ### recompress (bool, optional) {#fileoutputconfig-recompress} -Performs compression again even if the buffer chunk is already compressed. +Performs compression again even if the buffer chunk is already compressed. -Default: false +Default: false ### slow_flush_log_threshold (string, optional) {#fileoutputconfig-slow_flush_log_threshold} @@ -61,13 +61,14 @@ The threshold for chunk flush performance check. Parameter type is float, not ti ### symlink_path (bool, optional) {#fileoutputconfig-symlink_path} -Create symlink to temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs. +Create symlink to temporary buffered file when buffer_type is file. This is useful for tailing file content to check logs. 
+ +Default: false -Default: false -## Example `File` output configurations +## Example `File` output configurations {{< highlight yaml >}} apiVersion: logging.banzaicloud.io/v1beta1 @@ -88,7 +89,7 @@ spec: Fluentd config result: {{< highlight xml >}} - + @type file @id test_file add_path_suffix true @@ -102,7 +103,7 @@ Fluentd config result: timekey_use_utc true timekey_wait 30s - + {{}} diff --git a/content/docs/configuration/plugins/outputs/format.md b/content/docs/configuration/plugins/outputs/format.md index ce6ae5a0f..2db9b4697 100644 --- a/content/docs/configuration/plugins/outputs/format.md +++ b/content/docs/configuration/plugins/outputs/format.md @@ -7,12 +7,12 @@ generated_file: true # Format output records ## Overview - Specify how to format output records. For details, see [https://docs.fluentd.org/configuration/format-section](https://docs.fluentd.org/configuration/format-section). +Specify how to format output records. For details, see [https://docs.fluentd.org/configuration/format-section](https://docs.fluentd.org/configuration/format-section). 
## Example ```yaml - spec: +spec: format: path: /tmp/logs/${tag}/%Y/%m/%d.%H.%M format: @@ -27,9 +27,9 @@ generated_file: true ### add_newline (*bool, optional) {#format-add_newline} -When type is single_value add '\n' to the end of the message +When type is single_value add '\n' to the end of the message -Default: true +Default: true ### message_key (string, optional) {#format-message_key} @@ -38,8 +38,8 @@ When type is single_value specify the key holding information ### type (string, optional) {#format-type} -Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value +Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value -Default: json +Default: json diff --git a/content/docs/configuration/plugins/outputs/format_rfc5424.md b/content/docs/configuration/plugins/outputs/format_rfc5424.md index 5874bc4c6..909341abb 100644 --- a/content/docs/configuration/plugins/outputs/format_rfc5424.md +++ b/content/docs/configuration/plugins/outputs/format_rfc5424.md @@ -8,39 +8,39 @@ generated_file: true ### app_name_field (string, optional) {#formatrfc5424-app_name_field} -Sets app name in syslog from field in fluentd, delimited by '.' +Sets app name in syslog from field in fluentd, delimited by '.' -Default: app_name +Default: app_name ### hostname_field (string, optional) {#formatrfc5424-hostname_field} -Sets host name in syslog from field in fluentd, delimited by '.' +Sets host name in syslog from field in fluentd, delimited by '.' -Default: hostname +Default: hostname ### log_field (string, optional) {#formatrfc5424-log_field} -Sets log in syslog from field in fluentd, delimited by '.' +Sets log in syslog from field in fluentd, delimited by '.' -Default: log +Default: log ### message_id_field (string, optional) {#formatrfc5424-message_id_field} -Sets msg id in syslog from field in fluentd, delimited by '.' +Sets msg id in syslog from field in fluentd, delimited by '.' 
-Default: message_id +Default: message_id ### proc_id_field (string, optional) {#formatrfc5424-proc_id_field} -Sets proc id in syslog from field in fluentd, delimited by '.' +Sets proc id in syslog from field in fluentd, delimited by '.' -Default: proc_id +Default: proc_id ### rfc6587_message_size (*bool, optional) {#formatrfc5424-rfc6587_message_size} -Prepends message length for syslog transmission +Prepends message length for syslog transmission -Default: true +Default: true ### structured_data_field (string, optional) {#formatrfc5424-structured_data_field} @@ -49,8 +49,8 @@ Sets structured data in syslog from field in fluentd, delimited by '.' (default ### type (string, optional) {#formatrfc5424-type} -Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value +Output line formatting: out_file,json,ltsv,csv,msgpack,hash,single_value -Default: json +Default: json diff --git a/content/docs/configuration/plugins/outputs/gcs.md b/content/docs/configuration/plugins/outputs/gcs.md index 7a098e5dc..83bd80bd2 100644 --- a/content/docs/configuration/plugins/outputs/gcs.md +++ b/content/docs/configuration/plugins/outputs/gcs.md @@ -11,7 +11,7 @@ Store logs in Google Cloud Storage. 
For details, see [https://github.com/kube-lo ## Example ```yaml - spec: +spec: gcs: project: logging-example bucket: banzai-log-test @@ -29,9 +29,9 @@ Permission for the object in GCS: `auth_read` `owner_full` `owner_read` `private ### auto_create_bucket (bool, optional) {#gcsoutput-auto_create_bucket} -Create GCS bucket if it does not exists +Create GCS bucket if it does not exists -Default: true +Default: true ### bucket (string, required) {#gcsoutput-bucket} @@ -70,9 +70,9 @@ Customer-supplied, AES-256 encryption key ### hex_random_length (int, optional) {#gcsoutput-hex_random_length} -Max length of `%{hex_random}` placeholder(4-16) +Max length of `%{hex_random}` placeholder(4-16) -Default: 4 +Default: 4 ### keyfile (string, optional) {#gcsoutput-keyfile} @@ -81,7 +81,7 @@ Path of GCS service account credentials JSON file ### object_key_format (string, optional) {#gcsoutput-object_key_format} -Format of GCS object keys +Format of GCS object keys Default: `%{path}%{time_slice}_%{index}.%{file_extension}` @@ -92,9 +92,9 @@ User provided web-safe keys and arbitrary string values that will returned with ### overwrite (bool, optional) {#gcsoutput-overwrite} -Overwrite already existing path +Overwrite already existing path -Default: false +Default: false ### path (string, optional) {#gcsoutput-path} @@ -118,9 +118,9 @@ Storage class of the file: `dra` `nearline` `coldline` `multi_regional` `regiona ### store_as (string, optional) {#gcsoutput-store_as} -Archive format on GCS: gzip json text +Archive format on GCS: gzip json text -Default: gzip +Default: gzip ### transcoding (bool, optional) {#gcsoutput-transcoding} diff --git a/content/docs/configuration/plugins/outputs/gelf.md b/content/docs/configuration/plugins/outputs/gelf.md index 9cd3ea26b..cc8f33338 100644 --- a/content/docs/configuration/plugins/outputs/gelf.md +++ b/content/docs/configuration/plugins/outputs/gelf.md @@ -23,19 +23,23 @@ Destination host port ### protocol (string, optional) 
{#output-config-protocol} -Transport Protocol +Transport Protocol -Default: "udp" +Default: "udp" ### tls (*bool, optional) {#output-config-tls} -Enable TlS +Enable TlS -Default: false +Default: false ### tls_options (map[string]string, optional) {#output-config-tls_options} -TLS options (default: {}). For details, see [https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12](https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12). +TLS options. For details, see [https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12](https://github.com/graylog-labs/gelf-rb/blob/72916932b789f7a6768c3cdd6ab69a3c942dbcef/lib/gelf/transport/tcp_tls.rb#L7-L12). + +Default: {} + + ## Example `GELF` output configurations @@ -54,12 +58,12 @@ spec: Fluentd config result: {{< highlight xml >}} - + @type gelf @id test_gelf host gelf-host port 12201 - + {{}} diff --git a/content/docs/configuration/plugins/outputs/http.md b/content/docs/configuration/plugins/outputs/http.md index 2a9d4ef7a..4f5e951dc 100644 --- a/content/docs/configuration/plugins/outputs/http.md +++ b/content/docs/configuration/plugins/outputs/http.md @@ -46,15 +46,21 @@ Endpoint for HTTP request. ### error_response_as_unrecoverable (*bool, optional) {#output-config-error_response_as_unrecoverable} -Raise UnrecoverableError when the response code is non success, 1xx/3xx/4xx/5xx. If false, the plugin logs error message instead of raising UnrecoverableError. +Raise UnrecoverableError when the response code is non success, 1xx/3xx/4xx/5xx. If false, the plugin logs error message instead of raising UnrecoverableError. -Default: true +Default: true ### format (*Format, optional) {#output-config-format} [Format](../format/) +### http_method (string, optional) {#output config-http_method} + +Method for HTTP request. 
[post, put] + +Default: post + ### headers (map[string]string, optional) {#output-config-headers} Additional headers for HTTP request. @@ -62,9 +68,9 @@ Additional headers for HTTP request. ### json_array (bool, optional) {#output-config-json_array} -Using array format of JSON. This parameter is used and valid only for json format. When json_array as true, Content-Profile should be application/json and be able to use JSON data for the HTTP request body. +Using array format of JSON. This parameter is used and valid only for json format. When json_array as true, Content-Profile should be application/json and be able to use JSON data for the HTTP request body. -Default: false +Default: false ### open_timeout (int, optional) {#output-config-open_timeout} @@ -83,9 +89,9 @@ Read timeout in seconds. ### retryable_response_codes ([]int, optional) {#output-config-retryable_response_codes} -List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Since Fluentd v2 the Status code 503 is going to be removed from default. +List of retryable response codes. If the response code is included in this list, the plugin retries the buffer flush. Since Fluentd v2 the Status code 503 is going to be removed from default. -Default: [503] +Default: [503] ### ssl_timeout (int, optional) {#output-config-ssl_timeout} @@ -103,9 +109,9 @@ The CA certificate path for TLS. ### tls_ciphers (string, optional) {#output-config-tls_ciphers} -The cipher configuration of TLS transport. +The cipher configuration of TLS transport. -Default: ALL:!aNULL:!eNULL:!SSLv2 +Default: ALL:!aNULL:!eNULL:!SSLv2 ### tls_client_cert_path (*secret.Secret, optional) {#output-config-tls_client_cert_path} @@ -123,15 +129,15 @@ The client private key path for TLS. ### tls_verify_mode (string, optional) {#output-config-tls_verify_mode} -The verify mode of TLS. [peer, none] +The verify mode of TLS. 
[peer, none] -Default: peer +Default: peer ### tls_version (string, optional) {#output-config-tls_version} -The default version of TLS transport. [TLSv1_1, TLSv1_2] +The default version of TLS transport. [TLSv1_1, TLSv1_2] -Default: TLSv1_2 +Default: TLSv1_2 ## HTTP auth config diff --git a/content/docs/configuration/plugins/outputs/kafka.md b/content/docs/configuration/plugins/outputs/kafka.md index 11260c77d..9ca2e115e 100644 --- a/content/docs/configuration/plugins/outputs/kafka.md +++ b/content/docs/configuration/plugins/outputs/kafka.md @@ -37,9 +37,9 @@ Send your logs to Kafka ### ack_timeout (int, optional) {#kafka-ack_timeout} -How long the producer waits for acks. The unit is seconds +How long the producer waits for acks. The unit is seconds -Default: nil => Uses default of ruby-kafka library +Default: nil => Uses default of ruby-kafka library ### brokers (string, required) {#kafka-brokers} @@ -53,51 +53,51 @@ The list of all seed brokers, with their host and port information. ### client_id (string, optional) {#kafka-client_id} -Client ID +Client ID -Default: "kafka" +Default: "kafka" ### compression_codec (string, optional) {#kafka-compression_codec} -The codec the producer uses to compress messages . The available options are gzip and snappy. +The codec the producer uses to compress messages . The available options are gzip and snappy. -Default: nil +Default: nil ### default_message_key (string, optional) {#kafka-default_message_key} -The name of default message key . +The name of default message key . -Default: nil +Default: nil ### default_partition_key (string, optional) {#kafka-default_partition_key} -The name of default partition key . +The name of default partition key . -Default: nil +Default: nil ### default_topic (string, optional) {#kafka-default_topic} -The name of default topic . +The name of default topic . 
-Default: nil +Default: nil ### discard_kafka_delivery_failed (bool, optional) {#kafka-discard_kafka_delivery_failed} -Discard the record where Kafka DeliveryFailed occurred +Discard the record where Kafka DeliveryFailed occurred -Default: false +Default: false ### exclude_partion_key (bool, optional) {#kafka-exclude_partion_key} -Exclude Partition key +Exclude Partition key -Default: false +Default: false ### exclude_topic_key (bool, optional) {#kafka-exclude_topic_key} -Exclude Topic key +Exclude Topic key -Default: false +Default: false ### format (*Format, required) {#kafka-format} @@ -106,66 +106,66 @@ Default: false ### get_kafka_client_log (bool, optional) {#kafka-get_kafka_client_log} -Get Kafka Client log +Get Kafka Client log -Default: false +Default: false ### headers (map[string]string, optional) {#kafka-headers} -Headers +Headers -Default: {} +Default: {} ### headers_from_record (map[string]string, optional) {#kafka-headers_from_record} -Headers from Record +Headers from Record -Default: {} +Default: {} ### idempotent (bool, optional) {#kafka-idempotent} -Idempotent +Idempotent -Default: false +Default: false ### kafka_agg_max_bytes (int, optional) {#kafka-kafka_agg_max_bytes} -Maximum value of total message size to be included in one batch transmission. . +Maximum value of total message size to be included in one batch transmission. . -Default: 4096 +Default: 4096 ### kafka_agg_max_messages (int, optional) {#kafka-kafka_agg_max_messages} -Maximum number of messages to include in one batch transmission. . +Maximum number of messages to include in one batch transmission. . 
-Default: nil +Default: nil ### keytab (*secret.Secret, optional) {#kafka-keytab} ### max_send_retries (int, optional) {#kafka-max_send_retries} -Number of times to retry sending of messages to a leader +Number of times to retry sending of messages to a leader -Default: 1 +Default: 1 ### message_key_key (string, optional) {#kafka-message_key_key} -Message Key +Message Key -Default: "message_key" +Default: "message_key" ### partition_key (string, optional) {#kafka-partition_key} -Partition +Partition -Default: "partition" +Default: "partition" ### partition_key_key (string, optional) {#kafka-partition_key_key} -Partition Key +Partition Key -Default: "partition_key" +Default: "partition_key" ### password (*secret.Secret, optional) {#kafka-password} @@ -177,19 +177,9 @@ Password when using PLAIN/SCRAM SASL authentication ### required_acks (int, optional) {#kafka-required_acks} -The number of acks required per request . +The number of acks required per request . -Default: -1 - -### sasl_over_ssl (bool, required) {#kafka-sasl_over_ssl} - -SASL over SSL - -Default: true - -### scram_mechanism (string, optional) {#kafka-scram_mechanism} - -If set, use SCRAM authentication with specified mechanism. When unset, default to PLAIN authentication +Default: -1 ### ssl_ca_cert (*secret.Secret, optional) {#kafka-ssl_ca_cert} @@ -198,9 +188,9 @@ CA certificate ### ssl_ca_certs_from_system (*bool, optional) {#kafka-ssl_ca_certs_from_system} -System's CA cert store +System's CA cert store -Default: false +Default: false ### ssl_client_cert (*secret.Secret, optional) {#kafka-ssl_client_cert} @@ -222,6 +212,17 @@ Client certificate key Verify certificate hostname +### sasl_over_ssl (bool, required) {#kafka-sasl_over_ssl} + +SASL over SSL + +Default: true + +### scram_mechanism (string, optional) {#kafka-scram_mechanism} + +If set, use SCRAM authentication with specified mechanism. 
When unset, default to PLAIN authentication + + ### slow_flush_log_threshold (string, optional) {#kafka-slow_flush_log_threshold} The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, Fluentd logs a warning message and increases the `fluentd_output_status_slow_flush_count` metric. @@ -235,9 +236,9 @@ Default: "topic" ### use_default_for_unknown_topic (bool, optional) {#kafka-use_default_for_unknown_topic} -Use default for unknown topics +Use default for unknown topics -Default: false +Default: false ### username (*secret.Secret, optional) {#kafka-username} diff --git a/content/docs/configuration/plugins/outputs/kinesis_firehose.md b/content/docs/configuration/plugins/outputs/kinesis_firehose.md index 09d990657..74441d42e 100644 --- a/content/docs/configuration/plugins/outputs/kinesis_firehose.md +++ b/content/docs/configuration/plugins/outputs/kinesis_firehose.md @@ -10,16 +10,16 @@ generated_file: true For details, see [https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_firehose](https://github.com/awslabs/aws-fluent-plugin-kinesis#configuration-kinesis_firehose). 
- ## Example output configurations - - ```yaml - spec: - kinesisFirehose: - delivery_stream_name: example-stream-name - region: us-east-1 - format: - type: json - ``` +## Example output configurations + +```yaml +spec: + kinesisFirehose: + delivery_stream_name: example-stream-name + region: us-east-1 + format: + type: json +``` ## Configuration diff --git a/content/docs/configuration/plugins/outputs/logdna.md b/content/docs/configuration/plugins/outputs/logdna.md index f07b70e9e..acd643cea 100644 --- a/content/docs/configuration/plugins/outputs/logdna.md +++ b/content/docs/configuration/plugins/outputs/logdna.md @@ -35,21 +35,21 @@ Hostname ### ingester_domain (string, optional) {#logdna-ingester_domain} -Custom Ingester URL, Optional +Custom Ingester URL, Optional -Default: `https://logs.logdna.com` +Default: `https://logs.logdna.com` ### ingester_endpoint (string, optional) {#logdna-ingester_endpoint} -Custom Ingester Endpoint, Optional +Custom Ingester Endpoint, Optional -Default: /logs/ingest +Default: /logs/ingest ### request_timeout (string, optional) {#logdna-request_timeout} -HTTPS POST Request Timeout, Optional. Supports s and ms Suffices +HTTPS POST Request Timeout, Optional. 
Supports s and ms Suffices -Default: 30 s +Default: 30 s ### slow_flush_log_threshold (string, optional) {#logdna-slow_flush_log_threshold} @@ -62,6 +62,8 @@ Comma-Separated List of Tags, Optional + + ## Example `LogDNA` filter configurations {{< highlight yaml >}} diff --git a/content/docs/configuration/plugins/outputs/loki.md b/content/docs/configuration/plugins/outputs/loki.md index f6171b571..126ca79bf 100644 --- a/content/docs/configuration/plugins/outputs/loki.md +++ b/content/docs/configuration/plugins/outputs/loki.md @@ -44,15 +44,15 @@ TLS: parameters for presenting a client certificate [Secret](../secret/) ### configure_kubernetes_labels (*bool, optional) {#output-config-configure_kubernetes_labels} -Configure Kubernetes metadata in a Prometheus like format +Configure Kubernetes metadata in a Prometheus like format -Default: false +Default: false ### drop_single_key (*bool, optional) {#output-config-drop_single_key} -If a record only has 1 key, then just set the log line to the value and discard the key. +If a record only has 1 key, then just set the log line to the value and discard the key. -Default: false +Default: false ### extra_labels (map[string]string, optional) {#output-config-extra_labels} @@ -60,21 +60,21 @@ Set of extra labels to include with every Loki stream. ### extract_kubernetes_labels (*bool, optional) {#output-config-extract_kubernetes_labels} -Extract kubernetes labels as loki labels +Extract kubernetes labels as loki labels -Default: false +Default: false ### include_thread_label (*bool, optional) {#output-config-include_thread_label} -whether to include the fluentd_thread label when multiple threads are used for flushing. +whether to include the fluentd_thread label when multiple threads are used for flushing. 
-Default: true +Default: true ### insecure_tls (*bool, optional) {#output-config-insecure_tls} -TLS: disable server certificate verification +TLS: disable server certificate verification -Default: false +Default: false ### key (*secret.Secret, optional) {#output-config-key} @@ -99,9 +99,9 @@ Specify password if the Loki server requires authentication. [Secret](../secret/ ### remove_keys ([]string, optional) {#output-config-remove_keys} -Comma separated list of needless record keys to remove +Comma separated list of needless record keys to remove -Default: [] +Default: [] ### slow_flush_log_threshold (string, optional) {#output-config-slow_flush_log_threshold} @@ -114,7 +114,7 @@ Loki is a multi-tenant log storage platform and all requests sent must include a ### url (string, optional) {#output-config-url} -The url of the Loki server to send logs to. +The url of the Loki server to send logs to. Default: `https://logs-us-west1.grafana.net` diff --git a/content/docs/configuration/plugins/outputs/mattermost.md b/content/docs/configuration/plugins/outputs/mattermost.md index 5420a117c..8c25aca74 100644 --- a/content/docs/configuration/plugins/outputs/mattermost.md +++ b/content/docs/configuration/plugins/outputs/mattermost.md @@ -39,7 +39,7 @@ The ID of the channel where you want to receive the information. You can set the communication channel if it uses TLS. -Default: true +Default: true ### message (string, optional) {#output-config-message} @@ -50,13 +50,13 @@ The message you want to send. It can be a static message, which you add at this Color of the message you are sending, in hexadecimal format. -Default: #A9A9A9 +Default: #A9A9A9 ### message_title (string, optional) {#output-config-message_title} The title you want to add to the message. 
-Default: fluent_title_default +Default: fluent_title_default ### webhook_url (*secret.Secret, required) {#output-config-webhook_url} diff --git a/content/docs/configuration/plugins/outputs/newrelic.md b/content/docs/configuration/plugins/outputs/newrelic.md index b5b0f5ccd..ba2beb994 100644 --- a/content/docs/configuration/plugins/outputs/newrelic.md +++ b/content/docs/configuration/plugins/outputs/newrelic.md @@ -48,7 +48,7 @@ Default: `https://log-api.newrelic.com/log/v1` ### license_key (*secret.Secret, optional) {#output-config-license_key} -New Relic License Key (recommended) [Secret](../secret/" LicenseKey *secret.Secret `json:"license_key)` +New Relic License Key (recommended) [Secret](../secret/). diff --git a/content/docs/configuration/plugins/outputs/opensearch.md b/content/docs/configuration/plugins/outputs/opensearch.md index da5de1a41..b1445fd31 100644 --- a/content/docs/configuration/plugins/outputs/opensearch.md +++ b/content/docs/configuration/plugins/outputs/opensearch.md @@ -35,22 +35,18 @@ Send your logs to OpenSearch ### application_name (*string, optional) {#opensearch-application_name} -Specify the application name for the rollover index to be created. +Specify the application name for the rollover index to be created. -Default: default +Default: default ### buffer (*Buffer, optional) {#opensearch-buffer} ### bulk_message_request_threshold (string, optional) {#opensearch-bulk_message_request_threshold} -Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled. +Configure bulk_message request splitting threshold size. Default value is 20MB. (20 * 1024 * 1024) If you specify this size as negative number, bulk_message request splitting feature will be disabled. 
-Default: 20MB - -### ca_file (*secret.Secret, optional) {#opensearch-ca_file} - -CA certificate +Default: 20MB ### catch_transport_exception_on_retry (*bool, optional) {#opensearch-catch_transport_exception_on_retry} @@ -58,21 +54,6 @@ catch_transport_exception_on_retry (default: true) Default: true -### client_cert (*secret.Secret, optional) {#opensearch-client_cert} - -Client certificate - - -### client_key (*secret.Secret, optional) {#opensearch-client_key} - -Client certificate key - - -### client_key_pass (*secret.Secret, optional) {#opensearch-client_key_pass} - -Client key password - - ### compression_level (string, optional) {#opensearch-compression_level} compression_level @@ -82,7 +63,7 @@ compression_level This parameter adds additional headers to request. Example: `{"token":"secret"}` -Default: {} +Default: {} ### customize_template (string, optional) {#opensearch-customize_template} @@ -101,21 +82,21 @@ You can specify Opensearch data stream name by this parameter. This parameter is ### data_stream_template_name (string, optional) {#opensearch-data_stream_template_name} -Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. +Specify an existing index template for the data stream. If not present, a new template is created and named after the data stream. 
-Default: data_stream_name +Default: data_stream_name ### default_opensearch_version (int, optional) {#opensearch-default_opensearch_version} -max_retry_get_os_version +max_retry_get_os_version -Default: 1 +Default: 1 ### emit_error_for_missing_id (bool, optional) {#opensearch-emit_error_for_missing_id} -emit_error_for_missing_id +emit_error_for_missing_id -Default: false +Default: false ### emit_error_label_event (*bool, optional) {#opensearch-emit_error_label_event} @@ -130,7 +111,7 @@ AWS Endpoint Credentials ### exception_backup (*bool, optional) {#opensearch-exception_backup} -Indicates whether to backup chunk when ignore exception occurs. +Indicates whether to backup chunk when ignore exception occurs. Default: true @@ -158,7 +139,7 @@ Flatten separator ### host (string, optional) {#opensearch-host} -You can specify OpenSearch host by this parameter. +You can specify OpenSearch host by this parameter. Default: localhost @@ -171,11 +152,11 @@ You can specify multiple OpenSearch hosts with separator ",". If you specify hos With http_backend typhoeus, the opensearch plugin uses typhoeus faraday http backend. Typhoeus can handle HTTP keepalive. -Default: excon +Default: excon ### http_backend_excon_nonblock (*bool, optional) {#opensearch-http_backend_excon_nonblock} -http_backend_excon_nonblock +http_backend_excon_nonblock Default: true @@ -196,75 +177,75 @@ With this option set to true, Fluentd manifests the index name in the request UR ### include_tag_key (bool, optional) {#opensearch-include_tag_key} -This will add the Fluentd tag in the JSON record. +This will add the Fluentd tag in the JSON record. -Default: false +Default: false ### include_timestamp (bool, optional) {#opensearch-include_timestamp} -Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API. 
+Adds a @timestamp field to the log, following all settings logstash_format does, except without the restrictions on index_name. This allows one to log to an alias in OpenSearch and utilize the rollover API. -Default: false +Default: false ### index_date_pattern (*string, optional) {#opensearch-index_date_pattern} -Specify this to override the index date pattern for creating a rollover index. +Specify this to override the index date pattern for creating a rollover index. -Default: now/d +Default: now/d ### index_name (string, optional) {#opensearch-index_name} -The index name to write events to +The index name to write events to -Default: fluentd +Default: fluentd ### index_separator (string, optional) {#opensearch-index_separator} -index_separator +index_separator Default: - ### log_os_400_reason (bool, optional) {#opensearch-log_os_400_reason} -log_os_400_reason +log_os_400_reason -Default: false +Default: false ### logstash_dateformat (string, optional) {#opensearch-logstash_dateformat} -Set the Logstash date format. +Set the Logstash date format. -Default: %Y.%m.%d +Default: %Y.%m.%d ### logstash_format (bool, optional) {#opensearch-logstash_format} -Enable Logstash log format. +Enable Logstash log format. -Default: false +Default: false ### logstash_prefix (string, optional) {#opensearch-logstash_prefix} -Set the Logstash prefix. +Set the Logstash prefix. -Default: logstash +Default: logstash ### logstash_prefix_separator (string, optional) {#opensearch-logstash_prefix_separator} -Set the Logstash prefix separator. +Set the Logstash prefix separator. -Default: - +Default: - ### max_retry_get_os_version (int, optional) {#opensearch-max_retry_get_os_version} -max_retry_get_os_version +max_retry_get_os_version -Default: 15 +Default: 15 ### max_retry_putting_template (string, optional) {#opensearch-max_retry_putting_template} -You can specify times of retry putting template. +You can specify times of retry putting template. 
-Default: 10 +Default: 10 ### parent_key (string, optional) {#opensearch-parent_key} @@ -288,21 +269,21 @@ This param is to set a pipeline ID of your OpenSearch to be added into the reque ### port (int, optional) {#opensearch-port} -You can specify OpenSearch port by this parameter. +You can specify OpenSearch port by this parameter. -Default: 9200 +Default: 9200 ### prefer_oj_serializer (bool, optional) {#opensearch-prefer_oj_serializer} -With default behavior, OpenSearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, OpenSearch client uses Oj as JSON encoder/decoder. +With default behavior, OpenSearch client uses Yajl as JSON encoder/decoder. Oj is the alternative high performance JSON encoder/decoder. When this parameter sets as true, OpenSearch client uses Oj as JSON encoder/decoder. -Default: false +Default: false ### reconnect_on_error (bool, optional) {#opensearch-reconnect_on_error} -Indicates that the plugin should reset connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommended to set this true in the presence of OpenSearch shield. +Indicates that the plugin should reset connection on any error (reconnect on next send). By default it will reconnect only on "host unreachable exceptions". We recommended to set this true in the presence of OpenSearch shield. -Default: false +Default: false ### reload_after (string, optional) {#opensearch-reload_after} @@ -317,9 +298,9 @@ Default: true ### reload_on_failure (bool, optional) {#opensearch-reload_on_failure} -Indicates that the OpenSearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of addresses. 
+Indicates that the OpenSearch-transport will try to reload the nodes addresses if there is a failure while making the request, this can be useful to quickly remove a dead node from the list of addresses. -Default: false +Default: false ### remove_keys_on_update (string, optional) {#opensearch-remove_keys_on_update} @@ -333,15 +314,15 @@ This setting allows remove_keys_on_update to be configured with a key in each re ### request_timeout (string, optional) {#opensearch-request_timeout} -You can specify HTTP request timeout. +You can specify HTTP request timeout. -Default: 5s +Default: 5s ### resurrect_after (string, optional) {#opensearch-resurrect_after} -You can set in the OpenSearch-transport how often dead connections from the OpenSearch-transport's pool will be resurrected. +You can set in the OpenSearch-transport how often dead connections from the OpenSearch-transport's pool will be resurrected. -Default: 60s +Default: 60s ### retry_tag (string, optional) {#opensearch-retry_tag} @@ -352,11 +333,32 @@ This setting allows custom routing of messages in response to bulk request failu routing_key + +### ca_file (*secret.Secret, optional) {#opensearch-ca_file} + +CA certificate + + +### client_cert (*secret.Secret, optional) {#opensearch-client_cert} + +Client certificate + + +### client_key (*secret.Secret, optional) {#opensearch-client_key} + +Client certificate key + + +### client_key_pass (*secret.Secret, optional) {#opensearch-client_key_pass} + +Client key password + + ### scheme (string, optional) {#opensearch-scheme} -Connection scheme +Connection scheme -Default: http +Default: http ### selector_class_name (string, optional) {#opensearch-selector_class_name} @@ -386,9 +388,9 @@ If you want to configure SSL/TLS version, you can specify ssl_version parameter. ### suppress_doc_wrap (bool, optional) {#opensearch-suppress_doc_wrap} -By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. 
You can set this to suppress doc wrapping and allow record body to be untouched. +By default, record body is wrapped by 'doc'. This behavior can not handle update script requests. You can set this to suppress doc wrapping and allow record body to be untouched. -Default: false +Default: false ### suppress_type_name (*bool, optional) {#opensearch-suppress_type_name} @@ -397,15 +399,15 @@ Suppress type name to avoid warnings in OpenSearch ### tag_key (string, optional) {#opensearch-tag_key} -This will add the Fluentd tag in the JSON record. +This will add the Fluentd tag in the JSON record. -Default: tag +Default: tag ### target_index_affinity (bool, optional) {#opensearch-target_index_affinity} -target_index_affinity +target_index_affinity -Default: false +Default: false ### target_index_key (string, optional) {#opensearch-target_index_key} @@ -424,9 +426,9 @@ The name of the template to define. If a template by the name given is already p ### template_overwrite (bool, optional) {#opensearch-template_overwrite} -Always update the template, even if it already exists. +Always update the template, even if it already exists. -Default: false +Default: false ### templates (string, optional) {#opensearch-templates} @@ -440,9 +442,9 @@ By default, when inserting records in Logstash format, @timestamp is dynamically ### time_key_exclude_timestamp (bool, optional) {#opensearch-time_key_exclude_timestamp} -time_key_exclude_timestamp +time_key_exclude_timestamp -Default: false +Default: false ### time_key_format (string, optional) {#opensearch-time_key_format} @@ -487,15 +489,15 @@ User for HTTP Basic authentication. This plugin will escape required URL encoded ### utc_index (*bool, optional) {#opensearch-utc_index} -By default, the records inserted into index logstash-YYMMDD with UTC (Coordinated Universal Time). This option allows to use local time if you describe `utc_index` to false. 
+By default, the records inserted into index logstash-YYMMDD with UTC (Coordinated Universal Time). This option allows to use local time if you describe `utc_index` to false. Default: true ### validate_client_version (bool, optional) {#opensearch-validate_client_version} -When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch. +When you use mismatched OpenSearch server and client libraries, fluent-plugin-opensearch cannot send data into OpenSearch. -Default: false +Default: false ### verify_os_version_at_startup (*bool, optional) {#opensearch-verify_os_version_at_startup} @@ -505,15 +507,15 @@ Default: true ### with_transporter_log (bool, optional) {#opensearch-with_transporter_log} -This is debugging purpose option to enable to obtain transporter layer log. +This is debugging purpose option to enable to obtain transporter layer log. -Default: false +Default: false ### write_operation (string, optional) {#opensearch-write_operation} -The write_operation can be any of: (index,create,update,upsert) +The write_operation can be any of: (index,create,update,upsert) -Default: index +Default: index ## OpenSearchEndpointCredentials diff --git a/content/docs/configuration/plugins/outputs/oss.md b/content/docs/configuration/plugins/outputs/oss.md index 1ae3e92ad..77c3cda7c 100644 --- a/content/docs/configuration/plugins/outputs/oss.md +++ b/content/docs/configuration/plugins/outputs/oss.md @@ -9,7 +9,7 @@ generated_file: true **Fluent OSS output plugin** buffers event logs in local files and uploads them to OSS periodically in background threads. -This plugin splits events by using the timestamp of event logs. For example, a log '2019-04-09 message Hello' is reached, and then another log '2019-04-10 message World' is reached in this order, the former is stored in "20190409.gz" file, and latter in "20190410.gz" file. +This plugin splits events by using the timestamp of event logs. 
For example, a log '2019-04-09 message Hello' is reached, and then another log '2019-04-10 message World' is reached in this order, the former is stored in "20190409.gz" file, and latter in "20190410.gz" file. **Fluent OSS input plugin** reads data from OSS periodically. @@ -35,9 +35,9 @@ Your access secret key [Secret](../secret/) ### auto_create_bucket (bool, optional) {#output-config-auto_create_bucket} -desc 'Create OSS bucket if it does not exists +desc 'Create OSS bucket if it does not exists -Default: false +Default: false ### bucket (string, required) {#output-config-bucket} @@ -51,21 +51,21 @@ Your bucket name ### check_bucket (bool, optional) {#output-config-check_bucket} -Check bucket if exists or not +Check bucket if exists or not -Default: true +Default: true ### check_object (bool, optional) {#output-config-check_object} -Check object before creation +Check object before creation -Default: true +Default: true ### download_crc_enable (bool, optional) {#output-config-download_crc_enable} -Download crc enabled +Download crc enabled -Default: true +Default: true ### endpoint (string, required) {#output-config-endpoint} @@ -79,51 +79,51 @@ OSS endpoint to connect to' ### hex_random_length (int, optional) {#output-config-hex_random_length} -The length of `%{hex_random}` placeholder(4-16) +The length of `%{hex_random}` placeholder(4-16) -Default: 4 +Default: 4 ### index_format (string, optional) {#output-config-index_format} -`sprintf` format for `%{index}` +`sprintf` format for `%{index}` -Default: %d +Default: %d ### key_format (string, optional) {#output-config-key_format} -The format of OSS object keys +The format of OSS object keys Default: `%{path}/%{time_slice}_%{index}_%{thread_id}.%{file_extension}` ### open_timeout (int, optional) {#output-config-open_timeout} -Timeout for open connections +Timeout for open connections -Default: 10 +Default: 10 ### oss_sdk_log_dir (string, optional) {#output-config-oss_sdk_log_dir} -OSS SDK log directory +OSS SDK 
log directory -Default: /var/log/td-agent +Default: /var/log/td-agent ### overwrite (bool, optional) {#output-config-overwrite} -Overwrite already existing path +Overwrite already existing path -Default: false +Default: false ### path (string, optional) {#output-config-path} -Path prefix of the files on OSS +Path prefix of the files on OSS -Default: fluent/logs +Default: fluent/logs ### read_timeout (int, optional) {#output-config-read_timeout} -Timeout for read response +Timeout for read response -Default: 120 +Default: 120 ### slow_flush_log_threshold (string, optional) {#output-config-slow_flush_log_threshold} @@ -132,15 +132,15 @@ The threshold for chunk flush performance check. Parameter type is float, not ti ### store_as (string, optional) {#output-config-store_as} -Archive format on OSS: gzip, json, text, lzo, lzma2 +Archive format on OSS: gzip, json, text, lzo, lzma2 -Default: gzip +Default: gzip ### upload_crc_enable (bool, optional) {#output-config-upload_crc_enable} -Upload crc enabled +Upload crc enabled -Default: true +Default: true ### warn_for_delay (string, optional) {#output-config-warn_for_delay} diff --git a/content/docs/configuration/plugins/outputs/redis.md b/content/docs/configuration/plugins/outputs/redis.md index 4000ddc60..c47f9bb93 100644 --- a/content/docs/configuration/plugins/outputs/redis.md +++ b/content/docs/configuration/plugins/outputs/redis.md @@ -28,7 +28,7 @@ spec: Allow inserting key duplicate. It will work as update values. -Default: false +Default: false ### buffer (*Buffer, optional) {#output-config-buffer} @@ -37,9 +37,9 @@ Default: false ### db_number (int, optional) {#output-config-db_number} -DbNumber database number is optional. +DbNumber database number is optional. 
-Default: 0 +Default: 0 ### format (*Format, optional) {#output-config-format} @@ -48,15 +48,15 @@ Default: 0 ### host (string, optional) {#output-config-host} -Host Redis endpoint +Host Redis endpoint -Default: localhost +Default: localhost ### insert_key_prefix (string, optional) {#output-config-insert_key_prefix} -insert_key_prefix +insert_key_prefix -Default: "${tag}" +Default: "${tag}" ### password (*secret.Secret, optional) {#output-config-password} @@ -65,9 +65,9 @@ Redis Server password ### port (int, optional) {#output-config-port} -Port of the Redis server +Port of the Redis server -Default: 6379 +Default: 6379 ### slow_flush_log_threshold (string, optional) {#output-config-slow_flush_log_threshold} @@ -78,7 +78,7 @@ The threshold for chunk flush performance check. Parameter type is float, not ti Users can set strftime format. -Default: "%s" +Default: "%s" ### ttl (int, optional) {#output-config-ttl} diff --git a/content/docs/configuration/plugins/outputs/s3.md b/content/docs/configuration/plugins/outputs/s3.md index cc5d53d32..8774b7b10 100644 --- a/content/docs/configuration/plugins/outputs/s3.md +++ b/content/docs/configuration/plugins/outputs/s3.md @@ -54,14 +54,14 @@ Permission for the object in S3 Create S3 bucket if it does not exists -### aws_iam_retries (string, optional) {#output-config-aws_iam_retries} +### aws_key_id (*secret.Secret, optional) {#output-config-aws_key_id} -The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role +AWS access key id [Secret](../secret/) -### aws_key_id (*secret.Secret, optional) {#output-config-aws_key_id} +### aws_iam_retries (string, optional) {#output-config-aws_iam_retries} -AWS access key id [Secret](../secret/) +The number of attempts to load instance profile credentials from the EC2 metadata service using IAM role ### aws_sec_key (*secret.Secret, optional) {#output-config-aws_sec_key} @@ -91,7 +91,7 @@ Check object before creation ### clustername (string, 
optional) {#output-config-clustername} -Custom cluster name +Custom cluster name Default: one-eye @@ -157,7 +157,7 @@ The length of `%{hex_random}` placeholder(4-16) ### oneeye_format (bool, optional) {#output-config-oneeye_format} -One-eye format trigger +One-eye format trigger Default: false @@ -314,13 +314,13 @@ Number of seconds to wait for one block to be read ### ip_address (string, optional) {#instance profile-credentials-ip_address} -IP address +IP address Default: 169.254.169.254 ### port (string, optional) {#instance profile-credentials-port} -Port number +Port number Default: 80 @@ -336,9 +336,9 @@ shared_credentials ### path (string, optional) {#shared-credentials-path} -Path to the shared file. +Path to the shared file. -Default: $HOME/.aws/credentials +Default: $HOME/.aws/credentials ### profile_name (string, optional) {#shared-credentials-profile_name} @@ -352,27 +352,27 @@ parquet compressor ### parquet_compression_codec (string, optional) {#parquet-compressor-parquet_compression_codec} -Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd) +Parquet compression codec. (uncompressed, snappy, gzip, lzo, brotli, lz4, zstd) -Default: snappy +Default: snappy ### parquet_page_size (string, optional) {#parquet-compressor-parquet_page_size} -Parquet file page size. +Parquet file page size. -Default: 8192 bytes +Default: 8192 bytes ### parquet_row_group_size (string, optional) {#parquet-compressor-parquet_row_group_size} -Parquet file row group size. +Parquet file row group size. -Default: 128 MB +Default: 128 MB ### record_type (string, optional) {#parquet-compressor-record_type} -Record data format type. (avro csv jsonl msgpack tsv msgpack json) +Record data format type. (avro csv jsonl msgpack tsv msgpack json) -Default: msgpack +Default: msgpack ### schema_file (string, optional) {#parquet-compressor-schema_file} @@ -381,8 +381,8 @@ Path to schema file. 
### schema_type (string, optional) {#parquet-compressor-schema_type} -Schema type. (avro, bigquery) +Schema type. (avro, bigquery) -Default: avro +Default: avro diff --git a/content/docs/configuration/plugins/outputs/splunk_hec.md b/content/docs/configuration/plugins/outputs/splunk_hec.md index ae0c70046..78b93db69 100644 --- a/content/docs/configuration/plugins/outputs/splunk_hec.md +++ b/content/docs/configuration/plugins/outputs/splunk_hec.md @@ -53,15 +53,15 @@ The private key for this client.' [Secret](../secret/) ### coerce_to_utf8 (*bool, optional) {#splunkhecoutput-coerce_to_utf8} -Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters. . +Indicates whether to allow non-UTF-8 characters in user logs. If set to true, any non-UTF-8 character is replaced by the string specified in non_utf8_replacement_string. If set to false, the Ingest API errors out any non-UTF-8 characters. . -Default: true +Default: true ### data_type (string, optional) {#splunkhecoutput-data_type} -The type of data that will be sent to Sumo Logic, either event or metric +The type of data that will be sent to Sumo Logic, either event or metric -Default: event +Default: event ### fields (Fields, optional) {#splunkhecoutput-fields} @@ -80,9 +80,9 @@ You can specify SplunkHec host by this parameter. ### hec_port (int, optional) {#splunkhecoutput-hec_port} -The port number for the Hec token or the Hec load balancer. +The port number for the Hec token or the Hec load balancer. -Default: 8088 +Default: 8088 ### hec_token (*secret.Secret, required) {#splunkhecoutput-hec_token} @@ -116,7 +116,7 @@ The field name that contains the Splunk index name. 
Cannot set both index and in ### insecure_ssl (*bool, optional) {#splunkhecoutput-insecure_ssl} -Indicates if insecure SSL connection is allowed +Indicates if insecure SSL connection is allowed Default: false @@ -129,7 +129,7 @@ By default, all the fields used by the *_key parameters are removed from the ori Field name that contains the metric name. This parameter only works in conjunction with the metrics_from_event parameter. When this prameter is set, the `metrics_from_event` parameter is automatically set to false. -Default: true +Default: true ### metric_value_key (string, optional) {#splunkhecoutput-metric_value_key} @@ -143,9 +143,9 @@ When data_type is set to "metric", the ingest API will treat every key-value pai ### non_utf8_replacement_string (string, optional) {#splunkhecoutput-non_utf8_replacement_string} -If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter. . +If coerce_to_utf8 is set to true, any non-UTF-8 character is replaced by the string you specify in this parameter. . -Default: ' ' +Default: ' ' ### open_timeout (int, optional) {#splunkhecoutput-open_timeout} @@ -154,15 +154,20 @@ The amount of time to wait for a connection to be opened. ### protocol (string, optional) {#splunkhecoutput-protocol} -This is the protocol to use for calling the Hec API. Available values are: http, https. +This is the protocol to use for calling the Hec API. Available values are: http, https. -Default: https +Default: https ### read_timeout (int, optional) {#splunkhecoutput-read_timeout} The amount of time allowed between reading two chunks from the socket. +### ssl_ciphers (string, optional) {#splunkhecoutput-ssl_ciphers} + +List of SSL ciphers allowed. + + ### slow_flush_log_threshold (string, optional) {#splunkhecoutput-slow_flush_log_threshold} The threshold for chunk flush performance check. 
Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. @@ -187,9 +192,5 @@ The sourcetype field for events. When not set, the sourcetype is decided by HEC. Field name that contains the sourcetype. Cannot set both source and source_key parameters at the same time. -### ssl_ciphers (string, optional) {#splunkhecoutput-ssl_ciphers} - -List of SSL ciphers allowed. - diff --git a/content/docs/configuration/plugins/outputs/sqs.md b/content/docs/configuration/plugins/outputs/sqs.md index 8710c2b45..a4525773a 100644 --- a/content/docs/configuration/plugins/outputs/sqs.md +++ b/content/docs/configuration/plugins/outputs/sqs.md @@ -28,21 +28,21 @@ AWS secret key ### create_queue (*bool, optional) {#output-config-create_queue} -Create SQS queue +Create SQS queue -Default: true +Default: true ### delay_seconds (int, optional) {#output-config-delay_seconds} -Delivery delay seconds +Delivery delay seconds -Default: 0 +Default: 0 ### include_tag (*bool, optional) {#output-config-include_tag} -Include tag +Include tag -Default: true +Default: true ### message_group_id (string, optional) {#output-config-message_group_id} @@ -55,25 +55,26 @@ SQS queue name - required if sqs_url is not set ### region (string, optional) {#output-config-region} -AWS region +AWS region -Default: ap-northeast-1 +Default: ap-northeast-1 -### slow_flush_log_threshold (string, optional) {#output-config-slow_flush_log_threshold} +### sqs_url (string, optional) {#output-config-sqs_url} -The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. +SQS queue url e.g. 
`https://sqs.us-west-2.amazonaws.com/123456789012/myqueue` -### sqs_url (string, optional) {#output-config-sqs_url} +### slow_flush_log_threshold (string, optional) {#output-config-slow_flush_log_threshold} -SQS queue url e.g. `https://sqs.us-west-2.amazonaws.com/123456789012/myqueue` +The threshold for chunk flush performance check. Parameter type is float, not time, default: 20.0 (seconds) If chunk flush takes longer time than this threshold, fluentd logs warning message and increases metric fluentd_output_status_slow_flush_count. ### tag_property_name (string, optional) {#output-config-tag_property_name} -Tags property name in json +Tags property name in json + +Default: '__tag' -Default: '__tag' diff --git a/content/docs/configuration/plugins/outputs/sumologic.md b/content/docs/configuration/plugins/outputs/sumologic.md index ede59e3dc..49c059547 100644 --- a/content/docs/configuration/plugins/outputs/sumologic.md +++ b/content/docs/configuration/plugins/outputs/sumologic.md @@ -44,9 +44,9 @@ spec: ### add_timestamp (bool, optional) {#output-config-add_timestamp} -Add timestamp (or timestamp_key) field to logs before sending to sumologic +Add timestamp (or timestamp_key) field to logs before sending to SumoLogic -Default: true +Default: true ### buffer (*Buffer, optional) {#output-config-buffer} @@ -54,15 +54,15 @@ Default: true ### compress (*bool, optional) {#output-config-compress} -Compress payload +Compress payload -Default: false +Default: false ### compress_encoding (string, optional) {#output-config-compress_encoding} -Encoding method of compression (either gzip or deflate) +Encoding method of compression (either gzip or deflate) -Default: gzip +Default: gzip ### custom_dimensions (string, optional) {#output-config-custom_dimensions} @@ -76,9 +76,9 @@ Comma-separated key=value list of fields to apply to every log. 
[More informatio ### data_type (string, optional) {#output-config-data_type} -The type of data that will be sent to Sumo Logic, either logs or metrics +The type of data that will be sent to Sumo Logic, either logs or metrics -Default: logs +Default: logs ### delimiter (string, optional) {#output-config-delimiter} @@ -88,9 +88,9 @@ Default: . ### disable_cookies (bool, optional) {#output config-disable_cookies} -Option to disable cookies on the HTTP Client. +Option to disable cookies on the HTTP Client. -Default: false +Default: false ### endpoint (*secret.Secret, required) {#output-config-endpoint} @@ -99,27 +99,27 @@ SumoLogic HTTP Collector URL ### log_format (string, optional) {#output-config-log_format} -Format to post logs into Sumo. +Format to post logs into Sumo. -Default: json +Default: json ### log_key (string, optional) {#output-config-log_key} -Used to specify the key when merging json or sending logs in text format +Used to specify the key when merging json or sending logs in text format -Default: message +Default: message ### metric_data_format (string, optional) {#output-config-metric_data_format} -The format of metrics you will be sending, either graphite or carbon2 or prometheus +The format of metrics you will be sending, either graphite or carbon2 or prometheus -Default: graphite +Default: graphite ### open_timeout (int, optional) {#output-config-open_timeout} -Set timeout seconds to wait until connection is opened. +Set timeout seconds to wait until connection is opened. -Default: 60 +Default: 60 ### proxy_uri (string, optional) {#output-config-proxy_uri} @@ -133,15 +133,15 @@ The threshold for chunk flush performance check. 
Parameter type is float, not ti ### source_category (string, optional) {#output-config-source_category} -Set _sourceCategory metadata field within SumoLogic +Set _sourceCategory metadata field within SumoLogic -Default: nil +Default: nil ### source_host (string, optional) {#output-config-source_host} -Set _sourceHost metadata field within SumoLogic +Set _sourceHost metadata field within SumoLogic -Default: nil +Default: nil ### source_name (string, required) {#output-config-source_name} @@ -150,26 +150,26 @@ Set _sourceName metadata field within SumoLogic - overrides source_name_key (def ### source_name_key (string, optional) {#output-config-source_name_key} -Set as source::path_key's value so that the source_name can be extracted from Fluentd's buffer +Set as source::path_key's value so that the source_name can be extracted from Fluentd's buffer -Default: source_name +Default: source_name ### sumo_client (string, optional) {#output-config-sumo_client} -Name of sumo client which is send as X-Sumo-Client header +Name of sumo client which is send as X-Sumo-Client header -Default: fluentd-output +Default: fluentd-output ### timestamp_key (string, optional) {#output-config-timestamp_key} -Field name when add_timestamp is on +Field name when add_timestamp is on -Default: timestamp +Default: timestamp ### verify_ssl (bool, optional) {#output-config-verify_ssl} -Verify ssl certificate. +Verify ssl certificate. 
-Default: true +Default: true diff --git a/content/docs/configuration/plugins/outputs/syslog.md b/content/docs/configuration/plugins/outputs/syslog.md index 8fee93d47..68901e20e 100644 --- a/content/docs/configuration/plugins/outputs/syslog.md +++ b/content/docs/configuration/plugins/outputs/syslog.md @@ -13,9 +13,9 @@ generated_file: true ### allow_self_signed_cert (*bool, optional) {#syslogoutputconfig-allow_self_signed_cert} -allow_self_signed_cert for mutual tls +allow_self_signed_cert for mutual tls -Default: false +Default: false ### buffer (*Buffer, optional) {#syslogoutputconfig-buffer} @@ -39,9 +39,9 @@ cert_store to set ca_certificate for ssl context ### fqdn (string, optional) {#syslogoutputconfig-fqdn} -Fqdn +Fqdn -Default: "nil" +Default: "nil" ### host (string, required) {#syslogoutputconfig-host} @@ -50,21 +50,21 @@ Destination host address ### insecure (*bool, optional) {#syslogoutputconfig-insecure} -skip ssl validation +skip ssl validation -Default: false +Default: false ### port (int, optional) {#syslogoutputconfig-port} -Destination host port +Destination host port -Default: "514" +Default: "514" ### private_key_passphrase (*secret.Secret, optional) {#syslogoutputconfig-private_key_passphrase} -PrivateKeyPassphrase for private key +PrivateKeyPassphrase for private key -Default: "nil" +Default: "nil" ### private_key_path (*secret.Secret, optional) {#syslogoutputconfig-private_key_path} @@ -78,9 +78,9 @@ The threshold for chunk flush performance check. 
Parameter type is float, not ti ### transport (string, optional) {#syslogoutputconfig-transport} -Transport Protocol +Transport Protocol -Default: "tls" +Default: "tls" ### trusted_ca_path (*secret.Secret, optional) {#syslogoutputconfig-trusted_ca_path} @@ -89,19 +89,20 @@ file path to ca to trust ### verify_fqdn (*bool, optional) {#syslogoutputconfig-verify_fqdn} -verify_fqdn +verify_fqdn -Default: nil +Default: nil ### version (string, optional) {#syslogoutputconfig-version} -TLS Version +TLS Version + +Default: "TLSv1_2" -Default: "TLSv1_2" -## Example `File` output configurations +## Example `File` output configurations {{< highlight yaml >}} apiVersion: logging.banzaicloud.io/v1beta1 @@ -124,16 +125,16 @@ spec: Fluentd config result: {{< highlight xml >}} - + @type syslog_rfc5424 @id test_syslog host SYSLOG-HOST port 123 - - @type syslog_rfc5424 - app_name_field example.custom_field_1 - proc_id_field example.custom_field_2 - + + @type syslog_rfc5424 + app_name_field example.custom_field_1 + proc_id_field example.custom_field_2 + @type file path /buffers/test_file.*.buffer @@ -142,7 +143,7 @@ Fluentd config result: timekey_use_utc true timekey_wait 30s - + {{}} diff --git a/content/docs/configuration/plugins/outputs/vmware_log_intelligence.md b/content/docs/configuration/plugins/outputs/vmware_log_intelligence.md new file mode 100644 index 000000000..851297bc2 --- /dev/null +++ b/content/docs/configuration/plugins/outputs/vmware_log_intelligence.md @@ -0,0 +1,116 @@ +--- +title: VMware Log Intelligence +weight: 200 +generated_file: true +--- + +## Overview + +VMware Log Intelligence output plugin for Fluentd. For details, see [https://github.com/vmware/fluent-plugin-vmware-log-intelligence](https://github.com/vmware/fluent-plugin-vmware-log-intelligence). 
+ +## Example output configurations + +```yaml +spec: + vmwarelogintelligence: + endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream + verify_ssl: true + http_compress: false + headers: + content_type: "application/json" + authorization: + valueFrom: + secretKeyRef: + name: vmware-log-intelligence-token + key: authorization + structure: simple + buffer: + chunk_limit_records: 300 + flush_interval: 3s + retry_max_times: 3 +``` + + +## Configuration +## VMwareLogIntelligence + +### buffer (*Buffer, optional) {#vmwarelogintelligence-buffer} + +[Buffer](../buffer/) + + +### endpoint_url (string, required) {#vmwarelogintelligence-endpoint_url} + +Log Intelligence endpoint to send logs to https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-endpoint_url + + +### format (*Format, optional) {#vmwarelogintelligence-format} + +[Format](../format/) + + +### http_compress (*bool, optional) {#vmwarelogintelligence-http_compress} + +Compress http request https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-http_compress + + +### headers (LogIntelligenceHeaders, required) {#vmwarelogintelligence-headers} + +Required headers for sending logs to VMware Log Intelligence https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E + + +### verify_ssl (*bool, required) {#vmwarelogintelligence-verify_ssl} + +Verify SSL (default: true) https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-verify_ssl + +Default: true + + +## VMwareLogIntelligenceHeaders + +headers +https://github.com/vmware/fluent-plugin-vmware-log-intelligence?tab=readme-ov-file#label-3Cheaders-3E + +### authorization (*secret.Secret, required) {#vmwarelogintelligenceheaders-authorization} + +Authorization Bearer token for http request to VMware Log Intelligence [Secret](../secret/) + + +### content_type (string, required) 
{#vmwarelogintelligenceheaders-content_type} + +Content Type for http request to VMware Log Intelligence + +Default: application/json + +### structure (string, required) {#vmwarelogintelligenceheaders-structure} + +Structure for http request to VMware Log Intelligence + +Default: simple + + +## LogIntelligenceHeadersOut + +LogIntelligenceHeadersOut is used to convert the input LogIntelligenceHeaders to a fluentd +output that uses the correct key names for the VMware Log Intelligence plugin. This allows the +Output to accept the config in snake_case (as other output plugins do) but output the fluentd + config with the proper key names (i.e. content_type -> Content-Type) + +### Authorization (*secret.Secret, required) {#logintelligenceheadersout-authorization} + +Authorization Bearer token for http request to VMware Log Intelligence + + +### Content-Type (string, required) {#logintelligenceheadersout-content-type} + +Content Type for http request to VMware Log Intelligence + +Default: application/json + +### structure (string, required) {#logintelligenceheadersout-structure} + +Structure for http request to VMware Log Intelligence + +Default: simple + + diff --git a/content/docs/configuration/plugins/outputs/vmware_loginsight.md b/content/docs/configuration/plugins/outputs/vmware_loginsight.md new file mode 100644 index 000000000..b7abfe92a --- /dev/null +++ b/content/docs/configuration/plugins/outputs/vmware_loginsight.md @@ -0,0 +1,174 @@ +--- +title: VMware LogInsight +weight: 200 +generated_file: true +--- + +## Overview + +VMware LogInsight output plugin for Fluentd. For details, see [https://github.com/vmware/fluent-plugin-vmware-loginsight](https://github.com/vmware/fluent-plugin-vmware-loginsight). 
+ +## Example output configurations + +```yaml +spec: + vmwareLogInsight: + scheme: https + ssl_verify: true + host: MY_LOGINSIGHT_HOST + port: 9543 + agent_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + log_text_keys: + - log + - msg + - message + http_conn_debug: false +``` + + +## Configuration +## VMwareLogInsight + +Send your logs to VMware LogInsight + +### agent_id (string, optional) {#vmwareloginsight-agent_id} + +agent_id generated by your LI + +Default: 0 + +### authentication (*string, optional) {#vmwareloginsight-authentication} + +Type of authentication to use (nil,basic) + +Default: nil + +### buffer (*Buffer, optional) {#vmwareloginsight-buffer} + +[Buffer](../buffer/) + + +### ca_file (*secret.Secret, optional) {#vmwareloginsight-ca_file} + +[Secret](../secret/) + + +### config_param (map[string]string, optional) {#vmwareloginsight-config_param} + +Rename fields names + +Default: {"source" => "log_source"} + +### flatten_hashes (*bool, optional) {#vmwareloginsight-flatten_hashes} + +Flatten hashes to create one key/val pair w/o losing log data + +Default: true + +### flatten_hashes_separator (string, optional) {#vmwareloginsight-flatten_hashes_separator} + +Separator to use for joining flattened keys + +Default: _ + +### http_conn_debug (bool, optional) {#vmwareloginsight-http_conn_debug} + +If set, enables debug logs for http connection + +Default: false + +### http_method (string, optional) {#vmwareloginsight-http_method} + +HTTP method (post) + +Default: post + +### host (string, optional) {#vmwareloginsight-host} + +VMware Aria Operations For Logs Host ex. localhost + + +### log_text_keys ([]string, optional) {#vmwareloginsight-log_text_keys} + +Keys from log event whose values should be added as log message/text to VMware Aria Operations For Logs. These key/value pairs won't be expanded/flattened and won't be added as metadata/fields. 
+ +Default: ["log", "message", "msg"] + +### max_batch_size (int, optional) {#vmwareloginsight-max_batch_size} + +Number of bytes per post request + +Default: 4000000 + +### password (*secret.Secret, optional) {#vmwareloginsight-password} + +[Secret](../secret/) + + +### path (string, optional) {#vmwareloginsight-path} + +VMware Aria Operations For Logs ingestion api path ex. 'api/v1/events/ingest' + +Default: api/v1/events/ingest + +### port (int, optional) {#vmwareloginsight-port} + +VMware Aria Operations For Logs port ex. 9000 + +Default: 80 + +### raise_on_error (bool, optional) {#vmwareloginsight-raise_on_error} + +Raise errors that were rescued during HTTP requests? + +Default: false + +### rate_limit_msec (int, optional) {#vmwareloginsight-rate_limit_msec} + +Simple rate limiting: ignore any records within `rate_limit_msec` since the last one + +Default: 0 + +### request_retries (int, optional) {#vmwareloginsight-request_retries} + +Number of retries + +Default: 3 + +### request_timeout (int, optional) {#vmwareloginsight-request_timeout} + +http connection ttl for each request + +Default: 5 + +### ssl_verify (*bool, optional) {#vmwareloginsight-ssl_verify} + +SSL verification flag + +Default: true + +### scheme (string, optional) {#vmwareloginsight-scheme} + +HTTP scheme (http,https) + +Default: http + +### serializer (string, optional) {#vmwareloginsight-serializer} + +Serialization (json) + +Default: json + +### shorten_keys (map[string]string, optional) {#vmwareloginsight-shorten_keys} + +Keys from log event to rewrite for instance from 'kubernetes_namespace' to 'k8s_namespace' tags will be rewritten with substring substitution and applied in the order present in the hash. 
Hashes enumerate their values in the order that the corresponding keys were inserted, see: https://ruby-doc.org/core-2.2.2/Hash.html + +Default: { 'kubernetes_':'k8s_', 'namespace':'ns', 'labels_':'', '_name':'', '_hash':'', 'container_':'' } + + +### username (*secret.Secret, optional) {#vmwareloginsight-username} + +[Secret](../secret/) + + + diff --git a/content/docs/configuration/plugins/syslog-ng-filters/match.md b/content/docs/configuration/plugins/syslog-ng-filters/match.md index e80761281..ca5de09ef 100644 --- a/content/docs/configuration/plugins/syslog-ng-filters/match.md +++ b/content/docs/configuration/plugins/syslog-ng-filters/match.md @@ -7,17 +7,17 @@ generated_file: true Match filters can be used to select the log records to process. These filters have the same options and syntax as [syslog-ng flow match expressions]({{< relref "/docs/configuration/plugins/syslog-ng-filters/match.md" >}}). {{< highlight yaml >}} - filters: - - match: - or: - - regexp: - value: json.kubernetes.labels.app.kubernetes.io/name - pattern: apache - type: string - - regexp: - value: json.kubernetes.labels.app.kubernetes.io/name - pattern: nginx - type: string +filters: +- match: + or: + - regexp: + value: json.kubernetes.labels.app.kubernetes.io/name + pattern: apache + type: string + - regexp: + value: json.kubernetes.labels.app.kubernetes.io/name + pattern: nginx + type: string {{}} @@ -72,6 +72,7 @@ Specify a field name of the record to match against the value of. + ## Example `Regexp` filter configurations ```yaml diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/auth.md b/content/docs/configuration/plugins/syslog-ng-outputs/auth.md index d84a2d05c..f68632a7a 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/auth.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/auth.md @@ -10,7 +10,7 @@ GRPC-based outputs use this configuration instead of the simple `tls` field foun ## Configuration ## Auth -Authentication settings. 
Only one authentication method can be set. Default: insecure +Authentication settings. Only one authentication method can be set. Default: Insecure ### adc (*ADC, optional) {#auth-adc} diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md b/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md index dadd4236c..476697c6b 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/elasticsearch.md @@ -47,7 +47,7 @@ Name of the data stream, index, or index alias to perform the action on. ### logstash_prefix (string, optional) {#elasticsearchoutput-logstash_prefix} -Set the prefix for logs in logstash format. If set, then Index field will be ignored. +Set the prefix for logs in logstash format. If set, then the Index field will be ignored. ### logstash_prefix_separator (string, optional) {#elasticsearchoutput-logstash_prefix_separator} @@ -63,5 +63,14 @@ Default: `${YEAR}.${MONTH}.${DAY}`### type (*string, optional) {#elasticsearchou The document type associated with the operation. Elasticsearch indices now support a single document type: `_doc` +### template (string, optional) {#elasticsearchoutput-template} + +The template to format the record itself inside the payload body + + +### type (*string, optional) {#elasticsearchoutput-type} + +The document type associated with the operation. Elasticsearch indices now support a single document type: `_doc` + diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/file.md b/content/docs/configuration/plugins/syslog-ng-outputs/file.md index 9ee952462..2d37a1520 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/file.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/file.md @@ -7,10 +7,10 @@ generated_file: true The `file` output stores log records in a plain text file. 
{{< highlight yaml >}} - spec: - file: - path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log - create_dirs: true +spec: + file: + path: /mnt/archive/logs/${YEAR}/${MONTH}/${DAY}/app.log + create_dirs: true {{}} For details on the available options of the output, see the [documentation of the AxoSyslog syslog-ng distribution](https://axoflow.com/docs/axosyslog-core/chapter-destinations/configuring-destinations-file/). @@ -23,33 +23,33 @@ For available macros like `${YEAR}/${MONTH}/${DAY}` see the [documentation of th ### create_dirs (bool, optional) {#fileoutput-create_dirs} -Enable creating non-existing directories. +Enable creating non-existing directories. -Default: false +Default: false ### dir_group (string, optional) {#fileoutput-dir_group} The group of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: `dir-group()`. -Default: Use the global settings +Default: Use the global settings ### dir_owner (string, optional) {#fileoutput-dir_owner} The owner of the directories created by syslog-ng. To preserve the original properties of an existing directory, use the option without specifying an attribute: `dir-owner()`. -Default: Use the global settings +Default: Use the global settings ### dir_perm (int, optional) {#fileoutput-dir_perm} The permission mask of directories created by syslog-ng. Log directories are only created if a file after macro expansion refers to a non-existing directory, and directory creation is enabled (see also the `create-dirs()` option). For octal numbers prefix the number with 0, for example, use `0755` for `rwxr-xr-x`. -Default: Use the global settings +Default: Use the global settings ### disk_buffer (*DiskBuffer, optional) {#fileoutput-disk_buffer} -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. 
For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). -Default: false +Default: false ### path (string, required) {#fileoutput-path} diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/http.md b/content/docs/configuration/plugins/syslog-ng-outputs/http.md index 85f606bcf..e5d87dc1c 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/http.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/http.md @@ -61,28 +61,6 @@ For details, see the [documentation of the AxoSyslog syslog-ng distribution](htt ## Configuration -### headers ([]string, optional) {#httpoutput-headers} - -Custom HTTP headers to include in the request, for example, headers("HEADER1: header1", "HEADER2: header2"). - -Default: empty - -### time_reopen (int, optional) {#httpoutput-time_reopen} - -The time to wait in seconds before a dead connection is reestablished. - -Default: 60 - -### tls (*TLS, optional) {#httpoutput-tls} - -This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see [TLS for syslog-ng outputs](../tls/) and the [documentation of the AxoSyslog syslog-ng distribution](https://axoflow.com/docs/axosyslog-core/chapter-encrypted-transport-tls/tlsoptions/). - - -### disk_buffer (*DiskBuffer, optional) {#httpoutput-disk_buffer} - -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). 
- -Default: false ### (Batch, required) {#httpoutput-} @@ -109,6 +87,18 @@ The string syslog-ng OSE puts to the end of the body of the HTTP request, after By default, syslog-ng OSE separates the log messages of the batch with a newline character. +### disk_buffer (*DiskBuffer, optional) {#httpoutput-disk_buffer} + +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). + +Default: false + +### headers ([]string, optional) {#httpoutput-headers} + +Custom HTTP headers to include in the request, for example, `headers("HEADER1: header1", "HEADER2: header2")`. + +Default: empty + ### log-fifo-size (int, optional) {#httpoutput-log-fifo-size} The number of messages that the output queue can store. @@ -139,6 +129,8 @@ Specifies what syslog-ng does with the log message, based on the response code r The number of times syslog-ng OSE attempts to send a message to this destination. If syslog-ng OSE could not send a message, it will try again until the number of attempts reaches `retries`, then drops the message. + + ### time_reopen (int, optional) {#httpoutput-time_reopen} The time to wait in seconds before a dead connection is reestablished. @@ -157,7 +149,7 @@ This option sets various options related to TLS encryption, for example, key/cer ### url (string, optional) {#httpoutput-url} -Specifies the hostname or IP address and optionally the port number of the web service that can receive log data via HTTP. Use a colon (:) after the address to specify the port number of the server. For example: `http://127.0.0.1:8000` +Specifies the hostname or IP address and optionally the port number of the web service that can receive log data via HTTP. Use a colon (:) after the address to specify the port number of the server. 
For example: `http://127.0.0.1:8000` ### user (string, optional) {#httpoutput-user} diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/logscale.md b/content/docs/configuration/plugins/syslog-ng-outputs/logscale.md index 49897b509..78601f44a 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/logscale.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/logscale.md @@ -34,7 +34,7 @@ spec: ### attributes (string, optional) {#logscaleoutput-attributes} -A JSON object representing key-value pairs for the Event. These key-value pairs adds structure to Events, making it easier to search. Attributes can be nested JSON objects, however, we recommend limiting the amount of nesting. +A JSON object representing key-value pairs for the Event. These key-value pairs adds structure to Events, making it easier to search. Attributes can be nested JSON objects, however, we recommend limiting the amount of nesting. Default: `"--scope rfc5424 --exclude MESSAGE --exclude DATE --leave-initial-dot"` @@ -52,30 +52,30 @@ Default: `"--scope rfc5424 --exclude MESSAGE --exclude DATE --leave-initial-dot" ### content_type (string, optional) {#logscaleoutput-content_type} -This field specifies the content type of the log records being sent to Falcon's LogScale. +This field specifies the content type of the log records being sent to Falcon's LogScale. Default: `"application/json"` ### disk_buffer (*DiskBuffer, optional) {#logscaleoutput-disk_buffer} -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). 
-Default: false +Default: false ### extra_headers (string, optional) {#logscaleoutput-extra_headers} -This field represents additional headers that can be included in the HTTP request when sending log records to Falcon's LogScale. +This field represents additional headers that can be included in the HTTP request when sending log records to Falcon's LogScale. -Default: empty +Default: empty ### persist_name (string, optional) {#logscaleoutput-persist_name} ### rawstring (string, optional) {#logscaleoutput-rawstring} -The raw string representing the Event. The default display for an Event in LogScale is the rawstring. If you do not provide the rawstring field, then the response defaults to a JSON representation of the attributes field. +The raw string representing the Event. The default display for an Event in LogScale is the rawstring. If you do not provide the rawstring field, then the response defaults to a JSON representation of the attributes field. -Default: empty +Default: empty ### timezone (string, optional) {#logscaleoutput-timezone} @@ -86,11 +86,11 @@ The timezone is only required if you specify the timestamp in milliseconds. The An [Ingest Token](https://library.humio.com/data-analysis/ingesting-data-tokens.html) is a unique string that identifies a repository and allows you to send data to that repository. -Default: empty +Default: empty ### url (*secret.Secret, optional) {#logscaleoutput-url} -Ingester URL is the URL of the Humio cluster you want to send data to. +Ingester URL is the URL of the Humio cluster you want to send data to. 
Default: `https://cloud.humio.com` diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/loki.md b/content/docs/configuration/plugins/syslog-ng-outputs/loki.md index b16f4bf80..f1694f5b1 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/loki.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/loki.md @@ -29,7 +29,7 @@ spec: template: "$ISODATE $HOST $MSGHDR$MSG" auth: insecure: {} -{{< /highlight >}} +{{}} For details on the available options of the output, see the [documentation of the AxoSyslog syslog-ng distribution](https://axoflow.com/docs/axosyslog-core/chapter-destinations/destination-loki/). For available macros like `$PROGRAM` and `$HOST` see https://axoflow.com/docs/axosyslog-core/chapter-manipulating-messages/customizing-message-format/reference-macros/ @@ -53,9 +53,9 @@ Description: Specifies the time syslog-ng OSE waits for lines to accumulate in t ### disk_buffer (*DiskBuffer, optional) {#lokioutput-disk_buffer} -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). -Default: false +Default: false ### labels (filter.ArrowMap, optional) {#lokioutput-labels} @@ -84,9 +84,9 @@ Template for customizing the log message format. ### time_reopen (int, optional) {#lokioutput-time_reopen} -The time to wait in seconds before a dead connection is reestablished. +The time to wait in seconds before a dead connection is reestablished. 
-Default: 60 +Default: 60 ### timestamp (string, optional) {#lokioutput-timestamp} diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/mongodb.md b/content/docs/configuration/plugins/syslog-ng-outputs/mongodb.md index 0319ed45b..9f8fd6a72 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/mongodb.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/mongodb.md @@ -21,7 +21,7 @@ spec: collection: syslog uri: "mongodb://mongodb-endpoint/syslog?wtimeoutMS=60000&socketTimeoutMS=60000&connectTimeoutMS=60000" value_pairs: scope("selected-macros" "nv-pairs") - {{}} +{{}} For more information, see the [documentation of the AxoSyslog syslog-ng distribution](https://axoflow.com/docs/axosyslog-core/chapter-destinations/configuring-destinations-mongodb/). @@ -50,9 +50,9 @@ Defines the folder where the disk-buffer files are stored. ### disk_buffer (*DiskBuffer, optional) {#mongodb-disk_buffer} -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). -Default: false +Default: false ### log-fifo-size (int, optional) {#mongodb-log-fifo-size} @@ -71,9 +71,9 @@ The number of times syslog-ng OSE attempts to send a message to this destination ### time_reopen (int, optional) {#mongodb-time_reopen} -The time to wait in seconds before a dead connection is reestablished. +The time to wait in seconds before a dead connection is reestablished. 
-Default: 60 +Default: 60 ### uri (*secret.Secret, optional) {#mongodb-uri} @@ -83,7 +83,7 @@ Default: `mongodb://127.0.0.1:27017/syslog?wtimeoutMS=60000&socketTimeoutMS=600 ### value_pairs (ValuePairs, optional) {#mongodb-value_pairs} -Creates structured name-value pairs from the data and metadata of the log message. +Creates structured name-value pairs from the data and metadata of the log message. Default: `"scope("selected-macros" "nv-pairs")"` @@ -99,21 +99,21 @@ Bulk operation related options. For details, see the [documentation of the AxoSy ### bulk (*bool, optional) {#bulk-bulk} -Enables bulk insert mode. If disabled, each messages is inserted individually. +Enables bulk insert mode. If disabled, each messages is inserted individually. -Default: yes +Default: yes ### bulk_bypass_validation (*bool, optional) {#bulk-bulk_bypass_validation} -If set to yes, it disables MongoDB bulk operations validation mode. +If set to yes, it disables MongoDB bulk operations validation mode. -Default: no +Default: no ### bulk_unordered (*bool, optional) {#bulk-bulk_unordered} -Description: Enables unordered bulk operations mode. +Description: Enables unordered bulk operations mode. -Default: no +Default: no ## ValuePairs diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md b/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md index 18df6a3d7..8a9a2a8ca 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/openobserve.md @@ -21,7 +21,7 @@ metadata: spec: openobserve: url: "https://some-openobserve-endpoint" - port: 5040 + port: 5080 organization: "default" stream: "default" user: "username" @@ -43,9 +43,15 @@ For details on the available options of the output, see the [documentation of th ### organization (string, optional) {#openobserveoutput-organization} -Name of the organization in Openobserve. +Name of the organization in OpenObserve. 
+### port (int, optional) {#openobserveoutput-port} + +The port number of the OpenObserve server. Specify it here instead of appending it to the URL. + +Default: 5080 + ### record (string, optional) {#openobserveoutput-record} Arguments to the `$format-json()` template function. Default: `"--scope rfc5424 --exclude DATE --key ISODATE @timestamp=${ISODATE}"` @@ -53,7 +59,7 @@ Arguments to the `$format-json()` template function. Default: `"--scope rfc5424 ### stream (string, optional) {#openobserveoutput-stream} -Name of the stream in Openobserve. +Name of the stream in OpenObserve. diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/redis.md b/content/docs/configuration/plugins/syslog-ng-outputs/redis.md index ebbcf3ac0..50c72b5fc 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/redis.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/redis.md @@ -41,27 +41,27 @@ Batching parameters The password used for authentication on a password-protected Redis server. -### command_and_arguments ([]string, optional) {#redisoutput-command_and_arguments} +### command (StringList, optional) {#redisoutput-command} -The Redis command to execute, for example, LPUSH, INCR, or HINCRBY. Using the HINCRBY command with an increment value of 1 allows you to create various statistics. For example, the `command("HINCRBY" "${HOST}/programs" "${PROGRAM}" "1")` command counts the number of log messages on each host for each program. +Internal rendered form of the CommandAndArguments field -Default: "" +### command_and_arguments ([]string, optional) {#redisoutput-command_and_arguments} -### command (StringList, optional) {#redisoutput-command} +The Redis command to execute, for example, LPUSH, INCR, or HINCRBY. Using the HINCRBY command with an increment value of 1 allows you to create various statistics. For example, the `command("HINCRBY" "${HOST}/programs" "${PROGRAM}" "1")` command counts the number of log messages on each host for each program. 
-Internal rendered form of the CommandAndArguments field +Default: "" ### disk_buffer (*DiskBuffer, optional) {#redisoutput-disk_buffer} -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [syslog-ng DiskBuffer options](../disk_buffer/). +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). -Default: false +Default: false ### host (string, optional) {#redisoutput-host} -The hostname or IP address of the Redis server. +The hostname or IP address of the Redis server. -Default: 127.0.0.1 +Default: 127.0.0.1 ### log-fifo-size (int, optional) {#redisoutput-log-fifo-size} @@ -75,33 +75,33 @@ Persistname ### port (int, optional) {#redisoutput-port} -The port number of the Redis server. +The port number of the Redis server. -Default: 6379 +Default: 6379 ### retries (int, optional) {#redisoutput-retries} If syslog-ng OSE cannot send a message, it will try again until the number of attempts reaches `retries()`. -Default: 3 +Default: 3 ### throttle (int, optional) {#redisoutput-throttle} -Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited. +Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited. -Default: 0 +Default: 0 ### time-reopen (int, optional) {#redisoutput-time-reopen} -The time to wait in seconds before a dead connection is reestablished. 
+The time to wait in seconds before a dead connection is reestablished. -Default: 60 +Default: 60 ### workers (int, optional) {#redisoutput-workers} -Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination. +Specifies the number of worker threads (at least 1) that syslog-ng OSE uses to send messages to the server. Increasing the number of worker threads can drastically improve the performance of the destination. -Default: 1 +Default: 1 ## StringList diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/s3.md b/content/docs/configuration/plugins/syslog-ng-outputs/s3.md index 64922904f..8085cb35a 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/s3.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/s3.md @@ -52,27 +52,27 @@ Set the canned_acl option. ### chunk_size (int, optional) {#s3output-chunk_size} -Set the chunk size. +Set the chunk size. -Default: 5MiB +Default: 5MiB ### compresslevel (int, optional) {#s3output-compresslevel} -Set the compression level (1-9). +Set the compression level (1-9). -Default: 9 +Default: 9 ### compression (*bool, optional) {#s3output-compression} -Enable or disable compression. +Enable or disable compression. -Default: false +Default: false ### flush_grace_period (int, optional) {#s3output-flush_grace_period} -Set the number of seconds for flush period. +Set the number of seconds for flush period. -Default: 60 +Default: 60 ### log-fifo-size (int, optional) {#s3output-log-fifo-size} @@ -81,15 +81,15 @@ The number of messages that the output queue can store. ### max_object_size (int, optional) {#s3output-max_object_size} -Set the maximum object size size. +Set the maximum object size size. -Default: 5120GiB +Default: 5120GiB ### max_pending_uploads (int, optional) {#s3output-max_pending_uploads} -Set the maximum number of pending uploads. 
+Set the maximum number of pending uploads. -Default: 32 +Default: 32 ### object_key (string, optional) {#s3output-object_key} @@ -133,15 +133,15 @@ Template ### throttle (int, optional) {#s3output-throttle} -Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited. +Sets the maximum number of messages sent to the destination per second. Use this output-rate-limiting functionality only when using disk-buffer as well to avoid the risk of losing messages. Specifying 0 or a lower value sets the output limit to unlimited. -Default: 0 +Default: 0 ### upload_threads (int, optional) {#s3output-upload_threads} -Set the number of upload threads. +Set the number of upload threads. -Default: 8 +Default: 8 ### url (string, optional) {#s3output-url} diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_http.md b/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_http.md index 75220c80d..221c3f0ba 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_http.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_http.md @@ -64,27 +64,27 @@ For details on the available options of the output, see the [documentation of th ### collector (*secret.Secret, optional) {#sumologichttpoutput-collector} -The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source. +The Cloud Syslog Cloud Token that you received from the Sumo Logic service while configuring your cloud syslog source. 
-Default: empty +Default: empty ### deployment (string, optional) {#sumologichttpoutput-deployment} -This option specifies your Sumo Logic deployment.https://help.sumologic.com/APIs/General-API-Information/Sumo-Logic-Endpoints-by-Deployment-and-Firewall-Security +This option specifies your [Sumo Logic deployment](https://help.sumologic.com/APIs/General-API-Information/Sumo-Logic-Endpoints-by-Deployment-and-Firewall-Security). -Default: empty +Default: empty ### disk_buffer (*DiskBuffer, optional) {#sumologichttpoutput-disk_buffer} -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). -Default: false +Default: false ### headers ([]string, optional) {#sumologichttpoutput-headers} Custom HTTP headers to include in the request, for example, `headers("HEADER1: header1", "HEADER2: header2")`. -Default: empty +Default: empty ### persist_name (string, optional) {#sumologichttpoutput-persist_name} @@ -92,11 +92,11 @@ Default: empty The time to wait in seconds before a dead connection is reestablished. -Default: 60 +Default: 60 ### tls (*TLS, optional) {#sumologichttpoutput-tls} -This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see [TLS for syslog-ng outputs](../tls/) and the [documentation of the AxoSyslog syslog-ng distribution](https://axoflow.com/docs/axosyslog-core/chapter-encrypted-transport-tls/tlsoptions/). 
+This option sets various options related to TLS encryption, for example, key/certificate files and trusted CA locations. TLS can be used only with tcp-based transport protocols. For details, see [TLS for syslog-ng outputs](../tls/) and the [documentation of the AxoSyslog syslog-ng distribution](https://axoflow.com/docs/axosyslog-core/chapter-encrypted-transport-tls/tlsoptions/). Default: - diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog.md b/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog.md index 455fc635c..ded2257ca 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/sumologic_syslog.md @@ -16,30 +16,30 @@ You need a Sumo Logic account to use this output. For details, see the [document ### deployment (string, optional) {#sumologicsyslogoutput-deployment} -This option specifies your Sumo Logic deployment. https://help.sumologic.com/APIs/General-API-Information/Sumo-Logic-Endpoints-by-Deployment-and-Firewall-Security +This option specifies your [Sumo Logic deployment](https://help.sumologic.com/APIs/General-API-Information/Sumo-Logic-Endpoints-by-Deployment-and-Firewall-Security). -Default: empty +Default: empty ### disk_buffer (*DiskBuffer, optional) {#sumologicsyslogoutput-disk_buffer} -This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). +This option enables putting outgoing messages into the disk buffer of the destination to avoid message loss in case of a system failure on the destination side. For details, see the [Syslog-ng DiskBuffer options](../disk_buffer/). 
-Default: false +Default: false ### persist_name (string, optional) {#sumologicsyslogoutput-persist_name} ### port (int, optional) {#sumologicsyslogoutput-port} -This option sets the port number of the Sumo Logic server to connect to. +This option sets the port number of the Sumo Logic server to connect to. -Default: 6514 +Default: 6514 ### tag (string, optional) {#sumologicsyslogoutput-tag} This option specifies the list of tags to add as the tags fields of Sumo Logic messages. If not specified, syslog-ng OSE automatically adds the tags already assigned to the message. If you set the tag() option, only the tags you specify will be added to the messages. -Default: tag +Default: tag ### token (int, optional) {#sumologicsyslogoutput-token} diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/syslog.md b/content/docs/configuration/plugins/syslog-ng-outputs/syslog.md index 91194a8df..9d1ec3c9f 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/syslog.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/syslog.md @@ -127,7 +127,7 @@ Specifies the number of seconds syslog-ng waits for identical messages. For deta Specifies a template defining the logformat to be used in the destination. For details, see the [documentation of the AxoSyslog syslog-ng distribution](https://axoflow.com/docs/axosyslog-core/chapter-destinations/configuring-destinations-syslog/reference-destination-syslog-chapter/#template). 
-Default: 0 +Default: 0 ### template_escape (*bool, optional) {#syslogoutput-template_escape} diff --git a/content/docs/configuration/plugins/syslog-ng-outputs/tls.md b/content/docs/configuration/plugins/syslog-ng-outputs/tls.md index aba23ea27..9f44442ac 100644 --- a/content/docs/configuration/plugins/syslog-ng-outputs/tls.md +++ b/content/docs/configuration/plugins/syslog-ng-outputs/tls.md @@ -4,7 +4,8 @@ weight: 200 generated_file: true --- -For details on how TLS configuration works in syslog-ng, see the [AxoSyslog Core documentation](https://axoflow.com/docs/axosyslog-core/chapter-encrypted-transport-tls/tlsoptions/). + + For details on how TLS configuration works in syslog-ng, see the [AxoSyslog Core documentation](https://axoflow.com/docs/axosyslog-core/chapter-encrypted-transport-tls/). ## Configuration diff --git a/content/docs/image-versions.md b/content/docs/image-versions.md index 2594cd1ab..a33da722f 100644 --- a/content/docs/image-versions.md +++ b/content/docs/image-versions.md @@ -5,6 +5,22 @@ weight: 750 Logging operator uses the following image versions. 
+## Logging operator version 4.6 + +| Image repository | GitHub repository | Version | +| -------- | --- | -- | +| ghcr.io/kube-logging/node-exporter | https://github.com/kube-logging/node-exporter-image | v0.7.1 | +| ghcr.io/kube-logging/config-reloader | https://github.com/kube-logging/config-reloader | v0.0.5 | +| ghcr.io/kube-logging/fluentd-drain-watch | https://github.com/kube-logging/fluentd-drain-watch | v0.2.1 | +| k8s.gcr.io/pause | | 3.2 | +| docker.io/busybox | https://github.com/docker-library/busybox | latest | +| ghcr.io/axoflow/axosyslog | https://github.com/axoflow/axosyslog-docker/ | 4.5.0 | +| docker.io/fluent/fluent-bit | https://github.com/fluent/fluent-bit | 2.1.8 | +| ghcr.io/kube-logging/fluentd | https://github.com/kube-logging/fluentd-images | v1.16-full | +| ghcr.io/axoflow/axosyslog-metrics-exporter | https://github.com/axoflow/axosyslog-metrics-exporter | 0.0.2 | +| ghcr.io/kube-logging/syslogng-reload | https://github.com/kube-logging/syslogng-reload-image | v1.3.1 | +| ghcr.io/kube-logging/eventrouter | https://github.com/kube-logging/eventrouter | 0.4.0 | + ## Logging operator version 4.5 | Image repository | GitHub repository | Version | diff --git a/content/docs/logging-infrastructure/security/_index.md b/content/docs/logging-infrastructure/security/_index.md index 49c60e6bb..3c3b048cd 100644 --- a/content/docs/logging-infrastructure/security/_index.md +++ b/content/docs/logging-infrastructure/security/_index.md @@ -13,7 +13,6 @@ aliases: | Variable Name | Type | Required | Default | Description | |---|---|---|---|---| | roleBasedAccessControlCreate | bool | No | True | create RBAC resources | -| podSecurityPolicyCreate | bool | No | False | create PSP resources | | serviceAccount | string | No | - | Set ServiceAccount | | securityContext | [SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#securitycontext-v1-core) | No | {} | SecurityContext holds security configuration that will be applied to a 
container. | | podSecurityContext | [PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podsecuritycontext-v1-core) | No | {} | PodSecurityContext holds pod-level security attributes and common container settings. Some | @@ -151,127 +150,6 @@ spec: EOF ``` -## Enabling Pod Security Policies ([PSP](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)) - -> This option depends on the roleBasedAccessControlCreate enabled status because the psp require rbac roles also. - -### Deploy with Kubernetes Manifests {#psp-deploy-kubernetes-manifests} - -Create `logging` resource with PSP - -```yaml -kubectl -n logging apply -f - <<"EOF" -apiVersion: logging.banzaicloud.io/v1beta1 -kind: Logging -metadata: - name: default-logging-simple -spec: - fluentd: - security: - podSecurityPolicyCreate: true - roleBasedAccessControlCreate: true - fluentbit: - security: - podSecurityPolicyCreate: true - roleBasedAccessControlCreate: true - controlNamespace: logging -EOF -``` - -### Example Manifest Generated by the operator - -#### Fluentd PSP+Role Output - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: nginx-demo-nginx-logging-demo-logging-fluentd-psp -rules: -- apiGroups: - - policy - - extensions - resources: - - podsecuritypolicies - resourceNames: - - nginx-demo-nginx-logging-demo-logging-fluentd - verbs: - - use - ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: nginx-demo-nginx-logging-demo-logging-fluentd -spec: - allowPrivilegeEscalation: false - fsGroup: - ranges: - - max: 101 - min: 101 - rule: MustRunAs - runAsUser: - ranges: - - max: 100 - min: 100 - rule: MustRunAs - seLinux: - rule: RunAsAny - supplementalGroups: - ranges: - - max: 101 - min: 101 - rule: MustRunAs - volumes: - - configMap - - emptyDir - - secret - - hostPath - - persistentVolumeClaim -``` - -#### Fluentbit PSP+ClusterRole Output - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: 
ClusterRole -metadata: - name: nginx-demo-nginx-logging-demo-logging-fluentbit-psp -rules: -- apiGroups: - - policy - resources: - - nginx-demo-nginx-logging-demo-logging-fluentbit - verbs: - - use ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: nginx-demo-nginx-logging-demo-logging-fluentbit -spec: - allowPrivilegeEscalation: false - allowedHostPaths: - - pathPrefix: /var/lib/docker/containers - readOnly: true - - pathPrefix: /var/log - readOnly: true - fsGroup: - rule: RunAsAny - readOnlyRootFilesystem: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - configMap - - emptyDir - - secret - - hostPath -``` - ## [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) - [Security Context Parameters](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#securitycontext-v1-core) diff --git a/content/docs/whats-new/_index.md b/content/docs/whats-new/_index.md index d53bd9c56..97ae16d51 100644 --- a/content/docs/whats-new/_index.md +++ b/content/docs/whats-new/_index.md @@ -3,6 +3,132 @@ title: What's new weight: 50 --- +## Version 4.6 + +The following are the highlights and main changes of Logging operator 4.6. For a complete list of changes and bugfixes, see the [Logging operator 4.6 releases page](https://github.com/kube-logging/logging-operator/releases/tag/4.6.0) and the [Logging operator 4.6 release blog post](fluent-bit-hot-reload-kubernetes-namespace-labels-vmware-outputs-logging-operator-4-6). + +## Fluent Bit hot reload + +As a Fluent Bit restart can take a long time when there are many files to index, Logging operator now supports [hot reload for Fluent Bit](https://docs.fluentbit.io/manual/administration/hot-reload) to reload its configuration on the fly. 
+You can enable hot reloads using the Logging resource's `spec.fluentbit.configHotReload` (legacy method) option, or the new FluentbitAgent's `spec.configHotReload` option:
+ +Here is a sample output snippet: + +```yaml +spec: + vmwarelogintelligence: + endpoint_url: https://data.upgrade.symphony-dev.com/le-mans/v1/streams/ingestion-pipeline-stream + verify_ssl: true + http_compress: false + headers: + content_type: "application/json" + authorization: + valueFrom: + secretKeyRef: + name: vmware-log-intelligence-token + key: authorization + structure: simple + buffer: + chunk_limit_records: 300 + flush_interval: 3s + retry_max_times: 3 +``` + +Many thanks to @zrobisho for contributing this feature! + +## Kubernetes namespace labels and annotations + +Logging operator 4.6 supports the new Fluent Bit Kubernetes filter options that will be released in Fluent Bit 3.0. That way you'll be able to enrich your logs with Kubernetes namespace labels and annotations right at the source of the log messages. + +Fluent Bit 3.0 hasn't been released yet (at the time of this writing), but you can use a developer image to test the feature, using a `FluentbitAgent` resource like this: + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: FluentbitAgent +metadata: + name: namespace-label-test +spec: + filterKubernetes: + namespace_annotations: "On" + namespace_labels: "On" + image: + repository: ghcr.io/fluent/fluent-bit/unstable + tag: latest +``` + +## Other changes + +- Enabling ServiceMonitor checks if Prometheus is already available. +- You can now use a custom PVC without a template for the statefulset. +- You can now configure PodDisruptionBudget for Fluentd. +- Event tailer metrics are now automatically exposed. +- You can configure [timeout-based configuration checks](https://kube-logging.dev/docs/whats-new/#timeout-based-configuration-checks) using the `logging.configCheck` object of the `logging-operator` chart. +- You can now specify the event tailer image to use in the `logging-operator` chart. +- Fluent Bit can now automatically delete irrecoverable chunks. 
+- The Fluentd statefulset and its components created by the Logging operator now include the whole securityContext object. +- The Elasticsearch output of the syslog-ng aggregator now supports the template option. +- To avoid problems that might occur when a tenant has a faulty output and backpressure kicks in, Logging operator now creates a dedicated tail input for each tenant. + +## Removed feature + +We have removed support for [Pod Security Policies (PSPs)](https://kubernetes.io/docs/concepts/security/pod-security-policy/), which were deprecated in Kubernetes v1.21, and removed from Kubernetes in v1.25. + +Note that the API was left intact, it just doesn't do anything. + ## Version 4.5 The following are the highlights and main changes of Logging operator 4.5. For a complete list of changes and bugfixes, see the [Logging operator 4.5 releases page](https://github.com/kube-logging/logging-operator/releases/tag/4.5.0).