From 3ecc55c2a364ffd50d666ab01aa31da4080d3e8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?= Date: Tue, 22 Nov 2022 14:18:44 +0100 Subject: [PATCH] feat!: refactor logs metadata config --- CHANGELOG.md | 2 + .../sumologic/conf/logs/otelcol/config.yaml | 480 +++++++++++++++++- deploy/helm/sumologic/templates/_helpers.tpl | 30 +- .../templates/logs/otelcol/configmap.yaml | 12 +- deploy/helm/sumologic/values.yaml | 472 +---------------- docs/opentelemetry-collector.md | 18 +- .../metadata_logs_otc/static/basic.input.yaml | 4 - .../static/fluentbit.input.yaml | 10 + ...asic.output.yaml => fluentbit.output.yaml} | 88 +--- .../metadata_logs_otc/static/merge.input.yaml | 18 + .../static/merge.output.yaml | 361 +++++++++++++ .../metadata_logs_otc/static/otel.input.yaml | 10 + .../metadata_logs_otc/static/otel.output.yaml | 361 +++++++++++++ .../static/override.input.yaml | 193 +++++++ .../static/override.output.yaml | 197 +++++++ .../static/systemd.input.yaml | 12 + .../static/systemd.output.yaml | 190 +++++++ .../static/templates.input.yaml | 6 + .../static/templates.output.yaml | 100 +--- 19 files changed, 1956 insertions(+), 608 deletions(-) delete mode 100644 tests/helm/metadata_logs_otc/static/basic.input.yaml create mode 100644 tests/helm/metadata_logs_otc/static/fluentbit.input.yaml rename tests/helm/metadata_logs_otc/static/{basic.output.yaml => fluentbit.output.yaml} (82%) create mode 100644 tests/helm/metadata_logs_otc/static/merge.input.yaml create mode 100644 tests/helm/metadata_logs_otc/static/merge.output.yaml create mode 100644 tests/helm/metadata_logs_otc/static/otel.input.yaml create mode 100644 tests/helm/metadata_logs_otc/static/otel.output.yaml create mode 100644 tests/helm/metadata_logs_otc/static/override.input.yaml create mode 100644 tests/helm/metadata_logs_otc/static/override.output.yaml create mode 100644 tests/helm/metadata_logs_otc/static/systemd.input.yaml create mode 100644 
tests/helm/metadata_logs_otc/static/systemd.output.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b88b73895..b8be92ffe1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - feat!: refactor event collection configuration [#2444] - fix(logs): configure fluentbit to send data to metadata-logs [#2610] - feat(logs): Changing the default logs metadata provider to otel [#2621] +- feat!: refactor logs metadata config [#2626] ### Changed @@ -73,6 +74,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#2610]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2610 [#2619]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2619 [#2621]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2621 +[#2626]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2626 [Unreleased]: https://github.com/SumoLogic/sumologic-kubernetes-collection/compare/v2.17.0...main [telegraf_operator_comapare_1.3.5_and_1.3.10]: https://github.com/influxdata/helm-charts/compare/telegraf-operator-1.3.5...telegraf-operator-1.3.10 [cert-manager-1.4]: https://github.com/cert-manager/cert-manager/releases/tag/v1.4.0 diff --git a/deploy/helm/sumologic/conf/logs/otelcol/config.yaml b/deploy/helm/sumologic/conf/logs/otelcol/config.yaml index 2fa672d766..6b11944573 100644 --- a/deploy/helm/sumologic/conf/logs/otelcol/config.yaml +++ b/deploy/helm/sumologic/conf/logs/otelcol/config.yaml @@ -1 +1,479 @@ -{{ tpl (toYaml .Values.metadata.logs.config | replace ": '{{" ": {{" | replace "}}'" "}}") . | nindent 2 }} +receivers: +{{ if eq (include "logs.collector.fluentbit.enabled" .) "true" }} + fluentforward: + endpoint: 0.0.0.0:24321 +{{ end }} +{{ if eq (include "logs.collector.otelcol.enabled" .) 
"true" }} + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 +{{ end }} +extensions: + health_check: {} +{{ if .Values.metadata.persistence.enabled }} + ## Configuration for File Storage extension + ## ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/release/v0.37.x/extension/storage/filestorage + file_storage: + directory: /var/lib/storage/otc + timeout: 10s + compaction: + on_start: true + on_rebound: true + # Can't be /tmp yet, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/13449 + directory: /var/lib/storage/otc +{{ end }} + pprof: {} +exporters: +{{ if .Values.sumologic.logs.container.enabled }} + sumologic/containers: + log_format: json + json_logs: + add_timestamp: true + timestamp_key: timestamp + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + source_name: "%{_sourceName}" + source_category: "%{_sourceCategory}" + source_host: "%{_sourceHost}" + ## Configuration for sending queue + ## ref: https://github.com/open-telemetry/opentelemetry-collector/tree/release/v0.37.x/exporter/exporterhelper#configuration + sending_queue: + enabled: true + persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }} + num_consumers: 10 + queue_size: 10_000 +{{ end }} +{{ if .Values.sumologic.logs.systemd.enabled }} + sumologic/systemd: + log_format: json + json_logs: + add_timestamp: true + timestamp_key: timestamp + ## use flatten_body, but OTLP won't require any flattening + ## fluent based logs will be all send as record attributes + ## otellogs based logs will be all send as body attributes + flatten_body: true + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + source_name: "%{_sourceName}" + source_category: "%{_sourceCategory}" + source_host: "%{_sourceHost}" + ## Configuration for sending queue + ## ref: https://github.com/open-telemetry/opentelemetry-collector/tree/release/v0.37.x/exporter/exporterhelper#configuration + sending_queue: + enabled: true + persistent_storage_enabled: {{ 
.Values.metadata.persistence.enabled }} + num_consumers: 10 + queue_size: 10_000 +{{ end }} + +processors: + ## Common processors + attributes/remove_fluent_tag: + actions: + - action: delete + key: fluent.tag + ## The memory_limiter processor is used to prevent out of memory situations on the collector. + memory_limiter: + ## check_interval is the time between measurements of memory usage for the + ## purposes of avoiding going over the limits. Defaults to zero, so no + ## checks will be performed. Values below 1 second are not recommended since + ## it can result in unnecessary CPU consumption. + check_interval: 5s + + ## Maximum amount of memory, in %, targeted to be allocated by the process heap. + limit_percentage: 75 + ## Spike limit (calculated from available memory). Must be less than limit_percentage. + spike_limit_percentage: 20 + + ## The batch processor accepts spans and places them into batches grouped by node and resource + batch: + ## Number of spans after which a batch will be sent regardless of time + send_batch_size: 1_024 + ## Time duration after which a batch will be sent regardless of size + timeout: 1s + resource/add_cluster: + attributes: + - key: cluster + value: {{ .Values.sumologic.clusterName | quote }} + action: upsert + + ## Containers related processors +{{ if .Values.sumologic.logs.container.enabled }} + sumologic_schema: + add_cloud_namespace: false + filter/include_fluent_tag_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: containers\..+ + filter/include_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: k8s.container.name + value: .+ + attributes/containers: + actions: + - action: extract + key: fluent.tag + pattern: ^containers\.var\.log\.containers\.(?P[^_]+)_(?P[^_]+)_(?P.+)-(?P[a-f0-9]{64})\.log$ + - action: insert + key: k8s.container.id + from_attribute: container_id + - action: delete + key: container_id + - action: insert + key: 
k8s.pod.name + from_attribute: k8s_pod_name + - action: delete + key: k8s_pod_name + - action: insert + key: k8s.namespace.name + from_attribute: k8s_namespace + - action: delete + key: k8s_namespace + - action: insert + key: k8s.container.name + from_attribute: k8s_container_name + - action: delete + key: k8s_container_name + resource/containers_copy_node_to_host: + attributes: + - action: upsert + key: k8s.pod.hostname + from_attribute: k8s.node.name + resource/drop_annotations: + attributes: + - pattern: ^pod_annotations_.* + action: delete + groupbyattrs/containers: + keys: + - k8s.container.id + - k8s.container.name + - k8s.namespace.name + - k8s.pod.name + - _collector + k8s_tagger: + ## Has to be false to enrich metadata + passthrough: false + owner_lookup_enabled: true # To enable fetching additional metadata using `owner` relationship + extract: + metadata: + ## extract the following well-known metadata fields + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + annotations: + - tag_name: "pod_annotations_%s" + key: "*" + namespace_labels: + - tag_name: "namespace_labels_%s" + key: "*" + labels: + - tag_name: "pod_labels_%s" + key: "*" + delimiter: "_" + pod_association: + - from: build_hostname + source/containers: + collector: {{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }} + source_host: "%{k8s.pod.hostname}" + source_name: "%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}" + source_category: "%{k8s.namespace.name}/%{k8s.pod.pod_name}" + source_category_prefix: {{ .Values.fluentd.logs.containers.sourceCategoryPrefix | quote }} + source_category_replace_dash: {{ .Values.fluentd.logs.containers.sourceCategoryReplaceDash | quote }} + exclude: + k8s.namespace.name: {{ include "fluentd.excludeNamespaces" . 
}} + k8s.pod.name: {{ .Values.fluentd.logs.containers.excludePodRegex | quote }} + k8s.container.name: {{ .Values.fluentd.logs.containers.excludeContainerRegex | quote }} + k8s.pod.hostname: {{ .Values.fluentd.logs.containers.excludeHostRegex | quote }} + annotation_prefix: "pod_annotations_" + pod_template_hash_key: "pod_labels_pod-template-hash" + pod_name_key: "k8s.pod.pod_name" + pod_key: "k8s.pod.name" + container_annotations: + enabled: {{ .Values.fluentd.logs.containers.perContainerAnnotationsEnabled }} + prefixes: {{ toJson .Values.fluentd.logs.containers.perContainerAnnotationPrefixes }} +{{ end }} +{{ if .Values.sumologic.logs.systemd.enabled }} + ## Systemd related processors + filter/include_fluent_tag_host: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: host\..+ + attributes/extract_systemd_source_fields: + actions: + - action: extract + key: fluent.tag + pattern: ^host\.(?P<_sourceName>[a-zA-z0-9]+)\..+$ + - action: insert + from_attribute: _HOSTNAME + key: _sourceHost + filter/include_systemd: + logs: + include: + match_type: regexp + record_attributes: + - key: _SYSTEMD_UNIT + value: .+ + filter/exclude_kubelet: + logs: + exclude: + match_type: strict + record_attributes: + - key: _SYSTEMD_UNIT + value: kubelet.service + filter/exclude_systemd_syslog: + logs: + exclude: + match_type: regexp + record_attributes: + - key: SYSLOG_FACILITY + value: {{ .Values.fluentd.logs.systemd.excludeFacilityRegex | default "$^" | quote }} + filter/exclude_systemd_hostname: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _HOSTNAME + value: {{ .Values.fluentd.logs.systemd.excludeHostRegex | default "$^" | quote }} + filter/exclude_systemd_priority: + logs: + exclude: + match_type: regexp + record_attributes: + - key: PRIORITY + value: {{ .Values.fluentd.logs.systemd.excludePriorityRegex | default "$^" | quote }} + filter/exclude_systemd_unit: + logs: + exclude: + match_type: regexp + record_attributes: + 
- key: _SYSTEMD_UNIT + value: {{ .Values.fluentd.logs.systemd.excludeUnitRegex | default "$^" | quote }} + filter/exclude_kubelet_syslog: + logs: + exclude: + match_type: regexp + record_attributes: + - key: SYSLOG_FACILITY + value: {{ .Values.fluentd.logs.kubelet.excludeFacilityRegex | default "$^" | quote }} + filter/exclude_kubelet_hostname: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _HOSTNAME + value: {{ .Values.fluentd.logs.kubelet.excludeHostRegex | default "$^" | quote }} + filter/exclude_kubelet_priority: + logs: + exclude: + match_type: regexp + record_attributes: + - key: PRIORITY + value: {{ .Values.fluentd.logs.kubelet.excludePriorityRegex | default "$^" | quote }} + filter/exclude_kubelet_unit: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _SYSTEMD_UNIT + value: {{ .Values.fluentd.logs.kubelet.excludeUnitRegex | default "$^" | quote }} + + groupbyattrs/systemd: + keys: + - _sourceName + - _sourceHost + - _collector + source/systemd: + collector: {{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }} + source_host: "%{_sourceHost}" + source_name: "%{_sourceName}" + source_category: {{ .Values.fluentd.logs.systemd.sourceCategory | quote }} + source_category_prefix: {{ .Values.fluentd.logs.systemd.sourceCategoryPrefix | quote }} + source_category_replace_dash: {{ .Values.fluentd.logs.systemd.sourceCategoryReplaceDash | quote }} + ## Remove all attributes, so body won't by nested by SumoLogic receiver in case of using otlp format + transform/remove_attributes: + logs: + queries: + - limit(attributes, 0) + + ## kubelet related processors + filter/include_kubelet: + logs: + include: + match_type: strict + record_attributes: + - key: _SYSTEMD_UNIT + value: kubelet.service + source/kubelet: + collector: {{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }} + source_host: "%{_sourceHost}" + source_name: {{ .Values.fluentd.logs.kubelet.sourceName | 
quote }} + source_category: {{ .Values.fluentd.logs.kubelet.sourceCategory | quote }} + source_category_prefix: {{ .Values.fluentd.logs.kubelet.sourceCategoryPrefix | quote }} + source_category_replace_dash: {{ .Values.fluentd.logs.kubelet.sourceCategoryReplaceDash | quote }} +{{ end }} + +service: + telemetry: + logs: + level: {{ .Values.metadata.logs.logLevel }} + extensions: + - health_check +{{ if .Values.metadata.persistence.enabled }} + - file_storage +{{ end }} + - pprof + pipelines: +{{ if eq (include "logs.collector.fluentbit.enabled" .) "true" }} +{{ if .Values.sumologic.logs.container.enabled }} + logs/fluent/containers: + receivers: + - fluentforward + processors: + - memory_limiter + - filter/include_fluent_tag_containers + - attributes/containers + - groupbyattrs/containers + - k8s_tagger + - resource/add_cluster + - source/containers + - resource/drop_annotations + - attributes/remove_fluent_tag + - resource/containers_copy_node_to_host + - sumologic_schema + - batch + exporters: + - sumologic/containers +{{ end }} +{{ if .Values.sumologic.logs.systemd.enabled }} + logs/fluent/systemd: + receivers: + - fluentforward + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_systemd + - filter/exclude_kubelet + - filter/exclude_systemd_syslog + - filter/exclude_systemd_hostname + - filter/exclude_systemd_priority + - filter/exclude_systemd_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - resource/add_cluster + - source/systemd + - batch + exporters: + - sumologic/systemd + logs/fluent/kubelet: + receivers: + - fluentforward + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_kubelet + - filter/exclude_kubelet_syslog + - filter/exclude_kubelet_hostname + - filter/exclude_kubelet_priority + - filter/exclude_kubelet_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - 
resource/add_cluster + - source/kubelet + - batch + exporters: + - sumologic/systemd +{{ end }} +{{ end }} +{{ if eq (include "logs.collector.otelcol.enabled" .) "true" }} +{{ if .Values.sumologic.logs.container.enabled }} + ## This is the same pipeline like for logs/fluent/containers with the following modifications: + ## - filter/include_fluent_tag_containers and attributes/remove_fluent_tag are being removed + ## as only containers log are being provided to otlp receiver + ## - attributes/containers functionality is being replaced by otellogs operators + logs/otlp/containers: + receivers: + - otlp + processors: + - memory_limiter + - filter/include_containers + - groupbyattrs/containers + - k8s_tagger + - resource/add_cluster + - source/containers + - resource/drop_annotations + - resource/containers_copy_node_to_host + - sumologic_schema + - batch + exporters: + - sumologic/containers +{{ end }} +{{ if .Values.sumologic.logs.systemd.enabled }} + ## This is the same pipeline like logs/fluent/systemd, but with the following changes: + ## - otlp receiver instead of fluentforward + ## - added transform/remove_attributes processor + logs/otlp/systemd: + receivers: + - otlp + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_systemd + - filter/exclude_kubelet + - filter/exclude_systemd_syslog + - filter/exclude_systemd_hostname + - filter/exclude_systemd_priority + - filter/exclude_systemd_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - resource/add_cluster + - source/systemd + - transform/remove_attributes + - batch + exporters: + - sumologic/systemd + ## This is the same pipeline like logs/fluent/kubelet, but with the following changes: + ## - otlp receiver instead of fluentforward + ## - added transform/remove_attributes processor + logs/otlp/kubelet: + receivers: + - otlp + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_kubelet 
+ - filter/exclude_kubelet_syslog + - filter/exclude_kubelet_hostname + - filter/exclude_kubelet_priority + - filter/exclude_kubelet_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - resource/add_cluster + - source/kubelet + - transform/remove_attributes + - batch + exporters: + - sumologic/systemd +{{ end }} +{{ end }} diff --git a/deploy/helm/sumologic/templates/_helpers.tpl b/deploy/helm/sumologic/templates/_helpers.tpl index 001a5898d2..89076cc5cf 100644 --- a/deploy/helm/sumologic/templates/_helpers.tpl +++ b/deploy/helm/sumologic/templates/_helpers.tpl @@ -789,10 +789,10 @@ Generate metrics match configuration Example usage (as one line): -{{ include "utils.metrics.match" (dict - "Values" . - "Tag" "prometheus.metrics.kubelet" - "Endpoint" "SUMO_ENDPOINT_METRICS" +{{ include "utils.metrics.match" (dict + "Values" . + "Tag" "prometheus.metrics.kubelet" + "Endpoint" "SUMO_ENDPOINT_METRICS" "Storage" .Values.fluentd.buffer.filePaths.metrics.default "Id" sumologic.endpoint.metrics )}} @@ -1410,6 +1410,28 @@ Example Usage: {{ $enabled }} {{- end -}} +{{/* +Check if Fluent-Bit logs collector is enabled. +It's enabled if logs in general are enabled and fluent-bit.enabled is set to true. + +Example Usage: +{{- if eq (include "logs.collector.fluentbit.enabled" .) "true" }} + +*/}} +{{- define "logs.collector.fluentbit.enabled" -}} +{{- $fluentBitEnabled := index .Values "fluent-bit" "enabled" -}} +{{- if kindIs "invalid" $fluentBitEnabled -}} +{{- $fluentBitEnabled = true -}} +{{- end -}} +{{- $enabled := and (eq (include "logs.enabled" .) "true") $fluentBitEnabled -}} +{{- $otelLogCollectorEnabled := .Values.sumologic.logs.collector.otelcol.enabled -}} +{{- $sideBySideAllowed := .Values.sumologic.logs.collector.allowSideBySide -}} +{{- if and $enabled $otelLogCollectorEnabled (not $sideBySideAllowed) -}} +{{- fail "Fluent-Bit and Otel log collector can't be enabled at the same time. 
Set either `fluent-bit.enabled` or `sumologic.logs.collector.otelcol.enabled` to false" -}} +{{- end -}} +{{ $enabled }} +{{- end -}} + {{/* Check if any events provider is enabled Example Usage: diff --git a/deploy/helm/sumologic/templates/logs/otelcol/configmap.yaml b/deploy/helm/sumologic/templates/logs/otelcol/configmap.yaml index 83f4ad8c1c..29713076eb 100644 --- a/deploy/helm/sumologic/templates/logs/otelcol/configmap.yaml +++ b/deploy/helm/sumologic/templates/logs/otelcol/configmap.yaml @@ -1,4 +1,13 @@ {{- if eq (include "logs.otelcol.enabled" .) "true" }} +{{ $baseConfig := (tpl (.Files.Get "conf/logs/otelcol/config.yaml") .) | fromYaml }} +{{ $mergeConfig := .Values.metadata.logs.config.merge }} +{{ $overrideConfig := .Values.metadata.logs.config.override }} +{{ $finalConfig := "" }} +{{ if $overrideConfig }} +{{ $finalConfig = $overrideConfig }} +{{ else }} +{{ $finalConfig = mergeOverwrite $baseConfig $mergeConfig }} +{{ end }} apiVersion: v1 kind: ConfigMap metadata: @@ -7,5 +16,6 @@ metadata: app: {{ template "sumologic.labels.app.logs.configmap" . }} {{- include "sumologic.labels.common" . | nindent 4 }} data: - {{- (tpl (.Files.Glob "conf/logs/otelcol/config.yaml").AsConfig .) 
| nindent 2 }} + config.yaml: | + {{- $finalConfig | toYaml | nindent 4 }} {{- end }} diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml index 802c232711..de91bc2f3e 100644 --- a/deploy/helm/sumologic/values.yaml +++ b/deploy/helm/sumologic/values.yaml @@ -4155,461 +4155,23 @@ metadata: enabled: true logLevel: info config: - receivers: - fluentforward: - endpoint: 0.0.0.0:24321 - otlp: - protocols: - http: - endpoint: 0.0.0.0:4318 - extensions: - health_check: {} - ## Configuration for File Storage extension - ## ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/release/v0.37.x/extension/storage/filestorage - file_storage: - directory: /var/lib/storage/otc - timeout: 10s - compaction: - on_start: true - on_rebound: true - # Can't be /tmp yet, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/13449 - directory: /var/lib/storage/otc - pprof: {} - exporters: - sumologic/containers: - log_format: json - json_logs: - add_timestamp: true - timestamp_key: timestamp - endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} - source_name: "%{_sourceName}" - source_category: "%{_sourceCategory}" - source_host: "%{_sourceHost}" - ## Configuration for sending queue - ## ref: https://github.com/open-telemetry/opentelemetry-collector/tree/release/v0.37.x/exporter/exporterhelper#configuration - sending_queue: - enabled: true - persistent_storage_enabled: '{{ .Values.metadata.persistence.enabled }}' - num_consumers: 10 - queue_size: 10_000 - sumologic/systemd: - log_format: json - json_logs: - add_timestamp: true - timestamp_key: timestamp - ## use flatten_body, but OTLP won't require any flattening - ## fluent based logs will be all send as record attributes - ## otellogs based logs will be all send as body attributes - flatten_body: true - endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} - source_name: "%{_sourceName}" - source_category: "%{_sourceCategory}" - source_host: "%{_sourceHost}" - ## 
Configuration for sending queue - ## ref: https://github.com/open-telemetry/opentelemetry-collector/tree/release/v0.37.x/exporter/exporterhelper#configuration - sending_queue: - enabled: true - persistent_storage_enabled: '{{ .Values.metadata.persistence.enabled }}' - num_consumers: 10 - queue_size: 10_000 - - processors: - ## Common processors - attributes/remove_fluent_tag: - actions: - - action: delete - key: fluent.tag - ## The memory_limiter processor is used to prevent out of memory situations on the collector. - memory_limiter: - ## check_interval is the time between measurements of memory usage for the - ## purposes of avoiding going over the limits. Defaults to zero, so no - ## checks will be performed. Values below 1 second are not recommended since - ## it can result in unnecessary CPU consumption. - check_interval: 5s - - ## Maximum amount of memory, in %, targeted to be allocated by the process heap. - limit_percentage: 75 - ## Spike limit (calculated from available memory). Must be less than limit_percentage. 
- spike_limit_percentage: 20 - - ## The batch processor accepts spans and places them into batches grouped by node and resource - batch: - ## Number of spans after which a batch will be sent regardless of time - send_batch_size: 1_024 - ## Time duration after which a batch will be sent regardless of size - timeout: 1s - - ## Containers related processors - filter/include_fluent_tag_containers: - logs: - include: - match_type: regexp - record_attributes: - - key: fluent.tag - value: containers\..+ - filter/include_containers: - logs: - include: - match_type: regexp - record_attributes: - - key: k8s.container.name - value: .+ - attributes/containers: - actions: - - action: extract - key: fluent.tag - pattern: ^containers\.var\.log\.containers\.(?P[^_]+)_(?P[^_]+)_(?P.+)-(?P[a-f0-9]{64})\.log$ - - action: insert - key: k8s.container.id - from_attribute: container_id - - action: delete - key: container_id - - action: insert - key: k8s.pod.name - from_attribute: k8s_pod_name - - action: delete - key: k8s_pod_name - - action: insert - key: k8s.namespace.name - from_attribute: k8s_namespace - - action: delete - key: k8s_namespace - - action: insert - key: k8s.container.name - from_attribute: k8s_container_name - - action: delete - key: k8s_container_name - resource/containers_copy_node_to_host: - attributes: - - action: upsert - key: k8s.pod.hostname - from_attribute: k8s.node.name - resource/drop_annotations: - attributes: - - pattern: ^pod_annotations_.* - action: delete - resource/add_cluster: - attributes: - - key: cluster - value: '{{ .Values.sumologic.clusterName | quote }}' - action: upsert - groupbyattrs/containers: - keys: - - k8s.container.id - - k8s.container.name - - k8s.namespace.name - - k8s.pod.name - - _collector - k8s_tagger: - ## Has to be false to enrich metadata - passthrough: false - owner_lookup_enabled: true # To enable fetching additional metadata using `owner` relationship - extract: - metadata: - ## extract the following well-known metadata 
fields - - containerId - - containerName - - daemonSetName - - deploymentName - - hostName - - namespace - - nodeName - - podId - - podName - - replicaSetName - - serviceName - - statefulSetName - annotations: - - tag_name: "pod_annotations_%s" - key: "*" - namespace_labels: - - tag_name: "namespace_labels_%s" - key: "*" - labels: - - tag_name: "pod_labels_%s" - key: "*" - delimiter: "_" - pod_association: - - from: build_hostname - source/containers: - collector: '{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}' - source_host: "%{k8s.pod.hostname}" - source_name: "%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}" - source_category: "%{k8s.namespace.name}/%{k8s.pod.pod_name}" - source_category_prefix: '{{ .Values.fluentd.logs.containers.sourceCategoryPrefix | quote }}' - source_category_replace_dash: '{{ .Values.fluentd.logs.containers.sourceCategoryReplaceDash | quote }}' - exclude: - k8s.namespace.name: '{{ include "fluentd.excludeNamespaces" . 
}}' - k8s.pod.name: '{{ .Values.fluentd.logs.containers.excludePodRegex | quote }}' - k8s.container.name: '{{ .Values.fluentd.logs.containers.excludeContainerRegex | quote }}' - k8s.pod.hostname: '{{ .Values.fluentd.logs.containers.excludeHostRegex | quote }}' - annotation_prefix: "pod_annotations_" - pod_template_hash_key: "pod_labels_pod-template-hash" - pod_name_key: "k8s.pod.pod_name" - pod_key: "k8s.pod.name" - container_annotations: - enabled: '{{ .Values.fluentd.logs.containers.perContainerAnnotationsEnabled }}' - prefixes: '{{ toJson .Values.fluentd.logs.containers.perContainerAnnotationPrefixes }}' - - ## Systemd related processors - filter/include_fluent_tag_host: - logs: - include: - match_type: regexp - record_attributes: - - key: fluent.tag - value: host\..+ - attributes/extract_systemd_source_fields: - actions: - - action: extract - key: fluent.tag - pattern: ^host\.(?P<_sourceName>[a-zA-z0-9]+)\..+$ - - action: insert - from_attribute: _HOSTNAME - key: _sourceHost - filter/include_systemd: - logs: - include: - match_type: regexp - record_attributes: - - key: _SYSTEMD_UNIT - value: .+ - filter/exclude_kubelet: - logs: - exclude: - match_type: strict - record_attributes: - - key: _SYSTEMD_UNIT - value: kubelet.service - filter/exclude_systemd_syslog: - logs: - exclude: - match_type: regexp - record_attributes: - - key: SYSLOG_FACILITY - value: '{{ .Values.fluentd.logs.systemd.excludeFacilityRegex | default "$^" | quote }}' - filter/exclude_systemd_hostname: - logs: - exclude: - match_type: regexp - record_attributes: - - key: _HOSTNAME - value: '{{ .Values.fluentd.logs.systemd.excludeHostRegex | default "$^" | quote }}' - filter/exclude_systemd_priority: - logs: - exclude: - match_type: regexp - record_attributes: - - key: PRIORITY - value: '{{ .Values.fluentd.logs.systemd.excludePriorityRegex | default "$^" | quote }}' - filter/exclude_systemd_unit: - logs: - exclude: - match_type: regexp - record_attributes: - - key: _SYSTEMD_UNIT - value: '{{ 
.Values.fluentd.logs.systemd.excludeUnitRegex | default "$^" | quote }}' - filter/exclude_kubelet_syslog: - logs: - exclude: - match_type: regexp - record_attributes: - - key: SYSLOG_FACILITY - value: '{{ .Values.fluentd.logs.kubelet.excludeFacilityRegex | default "$^" | quote }}' - filter/exclude_kubelet_hostname: - logs: - exclude: - match_type: regexp - record_attributes: - - key: _HOSTNAME - value: '{{ .Values.fluentd.logs.kubelet.excludeHostRegex | default "$^" | quote }}' - filter/exclude_kubelet_priority: - logs: - exclude: - match_type: regexp - record_attributes: - - key: PRIORITY - value: '{{ .Values.fluentd.logs.kubelet.excludePriorityRegex | default "$^" | quote }}' - filter/exclude_kubelet_unit: - logs: - exclude: - match_type: regexp - record_attributes: - - key: _SYSTEMD_UNIT - value: '{{ .Values.fluentd.logs.kubelet.excludeUnitRegex | default "$^" | quote }}' - - groupbyattrs/systemd: - keys: - - _sourceName - - _sourceHost - - _collector - source/systemd: - collector: '{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}' - source_host: "%{_sourceHost}" - source_name: "%{_sourceName}" - source_category: '{{ .Values.fluentd.logs.systemd.sourceCategory | quote }}' - source_category_prefix: '{{ .Values.fluentd.logs.systemd.sourceCategoryPrefix | quote }}' - source_category_replace_dash: '{{ .Values.fluentd.logs.systemd.sourceCategoryReplaceDash | quote }}' - ## Remove all attributes, so body won't by nested by SumoLogic receiver in case of using otlp format - transform/remove_attributes: - logs: - queries: - - limit(attributes, 0) - - ## kubelet related processors - filter/include_kubelet: - logs: - include: - match_type: strict - record_attributes: - - key: _SYSTEMD_UNIT - value: kubelet.service - source/kubelet: - collector: '{{ .Values.sumologic.collectorName | default .Values.sumologic.clusterName | quote }}' - source_host: "%{_sourceHost}" - source_name: '{{ .Values.fluentd.logs.kubelet.sourceName | quote }}' - 
source_category: '{{ .Values.fluentd.logs.kubelet.sourceCategory | quote }}' - source_category_prefix: '{{ .Values.fluentd.logs.kubelet.sourceCategoryPrefix | quote }}' - source_category_replace_dash: '{{ .Values.fluentd.logs.kubelet.sourceCategoryReplaceDash | quote }}' - - sumologic_schema: - add_cloud_namespace: false - - service: - telemetry: - logs: - level: '{{ .Values.metadata.logs.logLevel }}' - extensions: - - health_check - - file_storage - - pprof - pipelines: - logs/fluent/containers: - receivers: - - fluentforward - processors: - - memory_limiter - - filter/include_fluent_tag_containers - - attributes/containers - - groupbyattrs/containers - - k8s_tagger - - resource/add_cluster - - source/containers - - resource/drop_annotations - - attributes/remove_fluent_tag - - resource/containers_copy_node_to_host - - sumologic_schema - - batch - exporters: - - sumologic/containers - ## Uncomment this only if you're enabling the Otelcol Log Collector via otellogs.enabled - ## This is commented due to k8s_tagger memory footprint - ## This is the same pipeline like for logs/fluent/containers with the following modifications: - ## - filter/include_fluent_tag_containers and attributes/remove_fluent_tag are being removed - ## as only containers log are being provided to otlp receiver - ## - attributes/containers functionality is being replaced by otellogs operators - # logs/otlp/containers: - # receivers: - # - otlp - # processors: - # - memory_limiter - # - filter/include_containers - # - groupbyattrs/containers - # - k8s_tagger - # - resource/add_cluster - # - source/containers - # - resource/drop_annotations - # - resource/containers_copy_node_to_host - # - sumologic_schema - # - batch - # exporters: - # - sumologic/containers - logs/fluent/systemd: - receivers: - - fluentforward - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_systemd - - filter/exclude_kubelet - - filter/exclude_systemd_syslog - - 
filter/exclude_systemd_hostname - - filter/exclude_systemd_priority - - filter/exclude_systemd_unit - - attributes/extract_systemd_source_fields - - attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/systemd - - batch - exporters: - - sumologic/systemd - logs/fluent/kubelet: - receivers: - - fluentforward - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_kubelet - - filter/exclude_kubelet_syslog - - filter/exclude_kubelet_hostname - - filter/exclude_kubelet_priority - - filter/exclude_kubelet_unit - - attributes/extract_systemd_source_fields - - attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/kubelet - - batch - exporters: - - sumologic/systemd - ## This is the same pipeline like logs/fluent/systemd, but with the following changes: - ## - otlp receiver instead of fluentforward - ## - added transform/remove_attributes processor - logs/otlp/systemd: - receivers: - - otlp - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_systemd - - filter/exclude_kubelet - - filter/exclude_systemd_syslog - - filter/exclude_systemd_hostname - - filter/exclude_systemd_priority - - filter/exclude_systemd_unit - - attributes/extract_systemd_source_fields - - attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/systemd - - transform/remove_attributes - - batch - exporters: - - sumologic/systemd - ## This is the same pipeline like logs/fluent/kubelet, but with the following changes: - ## - otlp receiver instead of fluentforward - ## - added transform/remove_attributes processor - logs/otlp/kubelet: - receivers: - - otlp - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_kubelet - - filter/exclude_kubelet_syslog - - filter/exclude_kubelet_hostname - - filter/exclude_kubelet_priority - - filter/exclude_kubelet_unit - - attributes/extract_systemd_source_fields - - 
attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/kubelet - - transform/remove_attributes - - batch - exporters: - - sumologic/systemd + ## Directly alter the OT configuration. The value of this key should be a dictionary, that will + ## be directly merged with the generated configuration, overriding existing values. + ## For example: + # merge: + # processors: + # batch: + # send_batch_size: 512 + ## will change the batch size of the pipeline. + ## + ## WARNING: This field is not subject to backwards-compatibility guarantees offered by the rest + ## of this chart. It involves implementation details that may change even in minor versions. + ## Use with caution, and consider opening an issue, so your customization can be added in a safer way. + merge: {} + ## Completely override existing config and replace it with the contents of this value. + ## The value of this key should be a dictionary, that will replace the normal configuration. + ## This is an advanced feature, use with caution, and review the generated configuration first. + override: {} statefulset: nodeSelector: {} tolerations: [] diff --git a/docs/opentelemetry-collector.md b/docs/opentelemetry-collector.md index b80311903d..04578e8874 100644 --- a/docs/opentelemetry-collector.md +++ b/docs/opentelemetry-collector.md @@ -90,10 +90,20 @@ Configuration specific to the log collector DaemonSet can be found under the `otellogs` key. Finally, configuration specific to the metadata enrichment StatefulSet can be found under the `metadata.logs` key. -In both of the aforementioned cases, the raw configuration can be overridden - this is done respectively by using -the `otellogs.config.override` and `metadata.logs.config` sections. Only use these if your use case isn't covered -by the high-level settings. 
See [Sumologic OpenTelemetry Collector configuration][configuration] -for more information +There are two ways of directly configuring OpenTelemetry Collector in either of these cases. +These are both advanced features requiring a good understanding of this chart's architecture and +OpenTelemetry Collector configuration. + +The `metadata.logs.config.merge` and `otellogs.config.merge` keys can be used to provide configuration that will be merged +with the Helm Chart's default configuration. It should be noted that this field is not subject to +normal backwards compatibility guarantees; the default configuration can change even in minor +versions while preserving the same end-to-end behaviour. Use of this field is discouraged - ideally +the necessary customizations should be able to be achieved without touching the otel configuration +directly. Please open an issue if your use case requires the use of this field. + +The `metadata.logs.config.override` and `otellogs.config.override` keys can be used to provide configuration that will completely +replace the default configuration. As above, care must be taken not to depend on implementation details +that may change between minor releases of this Chart. 
[configuration]: https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/Configuration.md [values]: /deploy/helm/sumologic/values.yaml diff --git a/tests/helm/metadata_logs_otc/static/basic.input.yaml b/tests/helm/metadata_logs_otc/static/basic.input.yaml deleted file mode 100644 index ebabfc74c3..0000000000 --- a/tests/helm/metadata_logs_otc/static/basic.input.yaml +++ /dev/null @@ -1,4 +0,0 @@ -sumologic: - logs: - metadata: - provider: otelcol diff --git a/tests/helm/metadata_logs_otc/static/fluentbit.input.yaml b/tests/helm/metadata_logs_otc/static/fluentbit.input.yaml new file mode 100644 index 0000000000..4c6fb889b3 --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/fluentbit.input.yaml @@ -0,0 +1,10 @@ +sumologic: + logs: + metadata: + provider: otelcol + collector: + otelcol: + enabled: false + +fluent-bit: + enabled: true diff --git a/tests/helm/metadata_logs_otc/static/basic.output.yaml b/tests/helm/metadata_logs_otc/static/fluentbit.output.yaml similarity index 82% rename from tests/helm/metadata_logs_otc/static/basic.output.yaml rename to tests/helm/metadata_logs_otc/static/fluentbit.output.yaml index 6e2326ed29..a1b96c06dc 100644 --- a/tests/helm/metadata_logs_otc/static/basic.output.yaml +++ b/tests/helm/metadata_logs_otc/static/fluentbit.output.yaml @@ -11,7 +11,6 @@ metadata: heritage: "Helm" data: config.yaml: | - exporters: sumologic/containers: endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} @@ -106,56 +105,56 @@ data: match_type: regexp record_attributes: - key: _HOSTNAME - value: "$^" + value: $^ filter/exclude_kubelet_priority: logs: exclude: match_type: regexp record_attributes: - key: PRIORITY - value: "$^" + value: $^ filter/exclude_kubelet_syslog: logs: exclude: match_type: regexp record_attributes: - key: SYSLOG_FACILITY - value: "$^" + value: $^ filter/exclude_kubelet_unit: logs: exclude: match_type: regexp record_attributes: - key: _SYSTEMD_UNIT - value: "$^" + value: $^ filter/exclude_systemd_hostname: logs: exclude: 
match_type: regexp record_attributes: - key: _HOSTNAME - value: "$^" + value: $^ filter/exclude_systemd_priority: logs: exclude: match_type: regexp record_attributes: - key: PRIORITY - value: "$^" + value: $^ filter/exclude_systemd_syslog: logs: exclude: match_type: regexp record_attributes: - key: SYSLOG_FACILITY - value: "$^" + value: $^ filter/exclude_systemd_unit: logs: exclude: match_type: regexp record_attributes: - key: _SYSTEMD_UNIT - value: "$^" + value: $^ filter/include_containers: logs: include: @@ -240,7 +239,7 @@ data: attributes: - action: upsert key: cluster - value: "kubernetes" + value: kubernetes resource/containers_copy_node_to_host: attributes: - action: upsert @@ -252,7 +251,7 @@ data: pattern: ^pod_annotations_.* source/containers: annotation_prefix: pod_annotations_ - collector: "kubernetes" + collector: kubernetes container_annotations: enabled: false prefixes: [] @@ -265,22 +264,22 @@ data: pod_name_key: k8s.pod.pod_name pod_template_hash_key: pod_labels_pod-template-hash source_category: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' - source_category_prefix: "kubernetes/" - source_category_replace_dash: "/" + source_category_prefix: kubernetes/ + source_category_replace_dash: / source_host: '%{k8s.pod.hostname}' source_name: '%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}' source/kubelet: - collector: "kubernetes" - source_category: "kubelet" - source_category_prefix: "kubernetes/" - source_category_replace_dash: "/" + collector: kubernetes + source_category: kubelet + source_category_prefix: kubernetes/ + source_category_replace_dash: / source_host: '%{_sourceHost}' - source_name: "k8s_kubelet" + source_name: k8s_kubelet source/systemd: - collector: "kubernetes" - source_category: "system" - source_category_prefix: "kubernetes/" - source_category_replace_dash: "/" + collector: kubernetes + source_category: system + source_category_prefix: kubernetes/ + source_category_replace_dash: / source_host: '%{_sourceHost}' source_name: 
'%{_sourceName}' sumologic_schema: @@ -292,10 +291,6 @@ data: receivers: fluentforward: endpoint: 0.0.0.0:24321 - otlp: - protocols: - http: - endpoint: 0.0.0.0:4318 service: extensions: - health_check @@ -359,47 +354,6 @@ data: - batch receivers: - fluentforward - logs/otlp/kubelet: - exporters: - - sumologic/systemd - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_kubelet - - filter/exclude_kubelet_syslog - - filter/exclude_kubelet_hostname - - filter/exclude_kubelet_priority - - filter/exclude_kubelet_unit - - attributes/extract_systemd_source_fields - - attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/kubelet - - transform/remove_attributes - - batch - receivers: - - otlp - logs/otlp/systemd: - exporters: - - sumologic/systemd - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_systemd - - filter/exclude_kubelet - - filter/exclude_systemd_syslog - - filter/exclude_systemd_hostname - - filter/exclude_systemd_priority - - filter/exclude_systemd_unit - - attributes/extract_systemd_source_fields - - attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/systemd - - transform/remove_attributes - - batch - receivers: - - otlp telemetry: logs: level: info diff --git a/tests/helm/metadata_logs_otc/static/merge.input.yaml b/tests/helm/metadata_logs_otc/static/merge.input.yaml new file mode 100644 index 0000000000..ca3ce10594 --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/merge.input.yaml @@ -0,0 +1,18 @@ +sumologic: + logs: + metadata: + provider: otelcol + collector: + otelcol: + enabled: true + +fluent-bit: + enabled: false + +metadata: + logs: + config: + merge: + processors: + batch: + send_batch_size: 7 diff --git a/tests/helm/metadata_logs_otc/static/merge.output.yaml b/tests/helm/metadata_logs_otc/static/merge.output.yaml new file mode 100644 index 0000000000..0117b9dab3 --- /dev/null +++ 
b/tests/helm/metadata_logs_otc/static/merge.output.yaml @@ -0,0 +1,361 @@ +--- +# Source: sumologic/templates/logs/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-logs + labels: + app: RELEASE-NAME-sumologic-otelcol-logs + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + exporters: + sumologic/containers: + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + json_logs: + add_timestamp: true + timestamp_key: timestamp + log_format: json + sending_queue: + enabled: true + num_consumers: 10 + persistent_storage_enabled: true + queue_size: 10000 + source_category: '%{_sourceCategory}' + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + sumologic/systemd: + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + json_logs: + add_timestamp: true + flatten_body: true + timestamp_key: timestamp + log_format: json + sending_queue: + enabled: true + num_consumers: 10 + persistent_storage_enabled: true + queue_size: 10000 + source_category: '%{_sourceCategory}' + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + extensions: + file_storage: + compaction: + directory: /var/lib/storage/otc + on_rebound: true + on_start: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: {} + pprof: {} + processors: + attributes/containers: + actions: + - action: extract + key: fluent.tag + pattern: ^containers\.var\.log\.containers\.(?P[^_]+)_(?P[^_]+)_(?P.+)-(?P[a-f0-9]{64})\.log$ + - action: insert + from_attribute: container_id + key: k8s.container.id + - action: delete + key: container_id + - action: insert + from_attribute: k8s_pod_name + key: k8s.pod.name + - action: delete + key: k8s_pod_name + - action: insert + from_attribute: k8s_namespace + key: k8s.namespace.name + - action: delete + key: k8s_namespace + - action: insert + from_attribute: k8s_container_name + key: k8s.container.name + - action: delete + key: k8s_container_name 
+ attributes/extract_systemd_source_fields: + actions: + - action: extract + key: fluent.tag + pattern: ^host\.(?P<_sourceName>[a-zA-z0-9]+)\..+$ + - action: insert + from_attribute: _HOSTNAME + key: _sourceHost + attributes/remove_fluent_tag: + actions: + - action: delete + key: fluent.tag + batch: + send_batch_size: 7 + timeout: 1s + filter/exclude_kubelet: + logs: + exclude: + match_type: strict + record_attributes: + - key: _SYSTEMD_UNIT + value: kubelet.service + filter/exclude_kubelet_hostname: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _HOSTNAME + value: $^ + filter/exclude_kubelet_priority: + logs: + exclude: + match_type: regexp + record_attributes: + - key: PRIORITY + value: $^ + filter/exclude_kubelet_syslog: + logs: + exclude: + match_type: regexp + record_attributes: + - key: SYSLOG_FACILITY + value: $^ + filter/exclude_kubelet_unit: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _SYSTEMD_UNIT + value: $^ + filter/exclude_systemd_hostname: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _HOSTNAME + value: $^ + filter/exclude_systemd_priority: + logs: + exclude: + match_type: regexp + record_attributes: + - key: PRIORITY + value: $^ + filter/exclude_systemd_syslog: + logs: + exclude: + match_type: regexp + record_attributes: + - key: SYSLOG_FACILITY + value: $^ + filter/exclude_systemd_unit: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _SYSTEMD_UNIT + value: $^ + filter/include_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: k8s.container.name + value: .+ + filter/include_fluent_tag_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: containers\..+ + filter/include_fluent_tag_host: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: host\..+ + filter/include_kubelet: + logs: + include: + match_type: strict + record_attributes: + - key: 
_SYSTEMD_UNIT + value: kubelet.service + filter/include_systemd: + logs: + include: + match_type: regexp + record_attributes: + - key: _SYSTEMD_UNIT + value: .+ + groupbyattrs/containers: + keys: + - k8s.container.id + - k8s.container.name + - k8s.namespace.name + - k8s.pod.name + - _collector + groupbyattrs/systemd: + keys: + - _sourceName + - _sourceHost + - _collector + k8s_tagger: + extract: + annotations: + - key: '*' + tag_name: pod_annotations_%s + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + namespace_labels: + - key: '*' + tag_name: namespace_labels_%s + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 75 + spike_limit_percentage: 20 + resource/add_cluster: + attributes: + - action: upsert + key: cluster + value: kubernetes + resource/containers_copy_node_to_host: + attributes: + - action: upsert + from_attribute: k8s.node.name + key: k8s.pod.hostname + resource/drop_annotations: + attributes: + - action: delete + pattern: ^pod_annotations_.* + source/containers: + annotation_prefix: pod_annotations_ + collector: kubernetes + container_annotations: + enabled: false + prefixes: [] + exclude: + k8s.container.name: "" + k8s.namespace.name: "" + k8s.pod.hostname: "" + k8s.pod.name: "" + pod_key: k8s.pod.name + pod_name_key: k8s.pod.pod_name + pod_template_hash_key: pod_labels_pod-template-hash + source_category: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{k8s.pod.hostname}' + source_name: '%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}' + source/kubelet: + collector: kubernetes + source_category: kubelet + source_category_prefix: kubernetes/ + 
source_category_replace_dash: / + source_host: '%{_sourceHost}' + source_name: k8s_kubelet + source/systemd: + collector: kubernetes + source_category: system + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + sumologic_schema: + add_cloud_namespace: false + transform/remove_attributes: + logs: + queries: + - limit(attributes, 0) + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + logs/otlp/containers: + exporters: + - sumologic/containers + processors: + - memory_limiter + - filter/include_containers + - groupbyattrs/containers + - k8s_tagger + - resource/add_cluster + - source/containers + - resource/drop_annotations + - resource/containers_copy_node_to_host + - sumologic_schema + - batch + receivers: + - otlp + logs/otlp/kubelet: + exporters: + - sumologic/systemd + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_kubelet + - filter/exclude_kubelet_syslog + - filter/exclude_kubelet_hostname + - filter/exclude_kubelet_priority + - filter/exclude_kubelet_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - resource/add_cluster + - source/kubelet + - transform/remove_attributes + - batch + receivers: + - otlp + logs/otlp/systemd: + exporters: + - sumologic/systemd + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_systemd + - filter/exclude_kubelet + - filter/exclude_systemd_syslog + - filter/exclude_systemd_hostname + - filter/exclude_systemd_priority + - filter/exclude_systemd_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - resource/add_cluster + - source/systemd + - transform/remove_attributes + - batch + receivers: + - otlp + telemetry: + logs: + level: info diff --git 
a/tests/helm/metadata_logs_otc/static/otel.input.yaml b/tests/helm/metadata_logs_otc/static/otel.input.yaml new file mode 100644 index 0000000000..cbdcf56aa1 --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/otel.input.yaml @@ -0,0 +1,10 @@ +sumologic: + logs: + metadata: + provider: otelcol + collector: + otelcol: + enabled: true + +fluent-bit: + enabled: false diff --git a/tests/helm/metadata_logs_otc/static/otel.output.yaml b/tests/helm/metadata_logs_otc/static/otel.output.yaml new file mode 100644 index 0000000000..52f8e7c225 --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/otel.output.yaml @@ -0,0 +1,361 @@ +--- +# Source: sumologic/templates/logs/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-logs + labels: + app: RELEASE-NAME-sumologic-otelcol-logs + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + exporters: + sumologic/containers: + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + json_logs: + add_timestamp: true + timestamp_key: timestamp + log_format: json + sending_queue: + enabled: true + num_consumers: 10 + persistent_storage_enabled: true + queue_size: 10000 + source_category: '%{_sourceCategory}' + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + sumologic/systemd: + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + json_logs: + add_timestamp: true + flatten_body: true + timestamp_key: timestamp + log_format: json + sending_queue: + enabled: true + num_consumers: 10 + persistent_storage_enabled: true + queue_size: 10000 + source_category: '%{_sourceCategory}' + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + extensions: + file_storage: + compaction: + directory: /var/lib/storage/otc + on_rebound: true + on_start: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: {} + pprof: {} + processors: + attributes/containers: + actions: + - action: extract + key: fluent.tag + 
pattern: ^containers\.var\.log\.containers\.(?P[^_]+)_(?P[^_]+)_(?P.+)-(?P[a-f0-9]{64})\.log$ + - action: insert + from_attribute: container_id + key: k8s.container.id + - action: delete + key: container_id + - action: insert + from_attribute: k8s_pod_name + key: k8s.pod.name + - action: delete + key: k8s_pod_name + - action: insert + from_attribute: k8s_namespace + key: k8s.namespace.name + - action: delete + key: k8s_namespace + - action: insert + from_attribute: k8s_container_name + key: k8s.container.name + - action: delete + key: k8s_container_name + attributes/extract_systemd_source_fields: + actions: + - action: extract + key: fluent.tag + pattern: ^host\.(?P<_sourceName>[a-zA-z0-9]+)\..+$ + - action: insert + from_attribute: _HOSTNAME + key: _sourceHost + attributes/remove_fluent_tag: + actions: + - action: delete + key: fluent.tag + batch: + send_batch_size: 1024 + timeout: 1s + filter/exclude_kubelet: + logs: + exclude: + match_type: strict + record_attributes: + - key: _SYSTEMD_UNIT + value: kubelet.service + filter/exclude_kubelet_hostname: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _HOSTNAME + value: $^ + filter/exclude_kubelet_priority: + logs: + exclude: + match_type: regexp + record_attributes: + - key: PRIORITY + value: $^ + filter/exclude_kubelet_syslog: + logs: + exclude: + match_type: regexp + record_attributes: + - key: SYSLOG_FACILITY + value: $^ + filter/exclude_kubelet_unit: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _SYSTEMD_UNIT + value: $^ + filter/exclude_systemd_hostname: + logs: + exclude: + match_type: regexp + record_attributes: + - key: _HOSTNAME + value: $^ + filter/exclude_systemd_priority: + logs: + exclude: + match_type: regexp + record_attributes: + - key: PRIORITY + value: $^ + filter/exclude_systemd_syslog: + logs: + exclude: + match_type: regexp + record_attributes: + - key: SYSLOG_FACILITY + value: $^ + filter/exclude_systemd_unit: + logs: + exclude: + match_type: regexp 
+ record_attributes: + - key: _SYSTEMD_UNIT + value: $^ + filter/include_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: k8s.container.name + value: .+ + filter/include_fluent_tag_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: containers\..+ + filter/include_fluent_tag_host: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: host\..+ + filter/include_kubelet: + logs: + include: + match_type: strict + record_attributes: + - key: _SYSTEMD_UNIT + value: kubelet.service + filter/include_systemd: + logs: + include: + match_type: regexp + record_attributes: + - key: _SYSTEMD_UNIT + value: .+ + groupbyattrs/containers: + keys: + - k8s.container.id + - k8s.container.name + - k8s.namespace.name + - k8s.pod.name + - _collector + groupbyattrs/systemd: + keys: + - _sourceName + - _sourceHost + - _collector + k8s_tagger: + extract: + annotations: + - key: '*' + tag_name: pod_annotations_%s + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + namespace_labels: + - key: '*' + tag_name: namespace_labels_%s + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 75 + spike_limit_percentage: 20 + resource/add_cluster: + attributes: + - action: upsert + key: cluster + value: kubernetes + resource/containers_copy_node_to_host: + attributes: + - action: upsert + from_attribute: k8s.node.name + key: k8s.pod.hostname + resource/drop_annotations: + attributes: + - action: delete + pattern: ^pod_annotations_.* + source/containers: + annotation_prefix: pod_annotations_ + collector: kubernetes + container_annotations: + enabled: false + prefixes: [] + exclude: + 
k8s.container.name: "" + k8s.namespace.name: "" + k8s.pod.hostname: "" + k8s.pod.name: "" + pod_key: k8s.pod.name + pod_name_key: k8s.pod.pod_name + pod_template_hash_key: pod_labels_pod-template-hash + source_category: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{k8s.pod.hostname}' + source_name: '%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}' + source/kubelet: + collector: kubernetes + source_category: kubelet + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{_sourceHost}' + source_name: k8s_kubelet + source/systemd: + collector: kubernetes + source_category: system + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + sumologic_schema: + add_cloud_namespace: false + transform/remove_attributes: + logs: + queries: + - limit(attributes, 0) + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + logs/otlp/containers: + exporters: + - sumologic/containers + processors: + - memory_limiter + - filter/include_containers + - groupbyattrs/containers + - k8s_tagger + - resource/add_cluster + - source/containers + - resource/drop_annotations + - resource/containers_copy_node_to_host + - sumologic_schema + - batch + receivers: + - otlp + logs/otlp/kubelet: + exporters: + - sumologic/systemd + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_kubelet + - filter/exclude_kubelet_syslog + - filter/exclude_kubelet_hostname + - filter/exclude_kubelet_priority + - filter/exclude_kubelet_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - resource/add_cluster + - source/kubelet + - transform/remove_attributes + - batch + receivers: + - otlp + logs/otlp/systemd: + 
exporters: + - sumologic/systemd + processors: + - memory_limiter + - filter/include_fluent_tag_host + - filter/include_systemd + - filter/exclude_kubelet + - filter/exclude_systemd_syslog + - filter/exclude_systemd_hostname + - filter/exclude_systemd_priority + - filter/exclude_systemd_unit + - attributes/extract_systemd_source_fields + - attributes/remove_fluent_tag + - groupbyattrs/systemd + - resource/add_cluster + - source/systemd + - transform/remove_attributes + - batch + receivers: + - otlp + telemetry: + logs: + level: info diff --git a/tests/helm/metadata_logs_otc/static/override.input.yaml b/tests/helm/metadata_logs_otc/static/override.input.yaml new file mode 100644 index 0000000000..06dfd6768d --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/override.input.yaml @@ -0,0 +1,193 @@ +sumologic: + logs: + metadata: + provider: otelcol + +metadata: + logs: + config: + override: + exporters: + sumologic/containers: + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + json_logs: + add_timestamp: true + timestamp_key: timestamp + log_format: json + sending_queue: + enabled: true + num_consumers: 10 + persistent_storage_enabled: true + queue_size: 10000 + source_category: '%{_sourceCategory}' + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + extensions: + file_storage: + compaction: + directory: /var/lib/storage/otc + on_rebound: true + on_start: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: {} + pprof: {} + processors: + attributes/containers: + actions: + - action: extract + key: fluent.tag + pattern: ^containers\.var\.log\.containers\.(?P[^_]+)_(?P[^_]+)_(?P.+)-(?P[a-f0-9]{64})\.log$ + - action: insert + from_attribute: container_id + key: k8s.container.id + - action: delete + key: container_id + - action: insert + from_attribute: k8s_pod_name + key: k8s.pod.name + - action: delete + key: k8s_pod_name + - action: insert + from_attribute: k8s_namespace + key: k8s.namespace.name + - action: delete + key: 
k8s_namespace + - action: insert + from_attribute: k8s_container_name + key: k8s.container.name + - action: delete + key: k8s_container_name + attributes/remove_fluent_tag: + actions: + - action: delete + key: fluent.tag + batch: + send_batch_size: 1024 + timeout: 1s + filter/include_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: k8s.container.name + value: .+ + filter/include_fluent_tag_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: containers\..+ + filter/include_fluent_tag_host: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: host\..+ + groupbyattrs/containers: + keys: + - k8s.container.id + - k8s.container.name + - k8s.namespace.name + - k8s.pod.name + - _collector + k8s_tagger: + extract: + annotations: + - key: '*' + tag_name: pod_annotations_%s + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + namespace_labels: + - key: '*' + tag_name: namespace_labels_%s + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 75 + spike_limit_percentage: 20 + resource/add_cluster: + attributes: + - action: upsert + key: cluster + value: kubernetes + resource/containers_copy_node_to_host: + attributes: + - action: upsert + from_attribute: k8s.node.name + key: k8s.pod.hostname + resource/drop_annotations: + attributes: + - action: delete + pattern: ^pod_annotations_.* + source/containers: + annotation_prefix: pod_annotations_ + collector: kubernetes + container_annotations: + enabled: false + prefixes: [] + exclude: + k8s.container.name: "" + k8s.namespace.name: "" + k8s.pod.hostname: "" + k8s.pod.name: "" + pod_key: k8s.pod.name + 
pod_name_key: k8s.pod.pod_name + pod_template_hash_key: pod_labels_pod-template-hash + source_category: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{k8s.pod.hostname}' + source_name: '%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}' + sumologic_schema: + add_cloud_namespace: false + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + logs/otlp/containers: + exporters: + - sumologic/containers + processors: + - memory_limiter + - filter/include_containers + - groupbyattrs/containers + - k8s_tagger + - resource/add_cluster + - source/containers + - resource/drop_annotations + - resource/containers_copy_node_to_host + - sumologic_schema + - batch + receivers: + - otlp + telemetry: + logs: + level: info diff --git a/tests/helm/metadata_logs_otc/static/override.output.yaml b/tests/helm/metadata_logs_otc/static/override.output.yaml new file mode 100644 index 0000000000..9a608a470b --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/override.output.yaml @@ -0,0 +1,197 @@ +--- +# Source: sumologic/templates/logs/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-logs + labels: + app: RELEASE-NAME-sumologic-otelcol-logs + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + exporters: + sumologic/containers: + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + json_logs: + add_timestamp: true + timestamp_key: timestamp + log_format: json + sending_queue: + enabled: true + num_consumers: 10 + persistent_storage_enabled: true + queue_size: 10000 + source_category: '%{_sourceCategory}' + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + extensions: + file_storage: + compaction: + directory: /var/lib/storage/otc + on_rebound: true + on_start: true + 
directory: /var/lib/storage/otc + timeout: 10s + health_check: {} + pprof: {} + processors: + attributes/containers: + actions: + - action: extract + key: fluent.tag + pattern: ^containers\.var\.log\.containers\.(?P[^_]+)_(?P[^_]+)_(?P.+)-(?P[a-f0-9]{64})\.log$ + - action: insert + from_attribute: container_id + key: k8s.container.id + - action: delete + key: container_id + - action: insert + from_attribute: k8s_pod_name + key: k8s.pod.name + - action: delete + key: k8s_pod_name + - action: insert + from_attribute: k8s_namespace + key: k8s.namespace.name + - action: delete + key: k8s_namespace + - action: insert + from_attribute: k8s_container_name + key: k8s.container.name + - action: delete + key: k8s_container_name + attributes/remove_fluent_tag: + actions: + - action: delete + key: fluent.tag + batch: + send_batch_size: 1024 + timeout: 1s + filter/include_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: k8s.container.name + value: .+ + filter/include_fluent_tag_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: containers\..+ + filter/include_fluent_tag_host: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: host\..+ + groupbyattrs/containers: + keys: + - k8s.container.id + - k8s.container.name + - k8s.namespace.name + - k8s.pod.name + - _collector + k8s_tagger: + extract: + annotations: + - key: '*' + tag_name: pod_annotations_%s + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + namespace_labels: + - key: '*' + tag_name: namespace_labels_%s + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 75 + spike_limit_percentage: 20 + 
resource/add_cluster: + attributes: + - action: upsert + key: cluster + value: kubernetes + resource/containers_copy_node_to_host: + attributes: + - action: upsert + from_attribute: k8s.node.name + key: k8s.pod.hostname + resource/drop_annotations: + attributes: + - action: delete + pattern: ^pod_annotations_.* + source/containers: + annotation_prefix: pod_annotations_ + collector: kubernetes + container_annotations: + enabled: false + prefixes: [] + exclude: + k8s.container.name: "" + k8s.namespace.name: "" + k8s.pod.hostname: "" + k8s.pod.name: "" + pod_key: k8s.pod.name + pod_name_key: k8s.pod.pod_name + pod_template_hash_key: pod_labels_pod-template-hash + source_category: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{k8s.pod.hostname}' + source_name: '%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}' + sumologic_schema: + add_cloud_namespace: false + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + logs/otlp/containers: + exporters: + - sumologic/containers + processors: + - memory_limiter + - filter/include_containers + - groupbyattrs/containers + - k8s_tagger + - resource/add_cluster + - source/containers + - resource/drop_annotations + - resource/containers_copy_node_to_host + - sumologic_schema + - batch + receivers: + - otlp + telemetry: + logs: + level: info diff --git a/tests/helm/metadata_logs_otc/static/systemd.input.yaml b/tests/helm/metadata_logs_otc/static/systemd.input.yaml new file mode 100644 index 0000000000..12c050d348 --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/systemd.input.yaml @@ -0,0 +1,12 @@ +sumologic: + logs: + metadata: + provider: otelcol + collector: + otelcol: + enabled: true + systemd: + enabled: false + +fluent-bit: + enabled: false diff --git a/tests/helm/metadata_logs_otc/static/systemd.output.yaml 
b/tests/helm/metadata_logs_otc/static/systemd.output.yaml new file mode 100644 index 0000000000..ef8353b324 --- /dev/null +++ b/tests/helm/metadata_logs_otc/static/systemd.output.yaml @@ -0,0 +1,190 @@ +--- +# Source: sumologic/templates/logs/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-logs + labels: + app: RELEASE-NAME-sumologic-otelcol-logs + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + exporters: + sumologic/containers: + endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} + json_logs: + add_timestamp: true + timestamp_key: timestamp + log_format: json + sending_queue: + enabled: true + num_consumers: 10 + persistent_storage_enabled: true + queue_size: 10000 + source_category: '%{_sourceCategory}' + source_host: '%{_sourceHost}' + source_name: '%{_sourceName}' + extensions: + file_storage: + compaction: + directory: /var/lib/storage/otc + on_rebound: true + on_start: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: {} + pprof: {} + processors: + attributes/containers: + actions: + - action: extract + key: fluent.tag + pattern: ^containers\.var\.log\.containers\.(?P[^_]+)_(?P[^_]+)_(?P.+)-(?P[a-f0-9]{64})\.log$ + - action: insert + from_attribute: container_id + key: k8s.container.id + - action: delete + key: container_id + - action: insert + from_attribute: k8s_pod_name + key: k8s.pod.name + - action: delete + key: k8s_pod_name + - action: insert + from_attribute: k8s_namespace + key: k8s.namespace.name + - action: delete + key: k8s_namespace + - action: insert + from_attribute: k8s_container_name + key: k8s.container.name + - action: delete + key: k8s_container_name + attributes/remove_fluent_tag: + actions: + - action: delete + key: fluent.tag + batch: + send_batch_size: 1024 + timeout: 1s + filter/include_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: k8s.container.name + value: 
.+ + filter/include_fluent_tag_containers: + logs: + include: + match_type: regexp + record_attributes: + - key: fluent.tag + value: containers\..+ + groupbyattrs/containers: + keys: + - k8s.container.id + - k8s.container.name + - k8s.namespace.name + - k8s.pod.name + - _collector + k8s_tagger: + extract: + annotations: + - key: '*' + tag_name: pod_annotations_%s + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - containerId + - containerName + - daemonSetName + - deploymentName + - hostName + - namespace + - nodeName + - podId + - podName + - replicaSetName + - serviceName + - statefulSetName + namespace_labels: + - key: '*' + tag_name: namespace_labels_%s + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 75 + spike_limit_percentage: 20 + resource/add_cluster: + attributes: + - action: upsert + key: cluster + value: kubernetes + resource/containers_copy_node_to_host: + attributes: + - action: upsert + from_attribute: k8s.node.name + key: k8s.pod.hostname + resource/drop_annotations: + attributes: + - action: delete + pattern: ^pod_annotations_.* + source/containers: + annotation_prefix: pod_annotations_ + collector: kubernetes + container_annotations: + enabled: false + prefixes: [] + exclude: + k8s.container.name: "" + k8s.namespace.name: "" + k8s.pod.hostname: "" + k8s.pod.name: "" + pod_key: k8s.pod.name + pod_name_key: k8s.pod.pod_name + pod_template_hash_key: pod_labels_pod-template-hash + source_category: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' + source_category_prefix: kubernetes/ + source_category_replace_dash: / + source_host: '%{k8s.pod.hostname}' + source_name: '%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}' + sumologic_schema: + add_cloud_namespace: false + receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + 
logs/otlp/containers: + exporters: + - sumologic/containers + processors: + - memory_limiter + - filter/include_containers + - groupbyattrs/containers + - k8s_tagger + - resource/add_cluster + - source/containers + - resource/drop_annotations + - resource/containers_copy_node_to_host + - sumologic_schema + - batch + receivers: + - otlp + telemetry: + logs: + level: info diff --git a/tests/helm/metadata_logs_otc/static/templates.input.yaml b/tests/helm/metadata_logs_otc/static/templates.input.yaml index 2b4c26e4ba..c1cf732f68 100644 --- a/tests/helm/metadata_logs_otc/static/templates.input.yaml +++ b/tests/helm/metadata_logs_otc/static/templates.input.yaml @@ -3,6 +3,12 @@ sumologic: logs: metadata: provider: otelcol + collector: + otelcol: + enabled: true + +fluent-bit: + enabled: false fluentd: logs: diff --git a/tests/helm/metadata_logs_otc/static/templates.output.yaml b/tests/helm/metadata_logs_otc/static/templates.output.yaml index 2763940153..a41fcd6d4d 100644 --- a/tests/helm/metadata_logs_otc/static/templates.output.yaml +++ b/tests/helm/metadata_logs_otc/static/templates.output.yaml @@ -11,7 +11,6 @@ metadata: heritage: "Helm" data: config.yaml: | - exporters: sumologic/containers: endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE} @@ -106,56 +105,56 @@ data: match_type: regexp record_attributes: - key: _HOSTNAME - value: "my_kubelet_excludeHostRegex" + value: my_kubelet_excludeHostRegex filter/exclude_kubelet_priority: logs: exclude: match_type: regexp record_attributes: - key: PRIORITY - value: "my_kubelet_excludePriorityRegex" + value: my_kubelet_excludePriorityRegex filter/exclude_kubelet_syslog: logs: exclude: match_type: regexp record_attributes: - key: SYSLOG_FACILITY - value: "my_kubelet_excludeFacilityRegex" + value: my_kubelet_excludeFacilityRegex filter/exclude_kubelet_unit: logs: exclude: match_type: regexp record_attributes: - key: _SYSTEMD_UNIT - value: "my_kubelet_excludeUnitRegex" + value: my_kubelet_excludeUnitRegex 
filter/exclude_systemd_hostname: logs: exclude: match_type: regexp record_attributes: - key: _HOSTNAME - value: "my_systemd_excludeHostRegex" + value: my_systemd_excludeHostRegex filter/exclude_systemd_priority: logs: exclude: match_type: regexp record_attributes: - key: PRIORITY - value: "my_systemd_excludePriorityRegex" + value: my_systemd_excludePriorityRegex filter/exclude_systemd_syslog: logs: exclude: match_type: regexp record_attributes: - key: SYSLOG_FACILITY - value: "my_systemd_excludeFacilityRegex" + value: my_systemd_excludeFacilityRegex filter/exclude_systemd_unit: logs: exclude: match_type: regexp record_attributes: - key: _SYSTEMD_UNIT - value: "my_systemd_excludeUnitRegex" + value: my_systemd_excludeUnitRegex filter/include_containers: logs: include: @@ -240,7 +239,7 @@ data: attributes: - action: upsert key: cluster - value: "kubernetes" + value: kubernetes resource/containers_copy_node_to_host: attributes: - action: upsert @@ -252,35 +251,35 @@ data: pattern: ^pod_annotations_.* source/containers: annotation_prefix: pod_annotations_ - collector: "my_collectorName" + collector: my_collectorName container_annotations: enabled: false prefixes: [] exclude: - k8s.container.name: "my_containers_excludeContainerRegex" - k8s.namespace.name: "my_containers_excludeNamespaceRegex" - k8s.pod.hostname: "my_containers_excludeHostRegex" - k8s.pod.name: "my_containers_excludePodRegex" + k8s.container.name: my_containers_excludeContainerRegex + k8s.namespace.name: my_containers_excludeNamespaceRegex + k8s.pod.hostname: my_containers_excludeHostRegex + k8s.pod.name: my_containers_excludePodRegex pod_key: k8s.pod.name pod_name_key: k8s.pod.pod_name pod_template_hash_key: pod_labels_pod-template-hash source_category: '%{k8s.namespace.name}/%{k8s.pod.pod_name}' - source_category_prefix: "my_containers_sourceCategoryPrefix" - source_category_replace_dash: "my_containers_sourceCategoryReplaceDash" + source_category_prefix: my_containers_sourceCategoryPrefix + 
source_category_replace_dash: my_containers_sourceCategoryReplaceDash source_host: '%{k8s.pod.hostname}' source_name: '%{k8s.namespace.name}.%{k8s.pod.name}.%{k8s.container.name}' source/kubelet: - collector: "my_collectorName" - source_category: "kubelet" - source_category_prefix: "my_kubelet_sourceCategoryPrefix" - source_category_replace_dash: "my_kubelet_sourceCategoryReplaceDash" + collector: my_collectorName + source_category: kubelet + source_category_prefix: my_kubelet_sourceCategoryPrefix + source_category_replace_dash: my_kubelet_sourceCategoryReplaceDash source_host: '%{_sourceHost}' - source_name: "k8s_kubelet" + source_name: k8s_kubelet source/systemd: - collector: "my_collectorName" - source_category: "system" - source_category_prefix: "my_systemd_sourceCategoryPrefix" - source_category_replace_dash: "my_systemd_sourceCategoryReplaceDash" + collector: my_collectorName + source_category: system + source_category_prefix: my_systemd_sourceCategoryPrefix + source_category_replace_dash: my_systemd_sourceCategoryReplaceDash source_host: '%{_sourceHost}' source_name: '%{_sourceName}' sumologic_schema: @@ -290,8 +289,6 @@ data: queries: - limit(attributes, 0) receivers: - fluentforward: - endpoint: 0.0.0.0:24321 otlp: protocols: http: @@ -302,63 +299,22 @@ data: - file_storage - pprof pipelines: - logs/fluent/containers: + logs/otlp/containers: exporters: - sumologic/containers processors: - memory_limiter - - filter/include_fluent_tag_containers - - attributes/containers + - filter/include_containers - groupbyattrs/containers - k8s_tagger - resource/add_cluster - source/containers - resource/drop_annotations - - attributes/remove_fluent_tag - resource/containers_copy_node_to_host - sumologic_schema - batch receivers: - - fluentforward - logs/fluent/kubelet: - exporters: - - sumologic/systemd - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_kubelet - - filter/exclude_kubelet_syslog - - filter/exclude_kubelet_hostname - - 
filter/exclude_kubelet_priority - - filter/exclude_kubelet_unit - - attributes/extract_systemd_source_fields - - attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/kubelet - - batch - receivers: - - fluentforward - logs/fluent/systemd: - exporters: - - sumologic/systemd - processors: - - memory_limiter - - filter/include_fluent_tag_host - - filter/include_systemd - - filter/exclude_kubelet - - filter/exclude_systemd_syslog - - filter/exclude_systemd_hostname - - filter/exclude_systemd_priority - - filter/exclude_systemd_unit - - attributes/extract_systemd_source_fields - - attributes/remove_fluent_tag - - groupbyattrs/systemd - - resource/add_cluster - - source/systemd - - batch - receivers: - - fluentforward + - otlp logs/otlp/kubelet: exporters: - sumologic/systemd