Skip to content

Commit

Permalink
chore: upgrade metadata otelcol to 0.66.0
Browse files Browse the repository at this point in the history
  • Loading branch information
Mikołaj Świątek committed Dec 13, 2022
1 parent d1a7686 commit d2bd8fe
Show file tree
Hide file tree
Showing 10 changed files with 60 additions and 54 deletions.
3 changes: 2 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Changed

- chore: upgrade otelcol for events to 0.66.0 [#2686]
- chore: upgrade otelcol to 0.66.0-sumo-0 [#2686] [#2687]
- chore: upgrade nginx to 1.23.1 [#2544] [#2554]
- feat: enable remote write proxy by default [#2483]
- chore: update kubernetes-tools to 2.13.0 [#2515]
Expand Down Expand Up @@ -163,6 +163,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
[#2664]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2664
[#2653]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2653
[#2686]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2686
[#2687]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2687
[v3.0.0-beta.0]: https://github.com/SumoLogic/sumologic-kubernetes-collection/compare/v2.17.0...v3.0.0-beta.0
[telegraf_operator_comapare_1.3.5_and_1.3.10]: https://github.com/influxdata/helm-charts/compare/telegraf-operator-1.3.5...telegraf-operator-1.3.10
[cert-manager-1.4]: https://github.com/cert-manager/cert-manager/releases/tag/v1.4.0
Expand Down
21 changes: 10 additions & 11 deletions deploy/helm/sumologic/conf/logs/otelcol/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,13 @@ exporters:
add_timestamp: true
timestamp_key: timestamp
endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE}
source_name: "%{_sourceName}"
source_category: "%{_sourceCategory}"
source_host: "%{_sourceHost}"
## Configuration for sending queue
## ref: https://github.com/open-telemetry/opentelemetry-collector/tree/release/v0.37.x/exporter/exporterhelper#configuration
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
{{ end }}
Expand All @@ -54,14 +53,13 @@ exporters:
## otellogs based logs will all be sent as body attributes
flatten_body: true
endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE}
source_name: "%{_sourceName}"
source_category: "%{_sourceCategory}"
source_host: "%{_sourceHost}"
## Configuration for sending queue
## ref: https://github.com/open-telemetry/opentelemetry-collector/tree/release/v0.37.x/exporter/exporterhelper#configuration
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
{{ end }}
Expand Down Expand Up @@ -309,9 +307,10 @@ processors:
source_category_replace_dash: {{ .Values.sumologic.logs.systemd.sourceCategoryReplaceDash | quote }}
## Remove all attributes, so body won't be nested by SumoLogic receiver in case of using otlp format
transform/remove_attributes:
logs:
queries:
- limit(attributes, 0)
log_statements:
- context: log
statements:
- limit(attributes, 0, [])

## kubelet related processors
filter/include_kubelet:
Expand Down
32 changes: 24 additions & 8 deletions deploy/helm/sumologic/conf/metrics/otelcol/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,9 @@ exporters:
## ref: https://github.com/open-telemetry/opentelemetry-collector/tree/release/v0.37.x/exporter/exporterhelper#configuration
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
## setting queue_size to a high number, so we always use the maximum space of the storage
## minimal alert non-triggering queue size (if only one exporter is being used): 10GB/16MB = 640
Expand All @@ -85,7 +87,9 @@ exporters:
endpoint: ${SUMO_ENDPOINT_APISERVER_METRICS_SOURCE}
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
max_request_body_size: 16_777_216 # 16 MB before compression
Expand All @@ -96,7 +100,9 @@ exporters:
endpoint: ${SUMO_ENDPOINT_CONTROL_PLANE_METRICS_SOURCE}
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
max_request_body_size: 16_777_216 # 16 MB before compression
Expand All @@ -107,7 +113,9 @@ exporters:
endpoint: ${SUMO_ENDPOINT_CONTROLLER_METRICS_SOURCE}
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
max_request_body_size: 16_777_216 # 16 MB before compression
Expand All @@ -118,7 +126,9 @@ exporters:
endpoint: ${SUMO_ENDPOINT_KUBELET_METRICS_SOURCE}
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
max_request_body_size: 16_777_216 # 16 MB before compression
Expand All @@ -129,7 +139,9 @@ exporters:
endpoint: ${SUMO_ENDPOINT_NODE_METRICS_SOURCE}
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
max_request_body_size: 16_777_216 # 16 MB before compression
Expand All @@ -140,7 +152,9 @@ exporters:
endpoint: ${SUMO_ENDPOINT_SCHEDULER_METRICS_SOURCE}
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
max_request_body_size: 16_777_216 # 16 MB before compression
Expand All @@ -151,7 +165,9 @@ exporters:
endpoint: ${SUMO_ENDPOINT_STATE_METRICS_SOURCE}
sending_queue:
enabled: true
persistent_storage_enabled: {{ .Values.metadata.persistence.enabled }}
{{- if .Values.metadata.persistence.enabled }}
storage: file_storage
{{- end }}
num_consumers: 10
queue_size: 10_000
max_request_body_size: 16_777_216 # 16 MB before compression
Expand Down
2 changes: 1 addition & 1 deletion deploy/helm/sumologic/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3695,7 +3695,7 @@ metadata:
## Configure image for Opentelemetry Collector (for logs and metrics)
image:
repository: public.ecr.aws/sumologic/sumologic-otel-collector
tag: 0.57.2-sumo-1
tag: 0.66.0-sumo-0
pullPolicy: IfNotPresent

securityContext:
Expand Down
17 changes: 6 additions & 11 deletions tests/helm/metadata_logs_otc/static/otel.output.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
source_category: '%{_sourceCategory}'
source_host: '%{_sourceHost}'
source_name: '%{_sourceName}'
storage: file_storage
sumologic/systemd:
endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE}
json_logs:
Expand All @@ -36,11 +33,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
source_category: '%{_sourceCategory}'
source_host: '%{_sourceHost}'
source_name: '%{_sourceName}'
storage: file_storage
extensions:
file_storage:
compaction:
Expand Down Expand Up @@ -285,9 +279,10 @@ data:
sumologic_schema:
add_cloud_namespace: false
transform/remove_attributes:
logs:
queries:
- limit(attributes, 0)
log_statements:
- context: log
statements:
- limit(attributes, 0, [])
receivers:
otlp:
protocols:
Expand Down
17 changes: 6 additions & 11 deletions tests/helm/metadata_logs_otc/static/templates.output.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
source_category: '%{_sourceCategory}'
source_host: '%{_sourceHost}'
source_name: '%{_sourceName}'
storage: file_storage
sumologic/systemd:
endpoint: ${SUMO_ENDPOINT_DEFAULT_LOGS_SOURCE}
json_logs:
Expand All @@ -36,11 +33,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
source_category: '%{_sourceCategory}'
source_host: '%{_sourceHost}'
source_name: '%{_sourceName}'
storage: file_storage
extensions:
file_storage:
compaction:
Expand Down Expand Up @@ -285,9 +279,10 @@ data:
sumologic_schema:
add_cloud_namespace: false
transform/remove_attributes:
logs:
queries:
- limit(attributes, 0)
log_statements:
- context: log
statements:
- limit(attributes, 0, [])
receivers:
otlp:
protocols:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ spec:
priorityClassName: "prio"
containers:
- name: otelcol
image: public.ecr.aws/sumologic/sumologic-otel-collector:0.57.2-sumo-1
image: public.ecr.aws/sumologic/sumologic-otel-collector:0.66.0-sumo-0
imagePullPolicy: IfNotPresent
args:
- --config=/etc/otel/config.yaml
Expand Down
16 changes: 8 additions & 8 deletions tests/helm/metadata_metrics_otc/static/basic.output.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
sumologic/control_plane:
endpoint: ${SUMO_ENDPOINT_CONTROL_PLANE_METRICS_SOURCE}
Expand All @@ -29,8 +29,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
sumologic/controller:
endpoint: ${SUMO_ENDPOINT_CONTROLLER_METRICS_SOURCE}
Expand All @@ -39,8 +39,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
sumologic/default:
endpoint: ${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE}
Expand All @@ -49,8 +49,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
sumologic/kubelet:
endpoint: ${SUMO_ENDPOINT_KUBELET_METRICS_SOURCE}
Expand All @@ -59,8 +59,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
sumologic/node:
endpoint: ${SUMO_ENDPOINT_NODE_METRICS_SOURCE}
Expand All @@ -69,8 +69,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
sumologic/scheduler:
endpoint: ${SUMO_ENDPOINT_SCHEDULER_METRICS_SOURCE}
Expand All @@ -79,8 +79,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
sumologic/state:
endpoint: ${SUMO_ENDPOINT_STATE_METRICS_SOURCE}
Expand All @@ -89,8 +89,8 @@ data:
sending_queue:
enabled: true
num_consumers: 10
persistent_storage_enabled: true
queue_size: 10000
storage: file_storage
timeout: 30s
extensions:
file_storage:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ spec:
priorityClassName: "prio"
containers:
- name: otelcol
image: public.ecr.aws/sumologic/sumologic-otel-collector:0.57.2-sumo-1
image: public.ecr.aws/sumologic/sumologic-otel-collector:0.66.0-sumo-0
imagePullPolicy: IfNotPresent
args:
- --config=/etc/otel/config.yaml
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,4 +30,4 @@ otellogs:

metadata:
image:
tag: 0.56.0-sumo-0-fips
tag: 0.66.0-sumo-0-fips

0 comments on commit d2bd8fe

Please sign in to comment.