chore: add example for cascadingfilter multiple instances (#1568)
* chore: add example for cascadingfilter multiple instances
* chore: update readme
mat-rumian committed Apr 26, 2024
1 parent bd4fa8b commit c898c47
Showing 2 changed files with 134 additions and 0 deletions.
2 changes: 2 additions & 0 deletions examples/otelcolconfigs/README.md
@@ -7,6 +7,8 @@
- [tracing agent configuration](./agent_configuration_template.yaml) - example configuration to collect traces.
- [tracing gateway configuration](./gateway_configuration_template.yaml) - example configuration to load balance traces using [loadbalancingexporter][loadbalancingexporter].
- [tracing sampler configuration](./sampler_configuration_template.yaml) - example configuration to filter traces using [cascadingfilterprocessor][cascadingfilterprocessor].
- [multiple tracing sampler configuration](./sampler_configuration_multiple_instances_template.yaml) - example configuration to filter traces using [cascadingfilterprocessor][cascadingfilterprocessor]
  when multiple collector instances share the same sampler configuration (see the snippet below).
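
The setting that adapts the cascading filter to multiple collector instances is `collector_instances`, which divides the configured `spans_per_second` limits by the number of collectors sharing the configuration. A minimal sketch with illustrative values (see the full template for the remaining options):

```yaml
processors:
  cascading_filter:
    ## Number of collector instances sharing this configuration;
    ## spans_per_second limits are divided by this value.
    collector_instances: 2
    spans_per_second: 1660
```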

[hostmetricsreceiver]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.52.0/receiver/hostmetricsreceiver
[json_parser]: https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/operators/json_parser.md
132 changes: 132 additions & 0 deletions examples/otelcolconfigs/sampler_configuration_multiple_instances_template.yaml
@@ -0,0 +1,132 @@
receivers:
  jaeger:
    protocols:
      thrift_compact:
        endpoint: "0.0.0.0:6831"
      thrift_binary:
        endpoint: "0.0.0.0:6832"
      grpc:
        endpoint: "0.0.0.0:14250"
      thrift_http:
        endpoint: "0.0.0.0:14268"
  opencensus:
    endpoint: "0.0.0.0:55678"
  otlp:
    protocols:
      grpc:
        endpoint: "0.0.0.0:4317"
      http:
        endpoint: "0.0.0.0:4318"
  zipkin:
    endpoint: "0.0.0.0:9411"
processors:
  ## The memory_limiter processor is used to prevent out of memory situations on the collector.
  memory_limiter:
    ## check_interval is the time between measurements of memory usage for the
    ## purposes of avoiding going over the limits. Defaults to zero, so no
    ## checks will be performed. Values below 1 second are not recommended since
    ## it can result in unnecessary CPU consumption.
    check_interval: 5s

    ## Maximum amount of memory, in MiB, targeted to be allocated by the process heap.
    ## Note that typically the total memory usage of the process will be about 50MiB higher
    ## than this value.
    limit_mib: 1900

  ## Smart cascading filtering rules with preset limits.
  cascading_filter:
    ## collector_instances (default = 1): When multiple collector deployments share a single
    ## cascadingfilter configuration, this setting should be used to scale down the global and
    ## per-policy spans_per_second limits accordingly. The value should be a positive integer
    ## equal to the number of collectors with cascadingfilter configured, e.g. collector_instances: 5,
    ## in which case the configured spans_per_second limits (global and per policy) are divided by 5.
    collector_instances: 1
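    ## For example, with the global spans_per_second: 1660 configured below and collector_instances: 2,
    ## each collector instance would effectively enforce a global limit of 830 spans per second.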
    ## (default = 30s): Wait time since the first span of a trace arrived before making
    ## a filtering decision
    decision_wait: 30s
    ## (default = 100000): Maximum number of traces kept in memory
    num_traces: 200000
    ## (default = 0): Expected number of new traces (helps in allocating data structures)
    expected_new_traces_per_sec: 1000
    ## (default = 0): defines the global limit of maximum number of spans per second
    ## that are going to be emitted
    spans_per_second: 1660
    ## Number of spans that are always probabilistically filtered
    ## (hence might be used for metrics calculation).
    probabilistic_filtering_rate: 100
    ## Rules that will reject all traces matching them
    trace_reject_filters:
      # Adjust or remove the example as needed
      - name: remove-all-traces-with-health-span
        name_pattern: "health.*"
      # Adjust or remove the example as needed
      - name: remove-all-traces-with-healthcheck-service
        attributes:
          - key: service.name
            values:
              - "healthcheck/.*"
        use_regex: true
    ## Rules that will accept traces matching these criteria, but only up to a given volume
    trace_accept_filters:
      # Adjust or remove the example as needed
      - name: sampling-priority
        attributes:
          - key: sampling.priority
            values:
              - "1"
        spans_per_second: 400 # <- adjust the output traffic level
      # Adjust or remove the example as needed
      - name: traces-with-errors
        properties:
          min_number_of_errors: 1
        spans_per_second: 400 # <- adjust the output traffic level
      # Adjust or remove the example as needed
      - name: traces-with-high-latency
        properties:
          min_duration: 10s
        spans_per_second: 800 # <- adjust the output traffic level
      # Adjust or remove the example as needed
      #- name: some-service-traces-with-some-attribute
      #  attributes:
      #    - key: service.name
      #      values:
      #        - some-service
      #    - key: important-key
      #      values:
      #        - value1
      #        - value2
      #  use_regex: true
      #  spans_per_second: 300 # <- adjust the output traffic level
      # Adjust or remove the example as needed
      - name: everything_else
        spans_per_second: -1 # If there's anything left in the budget, it will randomly select remaining traces

  ## The batch processor accepts spans and places them into batches grouped by node and resource
  batch:
    ## Number of spans after which a batch will be sent regardless of time
    send_batch_size: 256
    ## Never more than this many spans are being sent in a batch
    send_batch_max_size: 512
    ## Time duration after which a batch will be sent regardless of size
    timeout: 5s

extensions:
  health_check: {}
exporters:
  otlphttp:
    ## OTLP HTTP Endpoint URL
    endpoint: ENDPOINT_URL
service:
  extensions: [health_check]
  pipelines:
    traces:
      receivers: [jaeger, opencensus, otlp, zipkin]
      processors: [memory_limiter, cascading_filter, batch]
      exporters: [otlphttp]
  ## The following generates verbose logs with span content, useful to verify what
  ## metadata is being tagged. To enable, uncomment the telemetry.logs section below.
  ## There are two levels that could be used: `debug` and `info`, with the former
  ## being much more verbose and including (sampled) spans content.
  # telemetry:
  #   logs:
  #     level: debug
