##############################################
# Global bits of config in the default section
#
[default]
#
# ID is used to identify pipeline to third parties, e.g. fluentd tag
# and prometheus instance
id = pipeline
#
# Prometheus setup to export metrics about the pipeline. This is where
# prometheus will come and scrape metrics from. This configuration is
# only pertinent if you plan to monitor the pipeline itself using
# prometheus - and can be ignored otherwise. You can comment out
# metamonitoring_prometheus_resource to stop pipeline from serving
# metrics. If you do wish to monitor the pipeline, tools/monitor/run.sh
# will fire up prometheus and grafana with some prebuilt dashboards
# (requires docker). Point a browser at localhost:3000 as per
# https://github.com/grafana/grafana#running
#
metamonitoring_prometheus_resource = /metrics
metamonitoring_prometheus_server = :8989
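#
# A minimal prometheus scrape job pointing at this endpoint might look
# like the following sketch (host/port are illustrative and assume
# pipeline runs locally; /metrics is the default metrics_path):
#
#   scrape_configs:
#     - job_name: 'pipeline'
#       static_configs:
#         - targets: ['localhost:8989']
#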
# fluentd setup allows pipeline to export its logs in support of
# manageability at scale. Tag used is derived from ID
#
# fluentd = localhost:24224
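#
# On the fluentd side, a standard forward input listening on the same
# port would accept these logs, assuming pipeline speaks the fluentd
# forward protocol (24224 is the forward default). A sketch of the
# fluentd configuration:
#
#   <source>
#     @type forward
#     port 24224
#     bind 0.0.0.0
#   </source>
#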
# base_path_xlation
# This option points at a file containing JSON mappings of base
# paths. It is typically used to morph PDT to MDT paths on the input
# side where it makes sense (i.e. the gpb and gpb codecs). It is not
# applied to SNMP since the base path is derived from the snmp
# spec. (JSON is missing because today the Telemetry header is not
# assumed for JSON inputs. This may change in the future.)
# base_path_xlation=bpx.json
#
# Sections start with a section name. The only constraint for section
# names is that 'default' is not used. You can have as many sections
# as you wish, as long as each section has a unique name.
##############################################
# Example of a TCP dialout (router connects to pipeline over TCP)
#
[testbed]
stage = xport_input
#
# Module type, the type dictates the rest of the options in the section.
# TCP can only be used as xport_input currently. UDP works similarly.
#
type = tcp
#
# Supported encapsulation is 'st' for streaming telemetry header. This
# is the header used to carry streaming telemetry payload in TCP and UDP.
#
encap = st
#
# TCP option dictating which binding to listen on. Can be a host name
# or address and port, or just port.
#
listen = :5432
#
# To enable dumping data as it is rxed, uncomment the following, and
# run with --debug option.
#
# logdata = on
#
# It is also possible to turn on TCP keepalives. Setting keepalive to
# 0 (default) stops pipeline from explicitly turning on
# keepalives. Otherwise, keepalives are turned on with the specified
# period. TCP keepalives do NOT need any special explicit
# configuration or support from the other end.
#
# keepalive_seconds = 0
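#
# For reference, the router-side TCP dial-out configuration on IOS XR
# typically takes roughly this shape (a sketch only; names, addresses,
# sensor paths and exact syntax vary by release):
#
#   telemetry model-driven
#    destination-group DG1
#     address-family ipv4 <pipeline-address> port 5432
#      encoding self-describing-gpb
#      protocol tcp
#    sensor-group SG1
#     sensor-path <yang-path>
#    subscription SUB1
#     sensor-group-id SG1 sample-interval 30000
#     destination-id DG1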
##############################################
# Example of a gRPC dialin (pipeline connects to router over gRPC)
# [mymdtrouter]
# stage = xport_input
#
# grpc: in the role of client connecting to the router, which does
# server side streaming.
#
# type = grpc
#
# Encoding requested by grpc client: gpbcompact, gpbkv
# This is only pertinent to dialin and describes the request
# to the router - i.e. do I want compact or k/v payload
# encoding = gpbkv
#
# Encapsulation pushed by client: gpb, gpbkv, gpbcompact
# As of 6.1.1 release, we support gpb (which is a common
# header used to carry compact and k/v gpb). In older
# releases, it might be necessary to configure compact or k/v.
# encap = gpb
#
# Server to connect to
#
#server = 192.168.123.2:57344
#
# Subscription IDs to subscribe to, as configured on the router. The
# set is specified as a comma separated string of subscription ID
# names.
#
#subscriptions = sub1,sub2
#
#
# Access control options; TLS, username/password
#
# TLS enabled or not, CA cert in PEM, and server name to accept.
#
# tls = false
# tls_pem = ca.pem
# tls_servername = tlsservername
#
# username/password
#
# Public key encrypted password is specified here. The pem with RSA
# key used to encrypt passwords is passed in as a cli argument.
#
# The encrypted password will be generated if pipeline is run, -pem is
# specified, and the password option is NOT included in the section. In
# that case the user will be prompted for user/pass, and a new separate
# copy of the configuration will be created for convenience.
#
# Passphrase-less RSA key pair can be generated using ssh-keygen if
# necessary: 'ssh-keygen -t rsa -b 4096'
#
# username = johndoe
# password = eBzbEJi6dXF+a9gmII7lJdtbd9o8HCDwjJqZl2eK...
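#
# For example, generating the key and letting pipeline prompt for and
# encrypt the credentials might look like this (binary invocation and
# file names are illustrative; the -pem flag is as described above):
#
#   ssh-keygen -t rsa -b 4096 -f pipeline_rsa
#   pipeline -pem pipeline_rsa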
#
# To enable dumping data as it is rxed, uncomment the following, and
# run with --debug option.
#
# logdata = on
#
# [mymdtrouter]
# stage = xport_input
# type = grpc
# encoding = gpbkv
# server = 192.168.123.2:57344
# subscriptions = HEALTH,QOS
# tls = true
# tls_pem = ca.pem
# tls_servername = tlsservername
# username = johndoe
# password = eBzbEJi6dXF+a9gmII7lJdtbd9o8HCDwjJqZl2eK...
##############################################
# Example of a gRPC dialout (router connects to pipeline over gRPC)
#[gRPCDialout]
#
# stage = xport_input
#
# grpc: in the role of server, with the router connecting to pipeline
# and doing client side streaming.
#
# type = grpc
#
# encap = gpb
# Encapsulation pushed by client: gpb, gpbkv, gpbcompact.
# As of 6.1.1 release, we support gpb (which is a common
# header used to carry compact and k/v gpb), and we default
# to gpb. In older releases, it might be necessary to configure
# compact or k/v.
#
# Socket to listen on
#
# listen = :57500
#
# Access control options; TLS
#
# TLS enabled or not, server cert in PEM, server key, and the server
# name the cert was issued for. (Look for CN in
# 'openssl x509 -noout -text -in <ca.pem>')
#
# tls = false
# tls_pem = cert.pem
# tls_key = server.key
# tls_servername = tlsservername
#
# To enable dumping data as it is rxed, uncomment the following, and
# run with --debug option.
#
# logdata = on
#
##############################################
# Example of a replay input stage: used to replay telemetry archive
#
# [replay_archive]
# stage = xport_input
# type = replay
#
# Mandatory setting indicating file containing the archive. The archive
# can be generated from real telemetry using an output 'tap' module
# configured with `raw=true` instead of an `encoding=` option.
# file = dump.bin
#
# The archive in the file will be replayed over and over if loop is
# set to true. Alternatively, the number of messages to send can be
# specified using the firstn option. Note that if firstn exceeds the
# number of messages in the archive, the stamped time will go back.
# (In future, a restamp option may be made available.) Loop is ignored
# if firstn is set.
# loop = false
# firstn = 1000
#
# The delay between messages originated can be configured in microseconds
# using the delayusec option. The default is a 200ms delay between replayed
# messages.
# delayusec = 200000
##############################################
# Example of a kafka output stage: used to publish content to kafka
#
# [mykafka]
# stage = xport_output
#
# Module type: kafka publisher is only supported in xport_output stage currently.
#
# type = kafka
#
# Kafka specific key option. This is an optional setting and is
# specific to a weird requirement of our home grown consumer on the
# kafka bus.
#
# key = id
#
# Encoding: gpb, gpbkv, json or json_events
#
# encoding = json
#
# Kafka specific option. The brokers for the kafka bus
#
# brokers = kafka.example.com:9092
#
# Kafka specific option. The topic to publish against.
#
# topic = telemetry
#
# Optional: It is also possible to specify a dynamic derivation of
# topic to send on. This mechanism is a text template applied to the
# metadata of the message; currently a structure containing the Path
# (encoding path) and Identifier (router name) fields. In future the
# whole message body may be exposed. In the case where the template
# fails to extract and return a string, the message will be published
# on 'topic' above.
#
# The example extracts the encoding path:
#
# topic_metadata_template = topic_template_testb.txt
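#
# A sketch of what such a template file might contain, given that the
# metadata exposes Path and Identifier fields (content illustrative):
#
#   {{.Path}}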
#
# The option required_acks can be used to influence whether the
# producer waits for acknowledgments from just the local broker
# (i.e. the broker to which the message is published), or for a
# consensus commit from replicas. The options are "local" and "commit"
# respectively. The default is "none".
#
# required_acks = none
#
# The optional buffered channel depth used to accommodate transient
# producer/consumer throughput.
#
# datachanneldepth = 1000
#
# To enable dumping data as it is rxed, uncomment the following, and
# run with --debug option.
#
# logdata = on
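#
# Once publishing, the content can be sanity checked with the stock
# kafka console consumer (broker and topic as configured above):
#
#   kafka-console-consumer.sh --bootstrap-server kafka.example.com:9092 \
#       --topic telemetry --from-beginning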
##############################################
# Example of kafka input stage: used to consume content from kafka
#
# [kafkaconsumer]
# type = kafka
# key = path_and_id
# stage = xport_input
# brokers = kafka.example.com:9092
# topic = consumertopic3
#
# Multiple clients can dynamically loadbalance consumption from different
# partitions as long as they share the same consumer group name
# consumergroup = mycollectors
# encoding = json
# logdata = on
##############################################
# Example of a tap stage; dump content to file, or at least count messages
#
[inspector]
stage = xport_output
#
# Module type: tap is only supported in xport_output stage currently.
#
type = tap
#
# File to dump decoded messages
#
file = /data/dump.txt
#
# encoding = json
#
# Options: json, json_events, gpb, gpb_kv. If the format is a binary
# format, or is not supported for the input encoding in use (gpb if
# proto is available, gpb k/v or JSON), we fall back to hex. The
# default is json.
#
# raw = true
#
# If raw is set to true, no encoding should be specified. The outcome
# is that raw payload is dumped on top of a streaming telemetry
# header. The resulting archive can be replayed (i.e. reread and fed
# to output stages) by pipeline using the 'replay' input module.
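#
# For example, a raw-capture tap section producing an archive that the
# [replay_archive] example above can read back might look like this
# (section name illustrative; options as described):
#
# [rawtap]
# stage = xport_output
# type = tap
# file = dump.bin
# raw = true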
#
# The optional buffered channel depth used to accommodate transient
# producer/consumer throughput.
#
datachanneldepth = 1000
# If all we want to do is count frames, set countonly to true
#
# countonly = false
##############################################
# Example of a metrics stage; allows us to export content to a time
# series database (influxdb or prometheus) as well as measure time
# distribution of samples.
#
# [mymetrics]
# stage = xport_output
#
# Module type: metrics is only supported in xport_output stage
#
# type = metrics
#
# Spec of metrics to collect in metrics.json
#
# file = /data/metrics.json
#
# The optional buffered channel depth used to accommodate transient
# producer/consumer throughput.
#
# datachanneldepth = 1000
#
# For troubleshooting purposes, it is possible to dump the metrics to
# a local file (or files), in order to understand what is being
# exported from the perspective of the pipeline metrics module.
#
# dump = metricsdump.txt
#
# What type of package we export to. Options include influx or prometheus.
#
# output = influx
#
# Config options for influx are currently:
#
# Influx server
# influx = http://influx.example.com:8086
#
# Database to populate, currently static (might be templatised in the future).
# Expectation is that database exists and user has read/write access.
# database = dbname
#
# Workers; number of different sessions set up with influx node.
# workers = 10
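#
# The database can be created up front with the influx CLI, assuming
# an InfluxDB 1.x deployment (host and database name as configured
# above):
#
#   influx -host influx.example.com -execute 'CREATE DATABASE dbname'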
#
# Config options for prometheus are currently:
#
# Prometheus option: pushgw, address:port to push metrics to
#
# pushgw = pushgw.example.com:9091
#
# Prometheus option: jobname provides the prometheus jobname associated
# with all exported metrics. Default provided
#
# jobname = telemetry
#
# Prometheus option: instance provides the prometheus instance
# associated with all exported metrics from this pipeline. Default
# provided is to use ID (in default section)
#
# instance = myinstance
#
# Track statistics about metrics collections. This only makes sense if
# we are metamonitoring too, i.e. the metamonitoring configuration in
# the default section is in use. Sensor collection stats are tracked
# if the metric specification indicates "track": true for the sensor
# in question. For any sensor marked "track" we export to prometheus
# statistics about the intersample gap. This allows us to visualise
# the median period, as well as the 90th and 99th percentile of all
# periods. In other words, we can plot over time the period within
# which the next sample is received, for n% of samples (n = 50, 90
# or 99).
#
# Max number of sensor instances tracked at one time. This is not
# cheap (e.g. we stash labels)! Must be set for any stats to be
# exported.
# statsensorcount = 1000
##############################################
# Example of a grpcOut stage which allows other parties to pull
# content from pipeline over gRPC. pipeline acts as a server and
# server side streams all content received using the specified
# encoding (usual matrix of input/output encoding applies)
# [mygrpcout]
# type = grpc
# stage = xport_output
# encoding = json
# listen = localhost:5959
# logdata = off
##############################################
# Example of how to apply template transformation on content pushed
# out of any output stage (exception: metrics output). Template is
# applied over GPB K/V input streams (others will follow if required)
# and uses golang text templates.
#
# [templatetest]
# stage = xport_output
# type = tap
# file = dumpfiltered.txt
# encoding = template
# template = filter_test.json
##############################################
# Example of a UDP stage which allows other parties to send content
# over UDP using the st header encapsulation (same as TCP)
# [udpin]
# type = udp
# stage = xport_input
# listen = localhost:5958
# encap = st
# logdata = off