diff --git a/telegraf/telegraf.conf b/telegraf/telegraf.conf index 94b3d0da4..8ebf0a7a7 100644 --- a/telegraf/telegraf.conf +++ b/telegraf/telegraf.conf @@ -30,12 +30,15 @@ ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ## Telegraf will send metrics to outputs in batches of at - ## most metric_batch_size metrics. + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## output, and will flush this buffer on a successful write. Oldest metrics ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -52,10 +55,20 @@ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - ## Run telegraf in debug mode + ## By default, precision will be set to the same timestamp order as the + ## collection interval, with the maximum being 1s. + ## Precision will NOT be used for service inputs, such as logparser and statsd. + ## Valid values are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. debug = false - ## Run telegraf in quiet mode + ## Run telegraf in quiet mode (error log messages only). quiet = false + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. @@ -75,11 +88,11 @@ urls = ["http://localhost:8086"] # required ## The target database for metrics (telegraf will create it if not exists). database = "telegraf" # required - ## Retention policy to write to. - retention_policy = "default" - ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - ## note: using "s" precision greatly improves InfluxDB compression. - precision = "s" + + ## Retention policy to write to. Empty string writes to the default rp. + retention_policy = "" + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all" + write_consistency = "any" ## Write timeout (for the InfluxDB client), formatted as a string. ## If not provided, will default to 5s. 0s means no timeout (not recommended). @@ -103,10 +116,10 @@ # [[outputs.amon]] # ## Amon Server Key # server_key = "my-server-key" # required. -# +# # ## Amon Instance URL # amon_instance = "https://youramoninstance" # required -# +# # ## Connection timeout. # # timeout = "5s" @@ -122,21 +135,21 @@ # ## Telegraf tag to use as a routing key # ## ie, if this tag exists, it's value will be used as the routing key # routing_tag = "host" -# +# # ## InfluxDB retention policy # # retention_policy = "default" # ## InfluxDB database # # database = "telegraf" # ## InfluxDB precision # # precision = "s" -# +# # ## Optional SSL Config # # ssl_ca = "/etc/telegraf/ca.pem" # # ssl_cert = "/etc/telegraf/cert.pem" # # ssl_key = "/etc/telegraf/key.pem" # ## Use SSL but skip chain & host verification # # insecure_skip_verify = false -# +# # ## Data format to output. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -147,26 +160,32 @@ # # Configuration for AWS CloudWatch output. 
# [[outputs.cloudwatch]] # ## Amazon REGION -# region = 'us-east-1' -# +# region = "us-east-1" +# # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) explicit credentials from 'access_key' and 'secret_key' -# ## 2) environment variables -# ## 3) shared credentials file -# ## 4) EC2 Instance Profile +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile # #access_key = "" # #secret_key = "" -# +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# # ## Namespace for the CloudWatch MetricDatums -# namespace = 'InfluxData/Telegraf' +# namespace = "InfluxData/Telegraf" # # Configuration for DataDog API to send metrics to. # [[outputs.datadog]] # ## Datadog API key # apikey = "my-secret-key" # required. -# +# # ## Connection timeout. # # timeout = "5s" @@ -175,7 +194,7 @@ # [[outputs.file]] # ## Files to write to, "stdout" is a specially handled file. # files = ["stdout", "/tmp/metrics.out"] -# +# # ## Data format to output. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -186,6 +205,8 @@ # # Configuration for Graphite server to send metrics to # [[outputs.graphite]] # ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. # servers = ["localhost:2003"] # ## Prefix metrics name # prefix = "" @@ -196,6 +217,27 @@ # timeout = 2 +# # Send telegraf metrics to graylog(s) +# [[outputs.graylog]] +# ## Udp endpoint for your graylog instance. +# servers = ["127.0.0.1:12201", "192.168.1.1:12201"] + + +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communcation to Instrumental +# debug = false + + # # Configuration for the Kafka server to send metrics to # [[outputs.kafka]] # ## URLs of kafka brokers @@ -205,14 +247,14 @@ # ## Telegraf tag to use as a routing key # ## ie, if this tag exists, it's value will be used as the routing key # routing_tag = "host" -# +# # ## CompressionCodec represents the various compression codecs recognized by # ## Kafka in messages. # ## 0 : No compression # ## 1 : Gzip compression # ## 2 : Snappy compression # compression_codec = 0 -# +# # ## RequiredAcks is used in Produce Requests to tell the broker how many # ## replica acknowledgements it must see before responding # ## 0 : the producer never waits for an acknowledgement from the broker. @@ -228,17 +270,17 @@ # ## guarantee that no messages will be lost as long as at least one in # ## sync replica remains. 
# required_acks = -1 -# +# # ## The total number of times to retry sending a message # max_retry = 3 -# +# # ## Optional SSL Config # # ssl_ca = "/etc/telegraf/ca.pem" # # ssl_cert = "/etc/telegraf/cert.pem" # # ssl_key = "/etc/telegraf/key.pem" # ## Use SSL but skip chain & host verification # # insecure_skip_verify = false -# +# # ## Data format to output. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -250,16 +292,22 @@ # [[outputs.kinesis]] # ## Amazon REGION of kinesis endpoint. # region = "ap-southeast-2" -# +# # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) explicit credentials from 'access_key' and 'secret_key' -# ## 2) environment variables -# ## 3) shared credentials file -# ## 4) EC2 Instance Profile +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile # #access_key = "" # #secret_key = "" -# +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# # ## Kinesis StreamName must exist prior to starting telegraf. # streamname = "StreamName" # ## PartitionKey as used for sharding data. @@ -281,36 +329,59 @@ # api_token = "my-secret-token" # required. # ## Debug # # debug = false -# ## Tag Field to populate source attribute (optional) -# ## This is typically the _hostname_ from which the metric was obtained. -# source_tag = "host" # ## Connection timeout. # # timeout = "5s" -# ## Output Name Template (same as graphite buckets) +# ## Output source Template (same as graphite buckets) # ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite -# template = "host.tags.measurement.field" +# ## This template is used in librato's source (not metric's name) +# template = "host" +# # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] # servers = ["localhost:1883"] # required. -# +# # ## MQTT outputs send metrics to this topic format # ## "///" # ## ex: prefix/web01.example.com/mem # topic_prefix = "telegraf" -# +# # ## username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" -# +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# ## Optional credentials +# # username = "" +# # password = "" +# ## NATS subject for producer messages +# subject = "telegraf" +# # ## Optional SSL Config # # ssl_ca = "/etc/telegraf/ca.pem" # # ssl_cert = "/etc/telegraf/cert.pem" # # ssl_key = "/etc/telegraf/key.pem" # ## Use SSL but skip chain & host verification # # insecure_skip_verify = false -# +# # ## Data format to output. 
# ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -324,7 +395,7 @@ # server = "localhost:4150" # ## NSQ topic for producer messages # topic = "telegraf" -# +# # ## Data format to output. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -336,14 +407,19 @@ # [[outputs.opentsdb]] # ## prefix for metrics keys # prefix = "my.specific.prefix." -# -# ## Telnet Mode ## -# ## DNS name of the OpenTSDB server in telnet mode +# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. # host = "opentsdb.example.com" -# -# ## Port of the OpenTSDB server in telnet mode +# +# ## Port of the OpenTSDB server # port = 4242 -# +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# httpBatchSize = 50 +# # ## Debug true - Prints OpenTSDB communication # debug = false @@ -365,6 +441,30 @@ +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + + ############################################################################### # INPUT PLUGINS # ############################################################################### @@ -375,8 +475,8 @@ percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## Comment this line if you want the raw CPU time metrics - fielddrop = ["time_*"] + ## If true, collect raw CPU time metrics. + collect_cpu_time = false # Read metrics about disk usage by mount point @@ -396,8 +496,8 @@ ## disk partitions. ## Setting devices will restrict the stats to the specified devices. # devices = ["sda", "sdb"] - ## Uncomment the following line if you do not need disk serial numbers. - # skip_serial_number = true + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false # Get kernel statistics from /proc/stat @@ -425,7 +525,7 @@ # no configuration -# # Read stats from an aerospike server +# # Read stats from aerospike server(s) # [[inputs.aerospike]] # ## Aerospike servers to connect to (with port) # ## This plugin will query all namespaces the aerospike @@ -436,6 +536,7 @@ # # Read Apache status information (mod_status) # [[inputs.apache]] # ## An array of Apache status URI to gather stats. +# ## Default is "http://localhost/server-status?auto". # urls = ["http://localhost/server-status?auto"] @@ -444,7 +545,7 @@ # ## Bcache sets path # ## If not specified, then default is: # bcachePath = "/sys/fs/bcache" -# +# # ## By default, telegraf gather stats for all bcache devices # ## Setting devices will restrict the stats to the specified # ## bcache devices. 
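# ## A minimal sketch of how the new processor and aggregator plugin types
# ## introduced above slot into a config next to an ordinary input. The
# ## pairing of these plugins is an illustration, not an upstream
# ## recommendation; the option values mirror the commented defaults shown
# ## elsewhere in this file. CPU metrics pass through the printer processor,
# ## and minmax keeps per-metric min/max aggregates without dropping the
# ## original points.
# [[inputs.cpu]]
# percpu = true
# totalcpu = true
# collect_cpu_time = false
#
# [[processors.printer]]
#
# [[aggregators.minmax]]
# period = "30s"
# drop_original = false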
@@ -469,43 +570,125 @@ # ] +# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as +# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands +# gather_cluster_stats = true + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/cgroup/memory", +# # "/cgroup/memory/child1", +# # "/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + # # Pull Metric Statistics from Amazon CloudWatch # [[inputs.cloudwatch]] # ## Amazon Region -# region = 'us-east-1' -# +# region = "us-east-1" +# # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) explicit credentials from 'access_key' and 'secret_key' -# ## 2) environment variables -# ## 3) shared credentials file -# ## 4) EC2 Instance Profile +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile # #access_key = "" # #secret_key = "" -# +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) -# period = '1m' -# +# period = "5m" +# # ## Collection Delay (required - must account for metrics availability via CloudWatch API) -# delay = '1m' -# +# delay = "5m" +# # ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid # ## gaps or overlap in pulled data -# interval = '1m' -# +# interval = "5m" +# +# ## Configure the TTL for the internal cache of metrics. +# ## Defaults to 1 hr if not specified +# #cache_ttl = "10m" +# # ## Metric Statistic Namespace (required) -# namespace = 'AWS/ELB' -# +# namespace = "AWS/ELB" +# +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 10. Optional - default value is 10. 
+# ratelimit = 10 +# # ## Metrics to Pull (optional) # ## Defaults to all Metrics in Namespace if nothing is provided # ## Refreshes Namespace available metrics every 1h # #[[inputs.cloudwatch.metrics]] -# # names = ['Latency', 'RequestCount'] +# # names = ["Latency", "RequestCount"] # # # # ## Dimension filters for Metric (optional) # # [[inputs.cloudwatch.metrics.dimensions]] -# # name = 'LoadBalancerName' -# # value = 'p-example' +# # name = "LoadBalancerName" +# # value = "p-example" + + +# # Gather health check statuses from services registered in Consul +# [[inputs.consul]] +# ## Most of these values defaults to the one configured on a Consul's agent level. +# ## Optional Consul server address (default: "localhost") +# # address = "localhost" +# ## Optional URI scheme for the Consul server (default: "http") +# # scheme = "http" +# ## Optional ACL token used in every request (default: "") +# # token = "" +# ## Optional username used for request HTTP Basic Authentication (default: "") +# # username = "" +# ## Optional password used for HTTP Basic Authentication (default: "") +# # password = "" +# ## Optional data centre to query the health checks from (default: "") +# # datacentre = "" # # Read metrics from one or many couchbase clusters @@ -542,17 +725,17 @@ # [[inputs.dns_query]] # ## servers to query # servers = ["8.8.8.8"] # required -# +# # ## Domains or subdomains to query. "."(root) is default # domains = ["."] # optional -# +# # ## Query record type. Default is "A" # ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # record_type = "A" # optional -# +# # ## Dns server port. 53 is default # port = 53 # optional -# +# # ## Query timeout in seconds. Default is 2 seconds # timeout = 2 # optional @@ -567,6 +750,13 @@ # container_names = [] # ## Timeout for docker list, info, and stats commands # timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...) and +# ## network (eth0, eth1, ...) stats or not +# perdevice = true +# ## Whether to report for each container total blkio and network stats or not +# total = false +# # # Read statistics from one or many dovecot servers @@ -588,26 +778,40 @@ # [[inputs.elasticsearch]] # ## specify a list of one or more Elasticsearch servers # servers = ["http://localhost:9200"] -# +# +# ## Timeout for HTTP requests to the elastic search server(s) +# http_timeout = "5s" +# # ## set local to false when you want to read the indices stats from all nodes # ## within the cluster # local = true -# +# # ## set cluster_health to true when you want to also obtain cluster level stats # cluster_health = false +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Read metrics from one or more commands that can output to stdout # [[inputs.exec]] # ## Commands array -# commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] -# +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# # ## Timeout for each command to complete. # timeout = "5s" -# +# # ## measurement name suffix (for separating different commands) # name_suffix = "_mycollector" -# +# # ## Data format to consume. 
# ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -631,15 +835,57 @@ # md5 = false +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# ## API endpoint, currently supported API: +# ## +# ## - multiple (Ex http://:12900/system/metrics/multiple) +# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. +# ## Endpoint can contain namespace and multiple type calls. +# ## +# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## of endpoints +# servers = [ +# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# ] +# +# ## Metrics list +# ## List of metrics can be found on Graylog webservice documentation. +# ## Or by hitting the the web service api at: +# ## http://[graylog-host]:12900/system/metrics +# metrics = [ +# "jvm.cl.loaded", +# "jvm.memory.pools.Metaspace.committed" +# ] +# +# ## Username and password +# username = "" +# password = "" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics of haproxy, via socket or csv stats page # [[inputs.haproxy]] # ## An array of address to gather stats about. Specify an ip on hostname # ## with optional port. ie localhost, 10.10.3.33:1936, etc. -# -# ## If no servers are specified, then default to 127.0.0.1:1936 -# servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] -# ## Or you can also use local socket -# ## servers = ["socket:/run/haproxy/admin.sock"] +# ## Make sure you specify the complete path to the stats endpoint +# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats +# # +# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# ## +# ## You can also use local socket with standard wildcard globbing. +# ## Server address not starting with 'http' will be treated as a possible +# ## socket, so both examples below are valid. +# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] # # HTTP/HTTPS request given an address a method and a timeout @@ -647,7 +893,7 @@ # ## Server address (default http://localhost) # address = "http://github.com" # ## Set response_timeout (default 5 seconds) -# response_timeout = 5 +# response_timeout = "5s" # ## HTTP Request Method # method = "GET" # ## Whether to follow redirects from the server (defaults to false) @@ -659,41 +905,50 @@ # # body = ''' # # {'fake':'data'} # # ''' +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Read flattened metrics from one or more JSON HTTP endpoints # [[inputs.httpjson]] # ## NOTE This plugin only reads numerical measurements, strings and booleans # ## will be ignored. 
-# +# # ## a name for the service being polled # name = "webserver_stats" -# +# # ## URL of each server in the service's cluster # servers = [ # "http://localhost:9999/stats/", # "http://localhost:9998/stats/", # ] -# +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# # ## HTTP method to use: GET or POST (case-sensitive) # method = "GET" -# +# # ## List of tag names to extract from top-level of JSON server response # # tag_keys = [ # # "my_tag_1", # # "my_tag_2" # # ] -# +# # ## HTTP parameters (all values must be strings) # [inputs.httpjson.parameters] # event_type = "cpu_spike" # threshold = "0.75" -# +# # ## HTTP Header parameters (all values must be strings) # # [inputs.httpjson.headers] # # X-Auth-Token = "my-xauth-token" # # apiVersion = "v1" -# +# # ## Optional SSL Config # # ssl_ca = "/etc/telegraf/ca.pem" # # ssl_cert = "/etc/telegraf/cert.pem" @@ -707,11 +962,15 @@ # ## Works with InfluxDB debug endpoints out of the box, # ## but other services can use this format too. # ## See the influxdb plugin's README for more details. -# +# # ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". # urls = [ # "http://localhost:8086/debug/vars" # ] +# +# ## http request & header timeout +# timeout = "5s" # # Read metrics from one or many bare metal servers @@ -727,8 +986,9 @@ # # Read JMX metrics through Jolokia # [[inputs.jolokia]] # ## This is the context root used to compose the jolokia url +# ## NOTE that your jolokia security policy must allow for POST requests. # context = "/jolokia" -# +# # ## This specifies the mode used # # mode = "proxy" # # @@ -738,8 +998,8 @@ # # [inputs.jolokia.proxy] # # host = "127.0.0.1" # # port = "8080" -# -# +# +# # ## List of servers exposing jolokia read service # [[inputs.jolokia.servers]] # name = "as-server-01" @@ -747,7 +1007,7 @@ # port = "8080" # # username = "myuser" # # password = "mypassword" -# +# # ## List of metrics collected on above servers # ## Each metric consists in a name, a jmx path and either # ## a pass or drop slice attribute. @@ -756,13 +1016,13 @@ # name = "heap_memory_usage" # mbean = "java.lang:type=Memory" # attribute = "HeapMemoryUsage" -# +# # ## This collect thread counts metrics. # [[inputs.jolokia.metrics]] # name = "thread_count" # mbean = "java.lang:type=Threading" # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" -# +# # ## This collect number of class loaded/unloaded counts metrics. # [[inputs.jolokia.metrics]] # name = "class_count" @@ -770,6 +1030,22 @@ # attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" +# # Read metrics from the kubernetes kubelet api +# [[inputs.kubernetes]] +# ## URL for the kubelet +# url = "http://1.1.1.1:10255" +# +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token +# +# ## Optional SSL Config +# # ssl_ca = /path/to/cafile +# # ssl_cert = /path/to/certfile +# # ssl_key = /path/to/keyfile +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics from a LeoFS Server via SNMP # [[inputs.leofs]] # ## An array of URI to gather stats about LeoFS. @@ -815,21 +1091,33 @@ # # Telegraf plugin for gathering metrics from N Mesos masters # [[inputs.mesos]] -# # Timeout, in ms. +# ## Timeout, in ms. # timeout = 100 -# # A list of Mesos masters, default value is localhost:5050. +# ## A list of Mesos masters. 
# masters = ["localhost:5050"] -# # Metrics groups to be collected, by default, all enabled. +# ## Master metrics groups to be collected, by default, all enabled. # master_collections = [ # "resources", # "master", # "system", -# "slaves", +# "agents", # "frameworks", +# "tasks", # "messages", # "evqueue", # "registrar", # ] +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] # # Read metrics from one or many MongoDB servers @@ -840,6 +1128,7 @@ # ## mongodb://10.10.3.33:18832, # ## 10.0.0.1:10000, etc. # servers = ["127.0.0.1:27017"] +# gather_perdb_stats = false # # Read metrics from one or many mysql servers @@ -848,8 +1137,8 @@ # ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] # ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name # ## e.g. -# ## root:passwd@tcp(127.0.0.1:3306)/?tls=false -# ## root@tcp(127.0.0.1:3306)/?tls=false +# ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false +# ## db_user@tcp(127.0.0.1:3306)/?tls=false # # # ## If no servers are specified, then localhost is used as the host. # servers = ["tcp(127.0.0.1:3306)/"] @@ -876,13 +1165,13 @@ # ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false # # -# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE # gather_table_io_waits = false # # # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS # gather_table_lock_waits = false # # -# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE # gather_index_io_waits = false # # # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS @@ -913,14 +1202,15 @@ # protocol = "tcp" # ## Server address (default localhost) # address = "github.com:80" -# ## Set timeout (default 1.0 seconds) -# timeout = 1.0 -# ## Set read timeout (default 1.0 seconds) -# read_timeout = 1.0 +# ## Set timeout +# timeout = "1s" +# # ## Optional string sent to the server # # send = "ssh" # ## Optional expected string in answer # # expect = "ssh" +# ## Set read timeout (only used if expecting a response) +# read_timeout = "1s" # # Read TCP metrics such as established, time wait and sockets counts. @@ -940,6 +1230,18 @@ # endpoints = ["http://localhost:4151"] +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + # # Get standard NTP query metrics, requires ntpq executable. # [[inputs.ntpq]] # ## If false, set the -n ntpq flag. Can reduce metric gather time. @@ -991,13 +1293,13 @@ # ## urls to ping # urls = ["www.google.com"] # required # ## number of pings to send per collection (ping -c ) -# count = 1 # required +# # count = 1 # ## interval, in s, at which to ping. 0 == default (ping -i ) -# ping_interval = 0.0 -# ## ping timeout, in s. 
0 == no timeout (ping -W ) -# timeout = 1.0 +# # ping_interval = 1.0 +# ## per-ping timeout, in s. 0 == no timeout (ping -W ) +# # timeout = 1.0 # ## interface to send ping from (ping -I ) -# interface = "" +# # interface = "" # # Read metrics from one or many postgresql servers @@ -1016,9 +1318,13 @@ # ## to grab metrics for. # ## # address = "host=localhost user=postgres sslmode=disable" -# +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# # ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. +# ## databases are gathered. Do NOT use with the 'ignore_databases' option. # # databases = ["app_production", "testing"] @@ -1098,7 +1404,10 @@ # # pattern = "nginx" # ## user as argument for pgrep (ie, pgrep -u ) # # user = "nginx" -# +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" # ## Field name prefix # prefix = "" # ## comment this out if you want raw cpu_time stats @@ -1109,11 +1418,16 @@ # [[inputs.prometheus]] # ## An array of urls to scrape metrics from. # urls = ["http://localhost:9100/metrics"] -# -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false +# # ## Use bearer token for authorization # # bearer_token = /path/to/bearer/token +# +# ## Optional SSL Config +# # ssl_ca = /path/to/cafile +# # ssl_cert = /path/to/certfile +# # ssl_key = /path/to/keyfile +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Reads last_run_summary.yaml file and converts to measurments @@ -1124,11 +1438,18 @@ # # Read metrics from one or many RabbitMQ servers via the management API # [[inputs.rabbitmq]] -# url = "http://localhost:15672" # required +# # url = "http://localhost:15672" # # name = "rmq-server-1" # optional tag # # username = "guest" # # password = "guest" -# +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# # ## A list of nodes to pull metrics about. If not specified, metrics for # ## all nodes are gathered. # # nodes = ["rabbit@node1", "rabbit@node2"] @@ -1147,6 +1468,7 @@ # ## e.g. # ## tcp://localhost:6379 # ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock # ## # ## If no servers are specified, then localhost is used as the host. # ## If no port is specified, 6379 is used @@ -1169,8 +1491,67 @@ # servers = ["http://localhost:8098"] -# # Reads oids value from one or many snmp agents +# # Retrieves SNMP values from remote agents # [[inputs.snmp]] +# agents = [ "127.0.0.1:161" ] +# ## Timeout for each SNMP query. +# timeout = "5s" +# ## Number of retries to attempt within timeout. +# retries = 3 +# ## SNMP version, values can be 1, 2, or 3 +# version = 2 +# +# ## SNMP community string. 
+# community = "public" +# +# ## The GETBULK max-repetitions parameter +# max_repetitions = 10 +# +# ## SNMPv3 auth parameters +# #sec_name = "myuser" +# #auth_protocol = "md5" # Values: "MD5", "SHA", "" +# #auth_password = "pass" +# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" +# #context_name = "" +# #priv_protocol = "" # Values: "DES", "AES", "" +# #priv_password = "" +# +# ## measurement name +# name = "system" +# [[inputs.snmp.field]] +# name = "hostname" +# oid = ".1.0.0.1.1" +# [[inputs.snmp.field]] +# name = "uptime" +# oid = ".1.0.0.1.2" +# [[inputs.snmp.field]] +# name = "load" +# oid = ".1.0.0.1.3" +# [[inputs.snmp.field]] +# oid = "HOST-RESOURCES-MIB::hrMemorySize" +# +# [[inputs.snmp.table]] +# ## measurement name +# name = "remote_servers" +# inherit_tags = [ "hostname" ] +# [[inputs.snmp.table.field]] +# name = "server" +# oid = ".1.0.0.0.1.0" +# is_tag = true +# [[inputs.snmp.table.field]] +# name = "connections" +# oid = ".1.0.0.0.1.1" +# [[inputs.snmp.table.field]] +# name = "latency" +# oid = ".1.0.0.0.1.2" +# +# [[inputs.snmp.table]] +# ## auto populate table's fields using the MIB +# oid = "HOST-RESOURCES-MIB::hrNetworkTable" + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] # ## Use 'oids.txt' file to translate oids to names # ## To generate 'oids.txt' you need to run: # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt @@ -1192,7 +1573,7 @@ # collect = ["mybulk", "sysservices", "sysdescr"] # # Simple list of OIDs to get, in addition to "collect" # get_oids = [] -# +# # [[inputs.snmp.host]] # address = "192.168.2.3:161" # community = "public" @@ -1204,31 +1585,31 @@ # "ifNumber", # ".1.3.6.1.2.1.1.3.0", # ] -# +# # [[inputs.snmp.get]] # name = "ifnumber" # oid = "ifNumber" -# +# # [[inputs.snmp.get]] # name = "interface_speed" # oid = "ifSpeed" # instance = "0" -# +# # [[inputs.snmp.get]] # name = "sysuptime" # oid = ".1.3.6.1.2.1.1.3.0" # unit = "second" -# +# # [[inputs.snmp.bulk]] # name = "mybulk" # max_repetition = 127 # oid = ".1.3.6.1.2.1.1" -# +# # [[inputs.snmp.bulk]] # name = "ifoutoctets" # max_repetition = 127 # oid = "ifOutOctets" -# +# # [[inputs.snmp.host]] # address = "192.168.2.13:161" # #address = "127.0.0.1:161" @@ -1241,19 +1622,19 @@ # [[inputs.snmp.host.table]] # name = "iftable3" # include_instances = ["enp5s0", "eth1"] -# +# # # SNMP TABLEs # # table without mapping neither subtables # [[inputs.snmp.table]] # name = "iftable1" # oid = ".1.3.6.1.2.1.31.1.1.1" -# +# # # table without mapping but with subtables # [[inputs.snmp.table]] # name = "iftable2" # oid = ".1.3.6.1.2.1.31.1.1.1" # sub_tables = [".1.3.6.1.2.1.2.2.1.13"] -# +# # # table with mapping but without subtables # [[inputs.snmp.table]] # name = "iftable3" @@ -1261,7 +1642,7 @@ # # if empty. get all instances # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" # # if empty, get all subtables -# +# # # table with both mapping and subtables # [[inputs.snmp.table]] # name = "iftable4" @@ -1286,68 +1667,6 @@ # # ] -# # Sysstat metrics collector -# [[inputs.sysstat]] -# ## Path to the sadc command. -# # -# ## Common Defaults: -# ## Debian/Ubuntu: /usr/lib/sysstat/sadc -# ## Arch: /usr/lib/sa/sadc -# ## RHEL/CentOS: /usr/lib64/sa/sadc -# sadc_path = "/usr/lib/sa/sadc" # required -# # -# # -# ## Path to the sadf command, if it is not in PATH -# # sadf_path = "/usr/bin/sadf" -# # -# # -# ## Activities is a list of activities, that are passed as argument to the -# ## sadc collector utility (e.g: DISK, SNMP etc...) 
-# ## The more activities that are added, the more data is collected. -# # activities = ["DISK"] -# # -# # -# ## Group metrics to measurements. -# ## -# ## If group is false each metric will be prefixed with a description -# ## and represents itself a measurement. -# ## -# ## If Group is true, corresponding metrics are grouped to a single measurement. -# # group = true -# # -# # -# ## Options for the sadf command. The values on the left represent the sadf -# ## options and the values on the right their description (wich are used for -# ## grouping and prefixing metrics). -# ## -# ## Run 'sar -h' or 'man sar' to find out the supported options for your -# ## sysstat version. -# [inputs.sysstat.options] -# -C = "cpu" -# -B = "paging" -# -b = "io" -# -d = "disk" # requires DISK activity -# "-n ALL" = "network" -# "-P ALL" = "per_cpu" -# -q = "queue" -# -R = "mem" -# -r = "mem_util" -# -S = "swap_util" -# -u = "cpu_util" -# -v = "inode" -# -W = "swap" -# -w = "task" -# # -H = "hugepages" # only available for newer linux distributions -# # "-I ALL" = "interrupts" # requires INT activity -# # -# # -# ## Device tags can be used to add additional tags for devices. -# ## For example the configuration below adds a tag vg with value rootvg for -# ## all metrics with sda devices. -# # [[inputs.sysstat.device_tags.sda]] -# # vg = "rootvg" - - # # Inserts sine and cosine waves for demonstration purposes # [[inputs.trig]] # ## Set the amplitude @@ -1362,25 +1681,37 @@ # pools = ["redis_pool", "mc_pool"] -# # Read metrics of ZFS from arcstats, zfetchstats and vdev_cache_stats +# # A plugin to collect stats from Varnish HTTP Cache +# [[inputs.varnish]] +# ## The default location of the varnishstat binary can be overridden with: +# binary = "/usr/bin/varnishstat" +# +# ## By default, telegraf gather stats for 3 metric points. +# ## Setting stats will override the defaults shown below. +# ## Glob matching can be used, ie, stats = ["MAIN.*"] +# ## stats may also be set to ["*"], which will collect all stats +# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools # [[inputs.zfs]] -# ## ZFS kstat path +# ## ZFS kstat path. Ignored on FreeBSD # ## If not specified, then default is: -# kstatPath = "/proc/spl/kstat/zfs" -# +# # kstatPath = "/proc/spl/kstat/zfs" +# # ## By default, telegraf gather all zfs stats # ## If not specified, then default is: -# kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] -# +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# # ## By default, don't gather zpool stats -# poolMetrics = false +# # poolMetrics = false # # Reads 'mntr' stats from one or many zookeeper servers # [[inputs.zookeeper]] # ## An array of address to gather stats about. Specify an ip or hostname # ## with port. ie localhost:2181, 10.0.0.1:2181, etc. -# +# # ## If no servers are specified, then localhost is used as the host. 
# ## If no port is specified, 2181 is used # servers = [":2181"] @@ -1391,10 +1722,23 @@ # SERVICE INPUT PLUGINS # ############################################################################### -# # A Github Webhook Event collector -# [[inputs.github_webhooks]] -# ## Address and port to host Webhook listener on -# service_address = ":1618" +# # Influx HTTP write listener +# [[inputs.http_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) +# max_body_size = 0 +# +# ## Maximum line size allowed to be sent in bytes. +# ## 0 means to use the default of 65536 bytes (64 kibibytes) +# max_line_size = 0 # # Read metrics from Kafka topic(s) @@ -1404,12 +1748,12 @@ # ## an array of Zookeeper connection strings # zookeeper_peers = ["localhost:2181"] # ## Zookeeper Chroot -# zookeeper_chroot = "/" +# zookeeper_chroot = "" # ## the name of the consumer group # consumer_group = "telegraf_metrics_consumers" # ## Offset (must be either "oldest" or "newest") # offset = "oldest" -# +# # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1417,37 +1761,68 @@ # data_format = "influx" +# # Stream and parse log file(s). +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# ## Read file from beginning. +# from_beginning = false +# +# ## Parse logstash-style "grok" patterns: +# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10 +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. +# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' + + # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] # servers = ["localhost:1883"] # ## MQTT QoS, must be 0, 1, or 2 # qos = 0 -# +# # ## Topics to subscribe to # topics = [ # "telegraf/host01/cpu", # "telegraf/+/mem", # "sensors/#", # ] -# +# # # if true, messages that can't be delivered while the subscriber is offline # # will be delivered when it comes back (such as on service restart). # # NOTE: if true, client_id MUST be set # persistent_session = false # # If empty, a random client ID will be generated. # client_id = "" -# +# # ## username and password to connect MQTT server. 
# # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" -# +# # ## Optional SSL Config # # ssl_ca = "/etc/telegraf/ca.pem" # # ssl_cert = "/etc/telegraf/cert.pem" # # ssl_key = "/etc/telegraf/key.pem" # ## Use SSL but skip chain & host verification # # insecure_skip_verify = false -# +# # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1458,14 +1833,34 @@ # # Read metrics from NATS subject(s) # [[inputs.nats_consumer]] # ## urls of NATS servers -# servers = ["nats://localhost:4222"] +# # servers = ["nats://localhost:4222"] # ## Use Transport Layer Security -# secure = false +# # secure = false # ## subject(s) to consume -# subjects = ["telegraf"] +# # subjects = ["telegraf"] # ## name a queue group -# queue_group = "telegraf_consumers" -# +# # queue_group = "telegraf_consumers" +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# ## An string representing the NSQD TCP Endpoint +# server = "localhost:4150" +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1487,24 +1882,24 @@ # delete_timings = true # ## Percentiles to calculate for timing & histogram stats # percentiles = [90] -# +# # ## separator to use between elements of a statsd metric # metric_separator = "_" -# +# # ## Parses tags in the datadog statsd format # ## http://docs.datadoghq.com/guides/dogstatsd/ # parse_data_dog_tags = false -# +# # ## Statsd data translation templates, more info can be read here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite # # templates = [ # # "cpu.* measurement*" # # ] -# +# # ## Number of UDP messages allowed to queue up, once filled, # ## the statsd server will start dropping packets # allowed_pending_messages = 10000 -# +# # ## Number of timing/histogram values to track per-measurement in the # ## calculation of percentiles. Raising this limit increases the accuracy # ## of percentiles but also increases the memory usage and cpu time. @@ -1525,7 +1920,7 @@ # files = ["/var/mymetrics.out"] # ## Read file from beginning. # from_beginning = false -# +# # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1536,15 +1931,15 @@ # # Generic TCP listener # [[inputs.tcp_listener]] # ## Address and port to host TCP listener on -# service_address = ":8094" -# +# # service_address = ":8094" +# # ## Number of TCP messages allowed to queue up. Once filled, the # ## TCP listener will start dropping packets. -# allowed_pending_messages = 10000 -# +# # allowed_pending_messages = 10000 +# # ## Maximum number of concurrent TCP connections to allow -# max_tcp_connections = 250 -# +# # max_tcp_connections = 250 +# # ## Data format to consume. 
# ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1555,15 +1950,32 @@ # # Generic UDP listener # [[inputs.udp_listener]] # ## Address and port to host UDP listener on -# service_address = ":8092" -# +# # service_address = ":8092" +# # ## Number of UDP messages allowed to queue up. Once filled, the # ## UDP listener will start dropping packets. -# allowed_pending_messages = 10000 -# +# # allowed_pending_messages = 10000 +# # ## Data format to consume. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" + +# # A Webhooks Event collector +# [[inputs.webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1619" +# +# [inputs.webhooks.filestack] +# path = "/filestack" +# +# [inputs.webhooks.github] +# path = "/github" +# +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# +# [inputs.webhooks.rollbar] +# path = "/rollbar"
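# ## A minimal sketch of wiring the new webhooks service input to the file
# ## output so incoming events can be inspected on stdout. The pairing is an
# ## illustration, not an upstream recommendation; the listener address, the
# ## github path, and the data_format value mirror the defaults shown above.
# [[inputs.webhooks]]
# service_address = ":1619"
#
# [inputs.webhooks.github]
# path = "/github"
#
# [[outputs.file]]
# files = ["stdout"]
# data_format = "influx"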