#
# MANAGED BY PUPPET
#
[Main]
# The host of the Datadog intake server to send agent data to
dd_url: <%= @dd_url %>
# If you need a proxy to connect to the Internet, provide the settings here
<% if @proxy_host.empty? -%>
# proxy_host:
<% else -%>
proxy_host: <%= @proxy_host %>
<% end -%>
<% if @proxy_port.empty? -%>
# proxy_port:
<% else -%>
proxy_port: <%= @proxy_port %>
<% end -%>
<% if @proxy_user.empty? -%>
# proxy_user:
<% else -%>
proxy_user: <%= @proxy_user %>
<% end -%>
<% if @proxy_password.empty? -%>
# proxy_password:
<% else -%>
proxy_password: <%= @proxy_password %>
<% end -%>
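# For example, with proxy_host => 'proxy.example.com' and proxy_port => '3128'
# (hypothetical values), the rendered config would contain the uncommented
# lines "proxy_host: proxy.example.com" and "proxy_port: 3128"; empty
# parameters are left commented out, as above.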
# If you run the agent behind haproxy, you might want to set this to yes
# skip_ssl_validation: no
# The Datadog api key to associate your Agent's data with your organization.
# Can be found here:
# https://app.datadoghq.com/account/settings
api_key: <%= @api_key %>
# Force the hostname to whatever you want.
<% if @host.empty? -%>
# hostname:
<% else -%>
hostname: <%= @host %>
<% end -%>
# Set the host's tags
#tags: mytag0, mytag1
<%
  # Combine the user-supplied tags with tags derived from Puppet facts.
  # Work on a copy so the @tags parameter itself is not mutated.
  tag_list = @tags.dup
  @facts_to_tags.each do |f|
    value = scope.lookupvar(f)
    unless value.nil?
      tag_list << "#{f}:#{value}"
    end
  end
  unless tag_list.empty?
-%>
tags: <%= tag_list.join(', ') %>
<% end -%>
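# For example, with tags => ['role:web'] and facts_to_tags => ['osfamily']
# (hypothetical values, assuming the osfamily fact resolves to 'RedHat'),
# the rendered line would be:
# tags: role:web, osfamily:RedHat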
# Collect AWS EC2 custom tags as agent tags
# collect_ec2_tags: no
# Collect instance metadata
# The Agent will try to collect instance metadata for EC2 and GCE instances by
# trying to connect to the local endpoint: http://169.254.169.254
# See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
# and https://developers.google.com/compute/docs/metadata
# for more information
# collect_instance_metadata: yes
# Set the threshold for accepting points: anything with a timestamp within
# recent_point_threshold seconds is accepted
# Defaults to 30 seconds if no value is provided
#recent_point_threshold: 30
# Use mount points instead of volumes to track disk and fs metrics
use_mount: <%= @use_mount %>
# Change port the Agent is listening to
# listen_port: 17123
# Start a graphite listener on this port
# graphite_listen_port: 17124
# Additional directory to look for Datadog checks
# additional_checksd: /etc/dd-agent/checks.d/
# Allow non-local traffic to this Agent
# This is required when using this Agent as a proxy for other Agents
# that might not have an internet connection
# For more information, please see
# https://github.com/DataDog/dd-agent/wiki/Network-Traffic-and-Proxy-Configuration
non_local_traffic: <%= @non_local_traffic %>
# Select the Tornado HTTP Client in the forwarder
# Defaults to the simple http client
# use_curl_http_client: False
# The loopback address the Forwarder and Dogstatsd will bind to.
# Optional, it is mainly used when running the agent on Openshift
# bind_host: localhost
# ========================================================================== #
# Pup configuration
# ========================================================================== #
# Pup is a small server that displays metric data collected by the Agent.
# Think of it as a fancy status page or a toe dip into the world of
# datadog. It can be connected to on the port below.
# use_pup: no
# pup_port: 17125
# pup_interface: localhost
# pup_url: http://localhost:17125
# ========================================================================== #
# DogStatsd configuration #
# ========================================================================== #
# If you don't want to enable the DogStatsd server, set this option to no
# use_dogstatsd: yes
# DogStatsd is a small server that aggregates your custom app metrics. For
# usage information, check out http://api.datadoghq.com
# Make sure your client is sending to the same port.
# dogstatsd_port : 8125
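# As an example, a client sends plain UDP datagrams in the StatsD/DogStatsd
# wire format to this port (hypothetical metric name and tag):
# my_app.page.views:1|c|#env:prod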
# By default dogstatsd will post aggregate metrics to the Agent (which handles
# errors/timeouts/retries/etc). To send directly to the datadog api, set this
# to https://app.datadoghq.com.
# dogstatsd_target : http://localhost:17123
## The dogstatsd flush period.
# dogstatsd_interval : 10
## If 'yes', counters and rates will be normalized to 1 second (that is,
## divided by dogstatsd_interval) before being sent to the server. Defaults to 'yes'
# dogstatsd_normalize : yes
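# For example, a counter incremented 50 times during a 10 second flush interval
# is reported as 5 per second when normalized, and as 50 when not.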
# If you want to forward every packet received by the dogstatsd server
# to another statsd server, uncomment these lines.
# WARNING: Make sure that forwarded packets are regular statsd packets and not "dogstatsd" packets,
# as your other statsd server might not be able to handle them.
# statsd_forward_host: address_of_own_statsd_server
# statsd_forward_port: 8125
# ========================================================================== #
# Service-specific configuration #
# ========================================================================== #
# -------------------------------------------------------------------------- #
# Disk #
# -------------------------------------------------------------------------- #
# Some infrastructures have many constantly changing virtual devices (e.g. folks
# running constantly churning linux containers) whose metrics aren't
# interesting for datadog. To filter out a particular pattern of devices
# from collection, configure a regex here:
# device_blacklist_re: .*\/dev\/mapper\/lxc-box.*
# -------------------------------------------------------------------------- #
# Ganglia #
# -------------------------------------------------------------------------- #
# Ganglia host where gmetad is running
#ganglia_host: localhost
# Ganglia port where gmetad is running
#ganglia_port: 8651
# -------------------------------------------------------------------------- #
# Dogstream (log file parser)
# -------------------------------------------------------------------------- #
# Comma-separated list of logs to parse and optionally custom parsers to use.
# The form should look like this:
#
# dogstreams: /path/to/log1:parsers_module:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Or this:
#
# dogstreams: /path/to/log1:/path/to/my/parsers_module.py:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Each entry is a path to a log file and optionally a Python module/function pair
# separated by colons.
#
# Custom parsers should take two parameters: a logger object and
# a string containing the current line to parse. They should return a tuple of
# the form:
# (metric (str), timestamp (unix timestamp), value (float), attributes (dict))
# where attributes should at least contain the key 'metric_type', specifying
# whether the given metric is a 'counter' or 'gauge'.
#
# Unless parsers are specified with an absolute path, the modules must exist in
# the Agent's PYTHONPATH. You can set this as an environment variable when
# starting the Agent. If the name of the custom parser function is not passed,
# 'parser' is assumed.
#
# If this value isn't specified, the default parser assumes this log format:
# metric timestamp value key0=val0 key1=val1 ...
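#
# A minimal sketch of such a parser (hypothetical module
# /path/to/my/parsers_module.py, using the default function name 'parser'
# and the default log format above):
#
#   def parser(logger, line):
#       # expects lines like: "my.metric 1418700000 42.0"
#       metric, timestamp, value = line.split()[:3]
#       return (metric, float(timestamp), float(value), {'metric_type': 'gauge'})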
#
# ========================================================================== #
# Custom Emitters #
# ========================================================================== #
# Comma-separated list of emitters to be used in addition to the standard one
#
# Expected to be passed as a comma-separated list of colon-delimited
# name/object pairs.
#
# custom_emitters: /usr/local/my-code/emitters/rabbitmq.py:RabbitMQEmitter
#
# If the name of the emitter function is not specified, 'emitter' is assumed.
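#
# A minimal sketch of a custom emitter (hypothetical module path; this assumes
# emitters are called with (message, log, agentConfig), like the Agent's
# built-in http emitter):
#
#   def emitter(message, log, agentConfig):
#       log.info("custom emitter received %d top-level keys" % len(message))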
# ========================================================================== #
# Logging
# ========================================================================== #
log_level: <%= @_loglevel %>
# collector_log_file: /var/log/datadog/collector.log
# forwarder_log_file: /var/log/datadog/forwarder.log
# dogstatsd_log_file: /var/log/datadog/dogstatsd.log
# pup_log_file: /var/log/datadog/pup.log
# if syslog is enabled but a host and port are not set, a local domain socket
# connection will be attempted
#
log_to_syslog: <%= @log_to_syslog ? "yes" : "no" %>
# syslog_host:
# syslog_port: