## Default values for Datadog Agent
## See Datadog helm documentation to learn more:
## https://docs.datadoghq.com/agent/kubernetes/helm/
## @param nameOverride - string - optional
## Override the name of the app.
#
nameOverride: # ""
## @param fullnameOverride - string - optional
## Override the fully qualified app name.
#
fullnameOverride: # ""
## @param targetSystem - string - required
## Set the target OS for this deployment
## Possible values: linux, windows
#
targetSystem: "linux"
datadog:
## @param apiKey - string - required
## Set this to your Datadog API key before the Agent runs.
## ref: https://app.datadoghq.com/account/settings#agent/kubernetes
#
apiKey: <DATADOG_API_KEY>
## @param apiKeyExistingSecret - string - optional
## Use an existing Secret which stores the API key instead of creating a new one.
## If set, this parameter takes precedence over "apiKey".
#
apiKeyExistingSecret: # <DATADOG_API_KEY_SECRET>
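## A minimal sketch of creating such a Secret with kubectl, assuming the chart reads
## the key from the "api-key" entry of the Secret (the Secret name below is only an example):
##   kubectl create secret generic datadog-secret --from-literal api-key=<DATADOG_API_KEY>
# apiKeyExistingSecret: datadog-secret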
## @param appKey - string - optional
## If you are using clusterAgent.metricsProvider.enabled = true, you must set
## a Datadog application key for read access to your metrics.
#
appKey: # <DATADOG_APP_KEY>
## @param appKeyExistingSecret - string - optional
## Use an existing Secret which stores the app key instead of creating a new one.
## If set, this parameter takes precedence over "appKey".
#
appKeyExistingSecret: # <DATADOG_APP_KEY_SECRET>
## @param securityContext - object - optional
## You can modify the security context used to run the containers by
## modifying the label type below:
#
securityContext: {}
# seLinuxOptions:
# user: "system_u"
# role: "system_r"
# type: "spc_t"
# level: "s0"
## @param clusterName - string - optional
## Set a unique cluster name to allow scoping hosts and Cluster Checks easily
## The name must be unique and must be made of dot-separated tokens, where each token can be up to 40 characters with the following restrictions:
## * Lowercase letters, numbers, and hyphens only.
## * Must start with a letter.
## * Must end with a number or a letter.
## Unlike GKE's cluster name rules, dots are allowed here:
## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name
#
clusterName: # <CLUSTER_NAME>
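## For example, a name following the rules above (hypothetical value):
# clusterName: prod-us1.k8s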
## @param site - string - optional - default: 'datadoghq.com'
## The site of the Datadog intake to send Agent data to.
## Set to 'datadoghq.eu' to send data to the EU site.
#
site: # datadoghq.com
## @param dd_url - string - optional - default: 'https://app.datadoghq.com'
## The host of the Datadog intake server to send Agent data to, only set this option
## if you need the Agent to send data to a custom URL.
## Overrides the site setting defined in "site".
#
dd_url: # https://app.datadoghq.com
## @param logLevel - string - required
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off
#
logLevel: INFO
## @param kubeStateMetricsEnabled - boolean - required
## If true, deploys the kube-state-metrics deployment.
## ref: https://github.com/kubernetes/charts/tree/master/stable/kube-state-metrics
#
kubeStateMetricsEnabled: true
## @param clusterChecks - object - required
## Enable the Cluster Checks feature on both the cluster-agents and the daemonset
## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/
## Autodiscovery via Kube Service annotations is automatically enabled
#
clusterChecks:
enabled: false
## @param nodeLabelsAsTags - list of key:value strings - optional
## Provide a mapping of Kubernetes Node Labels to Datadog Tags.
#
nodeLabelsAsTags: {}
# beta.kubernetes.io/instance-type: aws-instance-type
# kubernetes.io/role: kube_role
# <KUBERNETES_NODE_LABEL>: <DATADOG_TAG_KEY>
## @param podLabelsAsTags - list of key:value strings - optional
## Provide a mapping of Kubernetes Labels to Datadog Tags.
#
podLabelsAsTags: {}
# app: kube_app
# release: helm_release
# <KUBERNETES_LABEL>: <DATADOG_TAG_KEY>
## @param podAnnotationsAsTags - list of key:value strings - optional
## Provide a mapping of Kubernetes Annotations to Datadog Tags
#
podAnnotationsAsTags: {}
# iam.amazonaws.com/role: kube_iamrole
# <KUBERNETES_ANNOTATIONS>: <DATADOG_TAG_KEY>
## @param tags - list of key:value elements - optional
## List of tags to attach to every metric, event and service check collected by this Agent.
##
## Learn more about tagging: https://docs.datadoghq.com/tagging/
#
tags: []
# - "<KEY_1>:<VALUE_1>"
# - "<KEY_2>:<VALUE_2>"
## @param dogstatsd - object - required
## dogstatsd configuration
## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/
## To emit custom metrics from your Kubernetes application, use DogStatsD.
#
dogstatsd:
## @param port - integer - optional - default: 8125
## Override the Agent DogStatsD port.
## Note: Make sure your client is sending to the same UDP port.
#
port: 8125
## @param originDetection - boolean - optional
## Enable origin detection for container tagging
## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging
#
originDetection: false
## @param useSocketVolume - boolean - optional
## Enable dogstatsd over Unix Domain Socket
## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
#
useSocketVolume: false
## @param socketPath - string - optional
## Path to the DogStatsD socket
#
socketPath: /var/run/datadog/dsd.socket
## @param hostSocketPath - string - optional
## host path to the DogStatsD socket
#
hostSocketPath: /var/run/datadog/
## @param useHostPort - boolean - optional
## Sets the hostPort to the same value as the container port. Needs to be used
## for sending custom metrics.
## The ports need to be available on all hosts.
##
## WARNING: Make sure that hosts using this are properly firewalled otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
useHostPort: false
## @param useHostPID - boolean - optional
## Run the agent in the host's PID namespace. This is required for Dogstatsd origin
## detection to work. See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
#
useHostPID: false
## @param nonLocalTraffic - boolean - optional - default: false
## Enable this to make each node accept non-local statsd traffic.
## ref: https://github.com/DataDog/docker-dd-agent#environment-variables
#
nonLocalTraffic: false
## @param collectEvents - boolean - optional - default: false
## Enable this to start event collection from the Kubernetes API
## ref: https://docs.datadoghq.com/agent/kubernetes/event_collection/
#
collectEvents: false
## @param leaderElection - boolean - optional - default: false
## Enables leader election mechanism for event collection.
#
leaderElection: false
## @param leaderLeaseDuration - integer - optional - default: 60
## Set the lease time for leader election, in seconds.
#
leaderLeaseDuration: # 60
## @param logs - object - required
## Enable logs agent and provide custom configs
#
logs:
## @param enabled - boolean - optional - default: false
## Enable this to activate Datadog Agent log collection.
## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
#
enabled: false
## @param containerCollectAll - boolean - optional - default: false
## Enable this to allow log collection for all containers.
## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
#
containerCollectAll: false
## @param containerCollectUsingFiles - boolean - optional - default: true
## Collect logs from files in /var/log/pods instead of using container runtime API.
## It's usually the most efficient way of collecting logs.
## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
#
containerCollectUsingFiles: true
## @param apm - object - required
## Enable apm agent and provide custom configs
#
apm:
## @param enabled - boolean - optional - default: false
## Enable this to enable APM and tracing, on port 8126
## ref: https://github.com/DataDog/docker-dd-agent#tracing-from-the-host
#
enabled: false
## @param port - integer - optional - default: 8126
## Override the trace Agent port.
## Note: Make sure your client is sending to the same TCP port.
#
port: 8126
## @param useSocketVolume - boolean - optional
## Enable APM over Unix Domain Socket
## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
#
useSocketVolume: false
## @param socketPath - string - optional
## Path to the trace-agent socket
#
socketPath: /var/run/datadog/apm.socket
## @param hostSocketPath - string - optional
## host path to the trace-agent socket
#
hostSocketPath: /var/run/datadog/
## @param env - list of object - optional
## The dd-agent supports many environment variables
## ref: https://docs.datadoghq.com/agent/docker/?tab=standard#environment-variables
#
env: []
# - name: <ENV_VAR_NAME>
# value: <ENV_VAR_VALUE>
## @param confd - list of objects - optional
## Provide additional check configurations (static and Autodiscovery)
## Each key becomes a file in /conf.d
## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes
## ref: https://docs.datadoghq.com/agent/autodiscovery/
#
confd: {}
# redisdb.yaml: |-
# init_config:
# instances:
# - host: "name"
# port: "6379"
# kubernetes_state.yaml: |-
# ad_identifiers:
# - kube-state-metrics
# init_config:
# instances:
# - kube_state_url: http://%%host%%:8080/metrics
## @param checksd - list of key:value strings - optional
## Provide additional custom checks as python code
## Each key becomes a file in /checks.d
## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes
#
checksd: {}
# service.py: |-
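## A minimal custom check sketch, assuming the datadog_checks base package shipped
## with Agent 7; the file name, metric name and tags below are placeholders:
# hello.py: |-
#   from datadog_checks.base import AgentCheck
#
#   class HelloCheck(AgentCheck):
#       def check(self, instance):
#           # Submit a constant gauge so the check is visible in Datadog
#           self.gauge("hello.world", 1, tags=["env:dev"])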
## @param dockerSocketPath - string - optional
## Path to the docker socket
#
dockerSocketPath: # /var/run/docker.sock
## @param criSocketPath - string - optional
## Path to the container runtime socket (if different from Docker)
## This is supported starting from agent 6.6.0
#
criSocketPath: # /var/run/containerd/containerd.sock
## @param processAgent - object - required
## Enable process agent and provide custom configs
#
processAgent:
## @param enabled - boolean - required
## Set this to true to enable the live process monitoring agent
## Note: /etc/passwd is automatically mounted to allow username resolution.
## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset
#
enabled: true
## @param processCollection - boolean - required
## Set this to true to enable process collection in process monitoring agent
## Requires processAgent.enabled to be set to true to have any effect
#
processCollection: false
## @param systemProbe - object - required
## Enable systemProbe agent and provide custom configs
#
systemProbe:
## @param enabled - boolean - required
## Set this to true to enable system-probe agent
#
enabled: false
## @param debugPort - integer - required
## Specify the port to expose pprof and expvar for system-probe agent
#
debugPort: 0
## @param enableConntrack - boolean - required
## Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data
## Ref: http://conntrack-tools.netfilter.org/
#
enableConntrack: true
## @param seccomp - string - required
## Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges
## Note that this will break `kubectl exec … -c system-probe -- /bin/bash`
#
seccomp: localhost/system-probe
## @param seccompRoot - string - required
## Specify the seccomp profile root directory
#
seccompRoot: /var/lib/kubelet/seccomp
## @param bpfDebug - boolean - required
## Enable logging for kernel debug
#
bpfDebug: false
## @param apparmor - string - required
## Specify an AppArmor profile for the system-probe container
#
apparmor: unconfined
## @param enableTCPQueueLength - boolean - optional
## Enable the TCP queue length eBPF-based check
#
enableTCPQueueLength: false
## @param enableOOMKill - boolean - optional
## Enable the OOM kill eBPF-based check
#
enableOOMKill: false
## @param collectDNSStats - boolean - optional
## Enable DNS stat collection
#
collectDNSStats: false
orchestratorExplorer:
## @param enabled - boolean - required
## Set this to true to enable the orchestrator explorer.
## This requires processAgent.enabled and clusterAgent.enabled to be set to true
## ref: TODO - add doc link
#
enabled: false
## @param clusterAgent - object - required
## This is the Datadog Cluster Agent implementation that handles cluster-wide
## metrics more cleanly, separates concerns for better RBAC, and implements
## the external metrics API so you can autoscale HPAs based on Datadog metrics
## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/
#
clusterAgent:
## @param enabled - boolean - required
## Set this to true to enable Datadog Cluster Agent
#
enabled: false
## @param image - object - required
## Define the Datadog Cluster-Agent image to work with.
#
image:
## @param repository - string - required
## Define the repository to use:
#
repository: datadog/cluster-agent
## @param tag - string - required
## Define the Cluster-Agent version to use.
#
tag: 1.7.0
## @param pullPolicy - string - required
## The Kubernetes pull policy.
#
pullPolicy: IfNotPresent
## @param pullSecrets - list of key:value strings - optional
## It is possible to specify docker registry credentials
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
#
pullSecrets: []
# - name: "<REG_SECRET>"
## @param command - array - optional
## Command to run in the Cluster Agent container
#
command: []
## @param token - string - required
## This needs to be at least 32 characters in a-zA-Z
## It is a preshared key between the node agents and the cluster agent
## ref:
#
token: ""
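## For illustration only; use a randomly generated value of at least 32 characters:
# token: "abcdefghijklmnopqrstuvwxyzABCDEF"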
## @param replicas - integer - required
## Specify the number of Cluster Agent replicas; if > 1, the Cluster Agent can
## work in HA mode.
#
replicas: 1
## @param rbac - object - required
## Provide Cluster Agent Deployment pod(s) RBAC configuration
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
## @param serviceAccountName - string - required
## Ignored if clusterAgent.rbac.create is true
#
serviceAccountName: default
## @param metricsProvider - object - required
## Enable the metricsProvider to be able to scale based on metrics in Datadog
#
metricsProvider:
## @param enabled - boolean - required - default: false
## Set this to true to enable Metrics Provider
#
enabled: false
## @param wpaController - boolean - optional
## Enable informer and controller of the watermark pod autoscaler
## NOTE: You need to install the `WatermarkPodAutoscaler` CRD beforehand
#
wpaController: false
## @param useDatadogMetrics - boolean - optional
## Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries
## NOTE: You need to install the `DatadogMetric` CRD beforehand
#
useDatadogMetrics: false
## Configuration for the service for the cluster-agent metrics server
#
service:
## @param type - string - optional
##
#
type: ClusterIP
## @param port - int - optional
##
port: 8443
## @param env - list of object - optional
## The Cluster-Agent supports many additional environment variables that can
## be passed literally.
## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options
#
env: []
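## For example, using the same form as the node agent "env" above:
# - name: <ENV_VAR_NAME>
#   value: <ENV_VAR_VALUE>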
## @param admissionController - object - required
## Enable the admissionController to be able to inject APM/Dogstatsd config
## and standard tags (env, service, version) automatically into your pods
#
admissionController:
enabled: false
## @param mutateUnlabelled - boolean - optional
## Enable injecting config without having the pod label 'admission.datadoghq.com/enabled="true"'
#
mutateUnlabelled: false
## @param confd - list of objects - optional
## Provide additional cluster check configurations
## Each key will become a file in /conf.d
## ref: https://docs.datadoghq.com/agent/autodiscovery/
#
confd: {}
# mysql.yaml: |-
# cluster_check: true
# instances:
# - server: '<EXTERNAL_IP>'
# port: 3306
# user: datadog
# pass: '<YOUR_CHOSEN_PASSWORD>'
## @param resources - object - required
## Datadog cluster-agent resource requests and limits.
#
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi
## @param priorityClassName - string - optional
## Name of the priorityClass to apply to the Cluster Agent
#
priorityClassName: # system-cluster-critical
## @param nodeSelector - object - optional
## Allow the Cluster Agent Deployment to schedule on selected nodes
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## @param affinity - object - optional
## Allow the Cluster Agent Deployment to schedule using affinity rules
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
#
affinity: {}
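## For example, a preferred anti-affinity sketch to spread Cluster Agent replicas
## across nodes (standard Kubernetes syntax; the "app" label value is illustrative
## and depends on your release name):
# affinity:
#   podAntiAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 50
#         podAffinityTerm:
#           topologyKey: kubernetes.io/hostname
#           labelSelector:
#             matchLabels:
#               app: <RELEASE_NAME>-datadog-cluster-agent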
## @param healthPort - integer - optional - default: 5555
## Port number used by the cluster-agent to serve the healthz endpoint
healthPort: 5555
## @param livenessProbe - object - required
## Override the cluster-agent's liveness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigation:
#
livenessProbe:
httpGet:
port: 5555
path: /live
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param readinessProbe - object - required
## Override the cluster-agent's readiness probe logic from the default:
#
readinessProbe:
httpGet:
port: 5555
path: /ready
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param strategy - string - required
## Allow the Cluster Agent deployment to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
#
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
## @param podAnnotations - list of key:value strings - optional
## Annotations to add to the cluster-agent's pod(s)
#
podAnnotations: {}
# key: "value"
## @param useHostNetwork - boolean - optional
## Bind ports on the hostNetwork. Useful for CNI networking where hostPort might
## not be supported. The ports need to be available on all hosts. It can be
## used for custom metrics instead of a service endpoint.
##
## WARNING: Make sure that hosts using this are properly firewalled otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
useHostNetwork: # true
## @param dnsConfig - list of objects - optional
## Specify DNS configuration options for the Datadog Cluster Agent containers, e.g. ndots
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
# options:
# - name: ndots
# value: "1"
## @param volumes - list of objects - optional
## Specify additional volumes to mount in the cluster-agent container
#
volumes: []
# - hostPath:
# path: <HOST_PATH>
# name: <VOLUME_NAME>
## @param volumeMounts - list of objects - optional
## Specify additional volume mounts in the cluster-agent container
#
volumeMounts: []
# - name: <VOLUME_NAME>
# mountPath: <CONTAINER_PATH>
# readOnly: true
## @param datadog-cluster.yaml - object - optional
## Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml).
#
datadog_cluster_yaml: {}
## @param createPodDisruptionBudget - boolean - optional
## If true, create a PodDisruptionBudget for the Cluster Agent
#
createPodDisruptionBudget: false
agents:
## @param enabled - boolean - required
## You should keep Datadog DaemonSet enabled!
## The exceptional case is when you need to run a single Datadog pod per
## namespace, but do not want to re-create a DaemonSet for every
## non-default namespace install.
## Note: StatsD and DogStatsD work over UDP, so you may not get guaranteed
## delivery of the metrics in a Datadog-per-namespace setup!
#
enabled: true
## @param image - object - required
## Define the Datadog image to work with.
#
image:
## @param repository - string - required
## Define the repository to use:
## use "datadog/agent" for Datadog Agent 7
## use "datadog/dogstatsd" for Standalone Datadog Agent DogStatsD 7
#
repository: datadog/agent
## @param tag - string - required
## Define the Agent version to use.
## Use 7-jmx to enable jmx fetch collection
#
tag: 7.21.1
## @param doNotCheckTag - boolean - optional
## By default, the version passed in agents.image.tag is checked
## for compatibility with the version of the chart.
## Set this boolean to completely skip this check.
## This is useful, for example, for custom tags that do not
## respect semantic versioning
#
doNotCheckTag: # false
## @param pullPolicy - string - required
## The Kubernetes pull policy.
#
pullPolicy: IfNotPresent
## @param pullSecrets - list of key:value strings - optional
## It is possible to specify docker registry credentials
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
#
pullSecrets: []
# - name: "<REG_SECRET>"
## @param rbac - object - required
## Provide Daemonset RBAC configuration
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
## @param serviceAccountName - string - required
## Ignored if agents.rbac.create is true
#
serviceAccountName: default
## @param podSecurity - object - optional
## Provide Daemonset pod security configuration (PodSecurityPolicy or SecurityContextConstraints)
podSecurity:
## @param podSecurityPolicy - object - required
## Provide Daemonset PodSecurityPolicy configuration
podSecurityPolicy:
## @param create - boolean - optional
## If true, create a PodSecurityPolicy resource for Agent pods
#
create: false
## @param securityContextConstraints - object - required
## Provide Daemonset securityContextConstraints configuration
securityContextConstraints:
## @param create - boolean - optional
## If true, create a SecurityContextConstraints resource for Agent pods
#
create: false
## @param securityContext - object - required
## Provide securityContext configuration
#
securityContext:
rule: MustRunAs
seLinuxOptions:
user: system_u
role: system_r
type: spc_t
level: s0
## @param privileged - boolean - optional
## If true, allow running privileged containers
#
privileged: false
## @param capabilites - list - optional
## Allowed capabilities
#
capabilites:
- SYS_ADMIN
- SYS_RESOURCE
- SYS_PTRACE
- NET_ADMIN
- NET_BROADCAST
- IPC_LOCK
## @param volumes - list - optional
## Allowed volume types
#
volumes:
- configMap
- downwardAPI
- emptyDir
- hostPath
- secret
## @param seccompProfiles - list - optional
## Allowed seccomp profiles
#
seccompProfiles:
- "runtime/default"
- "localhost/system-probe"
## @param apparmorProfiles - list - optional
## Allowed AppArmor profiles
#
apparmorProfiles:
- "runtime/default"
containers:
agent:
## @param env - list - required
## Additional environment variables for the agent container.
#
env: []
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the agent container.
#
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi
## @param livenessProbe - object - required
## Override the agent's liveness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigation:
#
livenessProbe:
httpGet:
path: /live
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param readinessProbe - object - required
## Override the agent's readiness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigation:
#
readinessProbe:
httpGet:
path: /ready
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
processAgent:
## @param env - list - required
## Additional environment variables for the process-agent container.
#
env: []
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the process-agent container.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
traceAgent:
## @param env - list - required
## Additional environment variables for the trace-agent container.
#
env: []
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the trace-agent container.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
## @param livenessProbe - object - optional
## Override the trace agent's liveness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigation:
#
livenessProbe:
tcpSocket:
port: 8126
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
systemProbe:
## @param env - list - required
## Additional environment variables for the system-probe container.
#
env: []
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the system-probe container.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
initContainers:
## @param resources - object - required
## Resource requests and limits for the init containers.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
## @param volumes - list of objects - optional
## Specify additional volumes to mount in the dd-agent container
#
volumes: []
# - hostPath:
# path: <HOST_PATH>
# name: <VOLUME_NAME>
## @param volumeMounts - list of objects - optional
## Specify additional volume mounts in the dd-agent container
#
volumeMounts: []
# - name: <VOLUME_NAME>
# mountPath: <CONTAINER_PATH>
# readOnly: true
## @param useHostNetwork - boolean - optional
## Bind ports on the hostNetwork. Useful for CNI networking where hostPort might
## not be supported. The ports need to be available on all hosts. It can be
## used for custom metrics instead of a service endpoint.
##
## WARNING: Make sure that hosts using this are properly firewalled otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
useHostNetwork: false
## @param dnsConfig - list of objects - optional
## Specify DNS configuration options for the Datadog Agent containers, e.g. ndots
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
# options:
# - name: ndots
# value: "1"
## @param podAnnotations - list of key:value strings - optional
## Annotations to add to the DaemonSet's Pods
#
podAnnotations: {}
# <POD_ANNOTATION>: '[{"key": "<KEY>", "value": "<VALUE>"}]'
## @param tolerations - array - optional
## Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6)
#
tolerations: []
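## For example, to also schedule on control-plane/master nodes (standard Kubernetes
## toleration syntax; adjust the taint key to match your cluster):
# - key: node-role.kubernetes.io/master
#   effect: NoSchedule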
## @param nodeSelector - object - optional
## Allow the DaemonSet to schedule on selected nodes
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
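## For example, to restrict the DaemonSet to Linux nodes (well-known Kubernetes label):
# kubernetes.io/os: linux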
## @param affinity - object - optional
## Allow the DaemonSet to schedule using affinity rules
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
#
affinity: {}
## @param updateStrategy - string - optional
## Allow the DaemonSet to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
#
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: "10%"
## @param priorityClassName - string - optional
## Sets PriorityClassName if defined.
#
priorityClassName:
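## For example (assuming this PriorityClass exists in your cluster):
# priorityClassName: system-node-critical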
## @param podLabels - object - optional
## Sets podLabels if defined.
#
podLabels: {}
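## For example (placeholder values):
# <POD_LABEL>: <LABEL_VALUE>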
## @param useConfigMap - boolean - optional
## Configures a configmap to provide the agent configuration
## Use this in combination with the `agents.customAgentConfig` parameter.
#
useConfigMap: # false
## @param customAgentConfig - object - optional
## Specify custom contents for the datadog agent config (datadog.yaml).
## ref: https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6
## ref: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml
## Note the `agents.useConfigMap` needs to be set to `true` for this parameter to be taken into account.
#
customAgentConfig: {}
# # Autodiscovery for Kubernetes
# listeners:
# - name: kubelet
# config_providers:
# - name: kubelet
# polling: true
# # needed to support legacy docker label config templates
# - name: docker
# polling: true
#
# # Enable APM by setting the DD_APM_ENABLED envvar to true, or override this configuration
# apm_config:
# enabled: false
# apm_non_local_traffic: true
#
# # Enable java cgroup handling. Only one of those options should be enabled,
# # depending on the agent version you are using along that chart.
#
# # agent version < 6.15
# # jmx_use_cgroup_memory_limit: true
#
# # agent version >= 6.15
# # jmx_use_container_support: true
clusterChecksRunner:
## @param enabled - boolean - required
## If true, deploys agents dedicated to running the Cluster Checks, instead of running them in the DaemonSet's agents.
## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/
#
enabled: false
## @param image - object - required
## Define the Datadog image to work with.
#
image:
## @param repository - string - required
## Define the repository to use:
## use "datadog/agent" for Datadog Agent 7
## use "datadog/dogstatsd" for Standalone Datadog Agent DogStatsD 7
#
repository: datadog/agent
## @param tag - string - required
## Define the Agent version to use.
## Use 7-jmx to enable jmx fetch collection
#
tag: 7.20.2
## @param pullPolicy - string - required
## The Kubernetes pull policy.
#
pullPolicy: IfNotPresent
## @param pullSecrets - list of key:value strings - optional
## It is possible to specify docker registry credentials
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
#
pullSecrets: []
# - name: "<REG_SECRET>"
## @param createPodDisruptionBudget - boolean - optional
## If true, create a PodDisruptionBudget for the cluster checks agents
#
createPodDisruptionBudget: false
## @param rbac - object - required
## Provide Cluster Checks Deployment pods RBAC configuration
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
## @param dedicated - boolean - required
## If true, use a dedicated RBAC resource for the cluster checks agent(s)
#
dedicated: false
## @param serviceAccountAnnotations - object - required
## Annotations to add to the ServiceAccount if clusterChecksRunner.rbac.dedicated is true
#
serviceAccountAnnotations: {}
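## For example, to attach an AWS IAM role via IRSA on EKS (the role ARN is a placeholder):
# eks.amazonaws.com/role-arn: arn:aws:iam::<ACCOUNT_ID>:role/<ROLE_NAME>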
## @param serviceAccountName - string - required
## Ignored if clusterChecksRunner.rbac.create is true
#
serviceAccountName: default
## @param replicas - integer - required
## To deploy the clusterChecks agent in HA, keep clusterChecksRunner.replicas set to at least 2,
## and increase it according to the number of Cluster Checks.
#
replicas: 2
## @param resources - object - required
## Datadog clusterchecks-agent resource requests and limits.
#
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
# limits:
# cpu: 200m
# memory: 500Mi
## @param affinity - object - optional
## Allow the ClusterChecks Deployment to schedule using affinity rules.
## By default, ClusterChecks Deployment Pods are forced to run on different Nodes.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
#
affinity: {}
## @param strategy - string - optional
## Allow the ClusterChecks deployment to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
#
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
## @param dnsConfig - list of objects - optional
## Specify DNS configuration options for the cluster checks runner containers, e.g. ndots
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
# options:
# - name: ndots
# value: "1"
## @param nodeSelector - object - optional
## Allow the ClusterChecks Deployment to schedule on selected nodes
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## @param tolerations - array - required
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
#
tolerations: []
## @param livenessProbe - object - required
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigation:
#
# livenessProbe:
# exec:
# command: ["/bin/true"]
#
livenessProbe:
httpGet:
path: /live
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param readinessProbe - object - required
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigation:
#
# readinessProbe:
# exec:
# command: ["/bin/true"]
#
readinessProbe:
httpGet:
path: /ready
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param env - list of object - optional
## The dd-agent supports many environment variables
## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#environment-variables
#
env: []
# - name: <ENV_VAR_NAME>
# value: <ENV_VAR_VALUE>
## @param volumes - list of objects - optional
## Specify additional volumes to mount in the cluster checks container
#
volumes: []
# - hostPath:
# path: <HOST_PATH>
# name: <VOLUME_NAME>
## @param volumeMounts - list of objects - optional
## Specify additional volume mounts in the cluster checks container
#
volumeMounts: []
# - name: <VOLUME_NAME>
# mountPath: <CONTAINER_PATH>
# readOnly: true
kube-state-metrics:
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
serviceAccount:
## @param create - boolean - required
## If true, create a ServiceAccount; requires kube-state-metrics.rbac.create to be true
#
create: true
## @param name - string - required
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
#
name:
## @param resources - object - optional
## Resource requests and limits for the kube-state-metrics container.
#
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi