forked from DataDog/datadog-agent
-
Notifications
You must be signed in to change notification settings - Fork 2
/
autodiscovery.go
208 lines (177 loc) · 7.12 KB
/
autodiscovery.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package common
import (
"context"
"time"
"github.com/StackVista/stackstate-agent/pkg/autodiscovery"
"github.com/StackVista/stackstate-agent/pkg/autodiscovery/integration"
"github.com/StackVista/stackstate-agent/pkg/autodiscovery/providers"
"github.com/StackVista/stackstate-agent/pkg/autodiscovery/providers/names"
"github.com/StackVista/stackstate-agent/pkg/autodiscovery/scheduler"
"github.com/StackVista/stackstate-agent/pkg/config"
confad "github.com/StackVista/stackstate-agent/pkg/config/autodiscovery"
"github.com/StackVista/stackstate-agent/pkg/util/log"
)
// incompatibleListeners records pairs of AD listeners that must not run at
// the same time. This is due to an AD limitation that does not allow several
// listeners to work in parallel if they can provide for the same objects.
// When this is solved, we can remove this check and simplify the code in
// setupAutoDiscovery.
var (
	incompatibleListeners = map[string]map[string]struct{}{
		"kubelet":   {"container": {}},
		"container": {"kubelet": {}},
	}
)
// setupAutoDiscovery builds the AutoConfig instance used by the agent.
// It always registers the file config provider over confSearchPaths, then
// merges additional config providers and service listeners coming from the
// main configuration ("config_providers"/"listeners" keys plus the
// "extra_config_providers"/"extra_listeners" overrides) and from
// config/environment discovery, deduplicating by name.
func setupAutoDiscovery(confSearchPaths []string, metaScheduler *scheduler.MetaScheduler) *autodiscovery.AutoConfig {
	ad := autodiscovery.NewAutoConfig(metaScheduler)

	// The file provider is always registered and never polled.
	ad.AddConfigProvider(providers.NewFileConfigProvider(confSearchPaths), false, 0)

	// Autodiscovery cannot easily use config.RegisterOverrideFunc() due to Unmarshalling
	extraConfigProviders, extraConfigListeners := confad.DiscoverComponentsFromConfig()

	var extraEnvProviders []config.ConfigurationProviders
	var extraEnvListeners []config.Listeners
	if config.IsAutoconfigEnabled() && !config.IsCLCRunner() {
		extraEnvProviders, extraEnvListeners = confad.DiscoverComponentsFromEnv()
	}

	// Register additional configuration providers
	var configProviders []config.ConfigurationProviders
	var uniqueConfigProviders map[string]config.ConfigurationProviders
	err := config.Datadog.UnmarshalKey("config_providers", &configProviders)
	if err == nil {
		// BUGFIX: the capacity hint previously counted configProviders twice
		// and omitted extraConfigProviders. Capacity is only a hint, so the
		// old value was not a crash, just a wrong pre-size.
		uniqueConfigProviders = make(map[string]config.ConfigurationProviders, len(configProviders)+len(extraConfigProviders)+len(extraEnvProviders))
		for _, provider := range configProviders {
			uniqueConfigProviders[provider.Name] = provider
		}

		// Add extra config providers; explicit config_providers entries win.
		for _, name := range config.Datadog.GetStringSlice("extra_config_providers") {
			if _, found := uniqueConfigProviders[name]; !found {
				uniqueConfigProviders[name] = config.ConfigurationProviders{Name: name, Polling: true}
			} else {
				log.Infof("Duplicate AD provider from extra_config_providers discarded as already present in config_providers: %s", name)
			}
		}

		// The "docker" config provider was replaced with the "container" one
		// that supports Docker, but also other runtimes. We need this
		// conversion to avoid breaking configs that included "docker".
		if options, found := uniqueConfigProviders["docker"]; found {
			delete(uniqueConfigProviders, "docker")
			options.Name = names.Container
			uniqueConfigProviders["container"] = options
		}

		// Discovered providers never override an explicitly configured one.
		for _, provider := range extraConfigProviders {
			if _, found := uniqueConfigProviders[provider.Name]; !found {
				uniqueConfigProviders[provider.Name] = provider
			}
		}
		for _, provider := range extraEnvProviders {
			if _, found := uniqueConfigProviders[provider.Name]; !found {
				uniqueConfigProviders[provider.Name] = provider
			}
		}
	} else {
		log.Errorf("Error while reading 'config_providers' settings: %v", err)
	}

	// Adding all found providers. When the unmarshal above failed this is a
	// no-op: ranging over a nil map iterates zero times.
	for _, cp := range uniqueConfigProviders {
		factory, found := providers.ProviderCatalog[cp.Name]
		if found {
			configProvider, err := factory(cp)
			if err != nil {
				log.Errorf("Error while adding config provider %v: %v", cp.Name, err)
				continue
			}
			pollInterval := providers.GetPollInterval(cp)
			if cp.Polling {
				log.Infof("Registering %s config provider polled every %s", cp.Name, pollInterval.String())
			} else {
				log.Infof("Registering %s config provider", cp.Name)
			}
			ad.AddConfigProvider(configProvider, cp.Polling, pollInterval)
		} else {
			log.Errorf("Unable to find this provider in the catalog: %v", cp.Name)
		}
	}

	var listeners []config.Listeners
	err = config.Datadog.UnmarshalKey("listeners", &listeners)
	if err == nil {
		// Add extra listeners
		for _, name := range config.Datadog.GetStringSlice("extra_listeners") {
			listeners = append(listeners, config.Listeners{Name: name})
		}

		// The "docker" listener was replaced with the "container" one that
		// supports Docker, but also other runtimes. We need this conversion to
		// avoid breaking configs that included "docker".
		for i := range listeners {
			if listeners[i].Name == "docker" {
				listeners[i].Name = "container"
			}
		}

		// Listeners discovered from config are added only when not already
		// present by name.
		for _, listener := range extraConfigListeners {
			alreadyPresent := false
			for _, existingListener := range listeners {
				if listener.Name == existingListener.Name {
					alreadyPresent = true
					break
				}
			}
			if !alreadyPresent {
				listeners = append(listeners, listener)
			}
		}

		// For extraEnvListeners, we need to check incompatibleListeners to avoid generation of duplicate checks
		for _, listener := range extraEnvListeners {
			skipListener := false
			incomp := incompatibleListeners[listener.Name]
			for _, existingListener := range listeners {
				if listener.Name == existingListener.Name {
					skipListener = true
					break
				}
				if _, found := incomp[existingListener.Name]; found {
					log.Debugf("Discarding discovered listener: %s as incompatible with listener from config: %s", listener.Name, existingListener.Name)
					skipListener = true
					break
				}
			}
			if !skipListener {
				listeners = append(listeners, listener)
			}
		}

		ad.AddListeners(listeners)
	} else {
		log.Errorf("Error while reading 'listeners' settings: %v", err)
	}

	return ad
}
// StartAutoConfig starts auto discovery: it tells the package-level AC
// instance to load all resolved check configurations and run them on the
// registered schedulers.
func StartAutoConfig() {
	AC.LoadAndRun()
}
// WaitForConfigs polls Autodiscovery for integration configs until
// checkMatcher (which decides whether the collected configs are sufficient)
// returns true, or until timeout elapses, re-fetching every retryInterval.
// Autodiscovery listeners run asynchronously, so AC.GetAllConfigs() may
// initially fail to resolve templated configs depending on non-deterministic
// factors (system load, network latency, active Autodiscovery listeners and
// their configurations); this retry loop makes the check command resilient.
// A non-template configuration should be matched on the first attempt and
// returned immediately.
func WaitForConfigs(retryInterval, timeout time.Duration, checkMatcher func([]integration.Config) bool) []integration.Config {
	// Fast path: non-template configs are usually available right away.
	configs := AC.GetAllConfigs()
	if checkMatcher(configs) {
		return configs
	}

	deadline, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	ticker := time.NewTicker(retryInterval)
	defer ticker.Stop()

	for {
		select {
		case <-deadline.Done():
			// Timed out: return whatever was collected last.
			return configs
		case <-ticker.C:
			if configs = AC.GetAllConfigs(); checkMatcher(configs) {
				return configs
			}
		}
	}
}