-
Notifications
You must be signed in to change notification settings - Fork 487
/
scrape.go
297 lines (256 loc) · 9.1 KB
/
scrape.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
package scrape
import (
"context"
"fmt"
"net/url"
"sync"
"time"
"github.com/alecthomas/units"
"github.com/go-kit/log/level"
"github.com/grafana/agent/component"
fa "github.com/grafana/agent/component/common/appendable"
component_config "github.com/grafana/agent/component/common/config"
"github.com/grafana/agent/component/discovery"
"github.com/grafana/agent/component/prometheus"
"github.com/grafana/agent/pkg/build"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/scrape"
)
// init stamps the scrape user agent with the agent's build version and
// registers the prometheus.scrape component with the component registry.
func init() {
	scrape.UserAgent = fmt.Sprintf("GrafanaAgent/%s", build.Version)

	registration := component.Registration{
		Name: "prometheus.scrape",
		Args: Arguments{},

		Build: func(opts component.Options, args component.Arguments) (component.Component, error) {
			return New(opts, args.(Arguments))
		},
	}
	component.Register(registration)
}
// Arguments holds values which are used to configure the prometheus.scrape
// component.
type Arguments struct {
	// The set of targets to scrape.
	Targets []discovery.Target `river:"targets,attr"`
	// Receivers that scraped metrics are forwarded to.
	ForwardTo []*prometheus.Receiver `river:"forward_to,attr"`
	// The job name to override the job label with.
	JobName string `river:"job_name,attr,optional"`
	// Indicator whether the scraped metrics should remain unmodified.
	HonorLabels bool `river:"honor_labels,attr,optional"`
	// Indicator whether the scraped timestamps should be respected.
	HonorTimestamps bool `river:"honor_timestamps,attr,optional"`
	// A set of query parameters with which the target is scraped.
	Params url.Values `river:"params,attr,optional"`
	// How frequently to scrape the targets of this scrape config.
	ScrapeInterval time.Duration `river:"scrape_interval,attr,optional"`
	// The timeout for scraping targets of this config.
	ScrapeTimeout time.Duration `river:"scrape_timeout,attr,optional"`
	// The HTTP resource path on which to fetch metrics from targets.
	MetricsPath string `river:"metrics_path,attr,optional"`
	// The URL scheme with which to fetch metrics from targets.
	Scheme string `river:"scheme,attr,optional"`
	// An uncompressed response body larger than this many bytes will cause the
	// scrape to fail. 0 means no limit.
	BodySizeLimit units.Base2Bytes `river:"body_size_limit,attr,optional"`
	// More than this many samples post metric-relabeling will cause the scrape
	// to fail.
	SampleLimit uint `river:"sample_limit,attr,optional"`
	// More than this many targets after the target relabeling will cause the
	// scrapes to fail.
	TargetLimit uint `river:"target_limit,attr,optional"`
	// More than this many labels post metric-relabeling will cause the scrape
	// to fail.
	LabelLimit uint `river:"label_limit,attr,optional"`
	// More than this label name length post metric-relabeling will cause the
	// scrape to fail.
	LabelNameLengthLimit uint `river:"label_name_length_limit,attr,optional"`
	// More than this label value length post metric-relabeling will cause the
	// scrape to fail.
	LabelValueLengthLimit uint `river:"label_value_length_limit,attr,optional"`

	// HTTP client settings used when scraping targets.
	HTTPClientConfig component_config.HTTPClientConfig `river:"http_client_config,block,optional"`

	// Scrape Options
	ExtraMetrics bool `river:"extra_metrics,attr,optional"`
}
// DefaultArguments defines the default settings for a scrape job. These
// defaults are applied before user-provided values in UnmarshalRiver.
var DefaultArguments = Arguments{
	MetricsPath:      "/metrics",
	Scheme:           "http",
	HonorLabels:      false,
	HonorTimestamps:  true,
	HTTPClientConfig: component_config.DefaultHTTPClientConfig,
	ScrapeInterval:   1 * time.Minute,  // From config.DefaultGlobalConfig
	ScrapeTimeout:    10 * time.Second, // From config.DefaultGlobalConfig
}
// UnmarshalRiver implements river.Unmarshaler. It seeds the arguments with
// DefaultArguments before decoding, so attributes omitted from the config
// keep their default values.
func (arg *Arguments) UnmarshalRiver(f func(interface{}) error) error {
	*arg = DefaultArguments

	// Decode through a local alias type so the decoder doesn't recursively
	// invoke this UnmarshalRiver implementation.
	type plain Arguments
	return f((*plain)(arg))
}
// Component implements the prometheus.scrape component.
type Component struct {
	opts component.Options

	// reloadTargets signals Run to push the current target set to the
	// scrape manager; buffered with capacity 1 so Update never blocks.
	reloadTargets chan struct{}

	// mut guards args.
	mut  sync.RWMutex
	args Arguments

	scraper    *scrape.Manager
	appendable *fa.FlowAppendable
}
// Compile-time check that Component satisfies the component.Component
// interface.
var (
	_ component.Component = (*Component)(nil)
)
// New creates a new prometheus.scrape component from the given options and
// arguments. It applies the initial configuration before returning, so the
// receivers and targets are already set when the component starts running.
func New(o component.Options, args Arguments) (*Component, error) {
	appendable := fa.NewFlowAppendable(args.ForwardTo...)

	mgr := scrape.NewManager(
		&scrape.Options{ExtraMetrics: args.ExtraMetrics},
		o.Logger,
		appendable,
	)

	c := &Component{
		opts:          o,
		reloadTargets: make(chan struct{}, 1),
		scraper:       mgr,
		appendable:    appendable,
	}

	// Call Update once up front to set the receivers and targets.
	if err := c.Update(args); err != nil {
		return nil, err
	}
	return c, nil
}
// Run implements component.Component. It starts the underlying scrape
// manager in a background goroutine and, whenever reloadTargets is
// signaled, pushes the current target set to the manager. It blocks until
// ctx is canceled; the scrape manager is stopped on return.
func (c *Component) Run(ctx context.Context) error {
	defer c.scraper.Stop()

	targetSetsChan := make(chan map[string][]*targetgroup.Group)

	// Run the scrape manager; it consumes target-set updates from
	// targetSetsChan until stopped by the deferred Stop above.
	go func() {
		err := c.scraper.Run(targetSetsChan)
		level.Info(c.opts.Logger).Log("msg", "scrape manager stopped")
		if err != nil {
			level.Error(c.opts.Logger).Log("msg", "scrape manager failed", "err", err)
		}
	}()

	for {
		select {
		case <-ctx.Done():
			return nil
		case <-c.reloadTargets:
			// Snapshot targets and job name under the read lock so a
			// concurrent Update cannot mutate them mid-read.
			c.mut.RLock()
			var (
				tgs     = c.args.Targets
				jobName = c.opts.ID
			)
			if c.args.JobName != "" {
				jobName = c.args.JobName
			}
			c.mut.RUnlock()
			promTargets := c.componentTargetsToProm(jobName, tgs)

			// Hand the new target set to the scrape manager; bail out of
			// the send if the context is canceled while the manager is busy
			// (the outer loop then observes ctx.Done and returns).
			select {
			case targetSetsChan <- promTargets:
				level.Debug(c.opts.Logger).Log("msg", "passed new targets to scrape manager")
			case <-ctx.Done():
			}
		}
	}
}
// Update implements component.Component. It stores the new arguments,
// rewires the forward receivers, applies the derived Prometheus scrape
// config to the scrape manager, and signals Run to reload the target set.
func (c *Component) Update(args component.Arguments) error {
	newArgs := args.(Arguments)

	c.mut.Lock()
	defer c.mut.Unlock()
	c.args = newArgs

	c.appendable.SetReceivers(newArgs.ForwardTo)

	promConfig := &config.Config{
		ScrapeConfigs: []*config.ScrapeConfig{getPromScrapeConfigs(c.opts.ID, newArgs)},
	}
	if err := c.scraper.ApplyConfig(promConfig); err != nil {
		return fmt.Errorf("error applying scrape configs: %w", err)
	}
	level.Debug(c.opts.Logger).Log("msg", "scrape config was updated")

	// Non-blocking send: if a reload is already pending it will pick up
	// the arguments stored above.
	select {
	case c.reloadTargets <- struct{}{}:
	default:
	}

	return nil
}
// getPromScrapeConfigs bridges the in-house configuration with the
// Prometheus scrape_config.
// As explained in the Config struct, the following fields are purposefully
// left out, as they are implemented by other components:
// - RelabelConfigs
// - MetricsRelabelConfigs
// - ServiceDiscoveryConfigs
func getPromScrapeConfigs(jobName string, c Arguments) *config.ScrapeConfig {
	// Start from the Prometheus defaults and overwrite the fields the
	// component exposes.
	sc := config.DefaultScrapeConfig

	sc.JobName = jobName
	if c.JobName != "" {
		sc.JobName = c.JobName
	}

	sc.HonorLabels = c.HonorLabels
	sc.HonorTimestamps = c.HonorTimestamps
	sc.Params = c.Params
	sc.ScrapeInterval = model.Duration(c.ScrapeInterval)
	sc.ScrapeTimeout = model.Duration(c.ScrapeTimeout)
	sc.MetricsPath = c.MetricsPath
	sc.Scheme = c.Scheme
	sc.BodySizeLimit = c.BodySizeLimit
	sc.SampleLimit = c.SampleLimit
	sc.TargetLimit = c.TargetLimit
	sc.LabelLimit = c.LabelLimit
	sc.LabelNameLengthLimit = c.LabelNameLengthLimit
	sc.LabelValueLengthLimit = c.LabelValueLengthLimit

	// HTTP scrape client settings
	sc.HTTPClientConfig = *c.HTTPClientConfig.Convert()

	return &sc
}
// ScraperStatus reports the status of the scraper's jobs. It is the value
// returned by Component.DebugInfo.
type ScraperStatus struct {
	TargetStatus []TargetStatus `river:"target,block,optional"`
}
// TargetStatus reports on the status of the latest scrape for a target.
type TargetStatus struct {
	JobName            string            `river:"job,attr"`
	URL                string            `river:"url,attr"`
	Health             string            `river:"health,attr"`
	Labels             map[string]string `river:"labels,attr"`
	LastError          string            `river:"last_error,attr,optional"`
	LastScrape         time.Time         `river:"last_scrape,attr"`
	LastScrapeDuration time.Duration     `river:"last_scrape_duration,attr,optional"`
}
// DebugInfo implements component.DebugComponent. It reports the status of
// the most recent scrape for every active target, grouped by job.
func (c *Component) DebugInfo() interface{} {
	var res []TargetStatus

	for job, stt := range c.scraper.TargetsActive() {
		for _, st := range stt {
			// Guard against nil entries BEFORE dereferencing. The previous
			// implementation called st.LastError() ahead of the nil check,
			// which would panic on a nil target.
			if st == nil {
				continue
			}

			var lastError string
			if err := st.LastError(); err != nil {
				lastError = err.Error()
			}

			res = append(res, TargetStatus{
				JobName:            job,
				URL:                st.URL().String(),
				Health:             string(st.Health()),
				Labels:             st.Labels().Map(),
				LastError:          lastError,
				LastScrape:         st.LastScrape(),
				LastScrapeDuration: st.LastScrapeDuration(),
			})
		}
	}

	return ScraperStatus{TargetStatus: res}
}
// componentTargetsToProm converts the component's discovery targets into
// the target-group map consumed by the Prometheus scrape manager. All
// targets are placed into a single group keyed by the job name.
func (c *Component) componentTargetsToProm(jobName string, tgs []discovery.Target) map[string][]*targetgroup.Group {
	group := &targetgroup.Group{
		Source:  jobName,
		Targets: make([]model.LabelSet, 0, len(tgs)),
	}
	for _, tg := range tgs {
		group.Targets = append(group.Targets, convertLabelSet(tg))
	}
	return map[string][]*targetgroup.Group{jobName: {group}}
}
// convertLabelSet translates a discovery.Target into the Prometheus
// model.LabelSet representation.
func convertLabelSet(tg discovery.Target) model.LabelSet {
	result := make(model.LabelSet, len(tg))
	for name, value := range tg {
		result[model.LabelName(name)] = model.LabelValue(value)
	}
	return result
}