// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.

//go:generate msgp -unexported -marshal=false -o=stats_msgp.go -tests=false

package tracer

import (
	"sync"
	"sync/atomic"
	"time"

	"gopkg.in/DataDog/dd-trace-go.v1/internal"
	"gopkg.in/DataDog/dd-trace-go.v1/internal/log"

	"github.com/DataDog/datadog-go/v5/statsd"
	"github.com/DataDog/sketches-go/ddsketch"
	"google.golang.org/protobuf/proto"
)

// aggregableSpan holds necessary information about a span that can be used to
// aggregate statistics in a bucket.
type aggregableSpan struct {
	// key specifies the aggregation key under which this span can be
	// grouped inside a bucket.
	key aggregation

	Start, Duration int64
	Error           int32
	TopLevel        bool
}

// defaultStatsBucketSize specifies the default span of time that will be
// covered in one stats bucket.
var defaultStatsBucketSize = (10 * time.Second).Nanoseconds()

// concentrator aggregates and stores statistics on incoming spans in time buckets,
// flushing them occasionally to the underlying transport located in the given
// tracer config.
type concentrator struct {
	// In specifies the channel to be used for feeding data to the concentrator.
	// In order for In to have a consumer, the concentrator must be started using
	// a call to Start.
	In chan *aggregableSpan

	// mu guards the fields below it.
	mu sync.Mutex

	// buckets maintains a set of buckets, where the map key represents
	// the starting point in time of that bucket, in nanoseconds.
	buckets map[int64]*rawBucket

	// stopped reports whether the concentrator is stopped (when non-zero).
	stopped uint32

	wg           sync.WaitGroup        // waits for any active goroutines
	bucketSize   int64                 // the size of a bucket in nanoseconds
	stop         chan struct{}         // closing this channel triggers shutdown
	cfg          *config               // tracer startup configuration
	statsdClient internal.StatsdClient // statsd client for sending metrics.
}

// newConcentrator creates a new concentrator using the given tracer
// configuration c. It creates buckets of bucketSize nanoseconds duration.
func newConcentrator(c *config, bucketSize int64) *concentrator {
	return &concentrator{
		In:         make(chan *aggregableSpan, 10000),
		bucketSize: bucketSize,
		stopped:    1, // created in the stopped state; a call to Start activates it
		buckets:    make(map[int64]*rawBucket),
		cfg:        c,
	}
}

// alignTs returns the provided timestamp truncated to the bucket size.
// It gives the start time of the time bucket in which the timestamp falls.
func alignTs(ts, bucketSize int64) int64 { return ts - ts%bucketSize }
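
// For example, with the default 10s bucket size:
//
//	bucketSize := (10 * time.Second).Nanoseconds() // 10_000_000_000
//	alignTs(25_000_000_000, bucketSize)            // 20_000_000_000
//	alignTs(20_000_000_000, bucketSize)            // 20_000_000_000
//	alignTs(19_999_999_999, bucketSize)            // 10_000_000_000
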
// Start starts the concentrator. A started concentrator needs to be stopped
// in order to gracefully shut down, using Stop.
func (c *concentrator) Start() {
	if atomic.SwapUint32(&c.stopped, 0) == 0 {
		// already running
		log.Warn("(*concentrator).Start called more than once. This is likely a programming error.")
		return
	}
	c.stop = make(chan struct{})
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		tick := time.NewTicker(time.Duration(c.bucketSize) * time.Nanosecond)
		defer tick.Stop()
		c.runFlusher(tick.C)
	}()
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		c.runIngester()
	}()
}
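
// A minimal lifecycle sketch, assuming newConfig() yields a usable *config in
// this package; the span values shown are illustrative:
//
//	c := newConcentrator(newConfig(), defaultStatsBucketSize)
//	c.Start()
//	c.In <- &aggregableSpan{
//		key:      aggregation{Name: "http.request", Service: "web"},
//		Start:    time.Now().UnixNano(),
//		Duration: (50 * time.Millisecond).Nanoseconds(),
//	}
//	c.Stop() // drains In and flushes every bucket, including the current one
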
// runFlusher runs the flushing loop which sends stats to the underlying transport.
func (c *concentrator) runFlusher(tick <-chan time.Time) {
	for {
		select {
		case now := <-tick:
			c.flushAndSend(now, withoutCurrentBucket)
		case <-c.stop:
			return
		}
	}
}

// statsd returns the statsd client configured on the tracer, or a no-op
// client if none is set.
func (c *concentrator) statsd() internal.StatsdClient {
	if c.statsdClient == nil {
		return &statsd.NoOpClient{}
	}
	return c.statsdClient
}

// runIngester runs the loop which accepts incoming data on the concentrator's In
// channel.
func (c *concentrator) runIngester() {
	for {
		select {
		case s := <-c.In:
			c.statsd().Incr("datadog.tracer.stats.spans_in", nil, 1)
			c.add(s)
		case <-c.stop:
			return
		}
	}
}

// add adds s into the concentrator's internal stats buckets.
func (c *concentrator) add(s *aggregableSpan) {
	c.mu.Lock()
	defer c.mu.Unlock()

	btime := alignTs(s.Start+s.Duration, c.bucketSize)
	b, ok := c.buckets[btime]
	if !ok {
		b = newRawBucket(uint64(btime), c.bucketSize)
		c.buckets[btime] = b
	}
	b.handleSpan(s)
}
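
// Note that the bucket key is derived from the span's end time (Start+Duration),
// so a span is counted in the bucket in which it finished. For example:
//
//	s := &aggregableSpan{Start: 9_000_000_000, Duration: 2_000_000_000} // ends at t=11s
//	alignTs(s.Start+s.Duration, defaultStatsBucketSize)                 // 10_000_000_000: the [10s, 20s) bucket
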
// Stop stops the concentrator and blocks until the operation completes.
func (c *concentrator) Stop() {
	if atomic.SwapUint32(&c.stopped, 1) > 0 {
		return
	}
	close(c.stop)
	c.wg.Wait()
drain:
	for {
		select {
		case s := <-c.In:
			c.statsd().Incr("datadog.tracer.stats.spans_in", nil, 1)
			c.add(s)
		default:
			break drain
		}
	}
	c.flushAndSend(time.Now(), withCurrentBucket)
}

const (
	withCurrentBucket    = true
	withoutCurrentBucket = false
)

// flushAndSend flushes all the stats buckets with the given timestamp and sends them using the transport specified in
// the concentrator config. The current bucket is only included if includeCurrent is true, such as during shutdown.
func (c *concentrator) flushAndSend(timenow time.Time, includeCurrent bool) {
	sp := func() statsPayload {
		c.mu.Lock()
		defer c.mu.Unlock()
		now := timenow.UnixNano()
		sp := statsPayload{
			Hostname: c.cfg.hostname,
			Env:      c.cfg.env,
			Version:  c.cfg.version,
			Stats:    make([]statsBucket, 0, len(c.buckets)),
		}
		for ts, srb := range c.buckets {
			if !includeCurrent && ts > now-c.bucketSize {
				// do not flush the current bucket
				continue
			}
			log.Debug("Flushing bucket %d", ts)
			sp.Stats = append(sp.Stats, srb.Export())
			delete(c.buckets, ts)
		}
		return sp
	}()
	if len(sp.Stats) == 0 {
		// nothing to flush
		return
	}
	c.statsd().Incr("datadog.tracer.stats.flush_payloads", nil, 1)
	c.statsd().Incr("datadog.tracer.stats.flush_buckets", nil, float64(len(sp.Stats)))
	if err := c.cfg.transport.sendStats(&sp); err != nil {
		c.statsd().Incr("datadog.tracer.stats.flush_errors", nil, 1)
		log.Error("Error sending stats payload: %v", err)
	}
}
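
// To illustrate the cutoff above: with 10s buckets and a flush at now = 25s,
// now-c.bucketSize is 15s, so the bucket starting at 20s (still accumulating
// spans) is skipped, while the bucket starting at 10s is exported and deleted.
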
// aggregation specifies a uniquely identifiable key under which a certain set
// of stats are grouped inside a bucket.
type aggregation struct {
	Name       string
	Type       string
	Resource   string
	Service    string
	StatusCode uint32
	Synthetics bool
}
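
// Spans that match on every field of this key, e.g. two "http.request" spans
// for service "web" with resource "GET /users" and status code 200 (the
// values here are illustrative), are counted in the same groupedStats entry.
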
type rawBucket struct {
	start, duration uint64
	data            map[aggregation]*rawGroupedStats
}

func newRawBucket(btime uint64, bsize int64) *rawBucket {
	return &rawBucket{
		start:    btime,
		duration: uint64(bsize),
		data:     make(map[aggregation]*rawGroupedStats),
	}
}

func (sb *rawBucket) handleSpan(s *aggregableSpan) {
	gs, ok := sb.data[s.key]
	if !ok {
		gs = newRawGroupedStats()
		sb.data[s.key] = gs
	}
	if s.TopLevel {
		gs.topLevelHits++
	}
	gs.hits++
	if s.Error != 0 {
		gs.errors++
	}
	gs.duration += uint64(s.Duration)
	// reduce the resolution of the duration before adding it to the distribution
	trundur := nsTimestampToFloat(s.Duration)
	if s.Error != 0 {
		gs.errDistribution.Add(trundur)
	} else {
		gs.okDistribution.Add(trundur)
	}
}

// Export transforms a rawBucket into a statsBucket, typically used
// before communicating data to the API, as rawBucket is the internal
// type while statsBucket is the public, shared one.
func (sb *rawBucket) Export() statsBucket {
	csb := statsBucket{
		Start:    sb.start,
		Duration: sb.duration,
		Stats:    make([]groupedStats, 0, len(sb.data)),
	}
	for k, v := range sb.data {
		b, err := v.export(k)
		if err != nil {
			log.Error("Could not export stats bucket: %v.", err)
			continue
		}
		csb.Stats = append(csb.Stats, b)
	}
	return csb
}

type rawGroupedStats struct {
	hits         uint64
	topLevelHits uint64
	errors       uint64
	duration     uint64

	okDistribution  *ddsketch.DDSketch
	errDistribution *ddsketch.DDSketch
}

func newRawGroupedStats() *rawGroupedStats {
	const (
		// relativeAccuracy is the value accuracy we have on the percentiles. For example, we can
		// say that p99 is 100ms +- 1ms
		relativeAccuracy = 0.01
		// maxNumBins is the maximum number of bins of the ddSketch we use to store percentiles.
		// It can affect relative accuracy, but in practice, 2048 bins is enough to have 1% relative accuracy from
		// 80 microseconds to 1 year: http://www.vldb.org/pvldb/vol12/p2195-masson.pdf
		maxNumBins = 2048
	)
	okSketch, err := ddsketch.LogCollapsingLowestDenseDDSketch(relativeAccuracy, maxNumBins)
	if err != nil {
		log.Error("Error when creating ddsketch: %v", err)
	}
	errSketch, err := ddsketch.LogCollapsingLowestDenseDDSketch(relativeAccuracy, maxNumBins)
	if err != nil {
		log.Error("Error when creating ddsketch: %v", err)
	}
	return &rawGroupedStats{
		okDistribution:  okSketch,
		errDistribution: errSketch,
	}
}
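
// A brief sketch of how these distributions are queried (Add and
// GetValueAtQuantile are part of the ddsketch package API):
//
//	gs := newRawGroupedStats()
//	_ = gs.okDistribution.Add(nsTimestampToFloat((120 * time.Millisecond).Nanoseconds()))
//	p99, _ := gs.okDistribution.GetValueAtQuantile(0.99)
//	// with relativeAccuracy = 0.01, p99 is within ±1% of the true quantile
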
func (s *rawGroupedStats) export(k aggregation) (groupedStats, error) {
	msg := s.okDistribution.ToProto()
	okSummary, err := proto.Marshal(msg)
	if err != nil {
		return groupedStats{}, err
	}
	msg = s.errDistribution.ToProto()
	errSummary, err := proto.Marshal(msg)
	if err != nil {
		return groupedStats{}, err
	}
	return groupedStats{
		Service:        k.Service,
		Name:           k.Name,
		Resource:       k.Resource,
		HTTPStatusCode: k.StatusCode,
		Type:           k.Type,
		Hits:           s.hits,
		Errors:         s.errors,
		Duration:       s.duration,
		TopLevelHits:   s.topLevelHits,
		OkSummary:      okSummary,
		ErrorSummary:   errSummary,
		Synthetics:     k.Synthetics,
	}, nil
}

// nsTimestampToFloat converts a nanosecond timestamp into a float nanosecond
// timestamp truncated to a fixed precision.
func nsTimestampToFloat(ns int64) float64 {
	// 10 bits precision (any value will be +/- 1/1024)
	const roundMask int64 = 1 << 10
	var shift uint
	for ns > roundMask {
		ns = ns >> 1
		shift++
	}
	return float64(ns << shift)
}
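
// Worked example: 1_234_567 needs 11 right-shifts to drop to 602 (which is
// <= 1<<10), so the function returns float64(602 << 11) == 1_232_896. The low
// 11 bits are zeroed, keeping roughly the top 10 bits of precision.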