// Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package newrelic
import (
"time"
"github.com/newrelic/go-agent/v3/internal"
)
// harvestable is something that can be merged into a harvest.
type harvestable interface {
	// MergeIntoHarvest folds this value's data into the given harvest,
	// typically after a transaction ends or when a failed payload is
	// rolled into the next harvest period.
	MergeIntoHarvest(h *harvest)
}
// harvestTypes is a bit set used to indicate which data types are ready to be
// reported.
type harvestTypes uint

const (
	// Each constant below occupies a distinct bit, so multiple types can
	// be combined (OR'd) into a single harvestTypes value.
	harvestMetricsTraces harvestTypes = 1 << iota
	harvestSpanEvents
	harvestCustomEvents
	harvestLogEvents
	harvestTxnEvents
	harvestErrorEvents
)
const (
	// harvestTypesEvents includes all Event types
	harvestTypesEvents = harvestSpanEvents | harvestCustomEvents | harvestTxnEvents | harvestErrorEvents | harvestLogEvents
	// harvestTypesAll includes all harvest types
	harvestTypesAll = harvestMetricsTraces | harvestTypesEvents
)
// harvestTimer tracks, per harvest type, how often data should be reported
// and when it was last reported, so ready() can decide which types are due.
type harvestTimer struct {
	// periods maps each harvest type (or combined bit set) to its report interval.
	periods map[harvestTypes]time.Duration
	// lastHarvest records the deadline used for each type's previous harvest.
	lastHarvest map[harvestTypes]time.Time
}
// newHarvestTimer returns a harvestTimer whose per-type clocks all start at now.
func newHarvestTimer(now time.Time, periods map[harvestTypes]time.Duration) *harvestTimer {
	timer := &harvestTimer{
		periods:     periods,
		lastHarvest: make(map[harvestTypes]time.Time, len(periods)),
	}
	// Every configured type begins its first period at the same instant.
	for tp := range periods {
		timer.lastHarvest[tp] = now
	}
	return timer
}
// ready returns the bit set of harvest types whose report period has elapsed
// as of now, advancing each such type's clock by exactly one period.
func (timer *harvestTimer) ready(now time.Time) (ready harvestTypes) {
	for tp, period := range timer.periods {
		deadline := timer.lastHarvest[tp].Add(period)
		if !now.After(deadline) {
			continue
		}
		// Advance to the deadline (not to now) so the harvest cadence
		// stays fixed rather than drifting with call timing.
		timer.lastHarvest[tp] = deadline
		ready |= tp
	}
	return ready
}
// harvest contains collected data awaiting transmission to the collector.
type harvest struct {
	// timer determines which of the fields below are due for harvest.
	timer *harvestTimer

	// Metrics, traces, and slow SQL are harvested together under the
	// harvestMetricsTraces bit.
	Metrics     *metricTable
	ErrorTraces harvestErrors
	TxnTraces   *harvestTraces
	SlowSQLs    *slowQueries

	// Event stores; each is swapped for a fresh instance when harvested.
	SpanEvents   *spanEvents
	CustomEvents *customEvents
	LogEvents    *logEvents
	TxnEvents    *txnEvents
	ErrorEvents  *errorEvents
}
const (
	// txnEventPayloadlimit is the maximum number of events that should be
	// sent up in one post.  Larger transaction event batches are split
	// into multiple payloads (see harvest.Payloads).
	txnEventPayloadlimit = 5000
)
// Ready returns a new harvest which contains the data types ready for harvest,
// or nil if no data is ready for harvest.
//
// For each ready type, ownership of the current store moves to the returned
// harvest and h receives a fresh, empty replacement with the same capacity.
func (h *harvest) Ready(now time.Time) *harvest {
	types := h.timer.ready(now)
	if types == 0 {
		return nil
	}

	ready := &harvest{}

	if types&harvestCustomEvents != 0 {
		h.Metrics.addCount(customEventsSeen, h.CustomEvents.NumSeen(), forced)
		h.Metrics.addCount(customEventsSent, h.CustomEvents.NumSaved(), forced)
		ready.CustomEvents = h.CustomEvents
		h.CustomEvents = newCustomEvents(h.CustomEvents.capacity())
	}
	if types&harvestLogEvents != 0 {
		h.LogEvents.RecordLoggingMetrics(h.Metrics)
		ready.LogEvents = h.LogEvents
		h.LogEvents = newLogEvents(h.LogEvents.commonAttributes, h.LogEvents.config)
	}
	if types&harvestTxnEvents != 0 {
		h.Metrics.addCount(txnEventsSeen, h.TxnEvents.NumSeen(), forced)
		h.Metrics.addCount(txnEventsSent, h.TxnEvents.NumSaved(), forced)
		ready.TxnEvents = h.TxnEvents
		h.TxnEvents = newTxnEvents(h.TxnEvents.capacity())
	}
	if types&harvestErrorEvents != 0 {
		h.Metrics.addCount(errorEventsSeen, h.ErrorEvents.NumSeen(), forced)
		h.Metrics.addCount(errorEventsSent, h.ErrorEvents.NumSaved(), forced)
		ready.ErrorEvents = h.ErrorEvents
		h.ErrorEvents = newErrorEvents(h.ErrorEvents.capacity())
	}
	if types&harvestSpanEvents != 0 {
		h.Metrics.addCount(spanEventsSeen, h.SpanEvents.NumSeen(), forced)
		h.Metrics.addCount(spanEventsSent, h.SpanEvents.NumSaved(), forced)
		ready.SpanEvents = h.SpanEvents
		h.SpanEvents = newSpanEvents(h.SpanEvents.capacity())
	}

	// NOTE! Metrics must happen after the event harvest conditionals to
	// ensure that the metrics contain the event supportability metrics.
	if types&harvestMetricsTraces != 0 {
		ready.Metrics = h.Metrics
		ready.ErrorTraces = h.ErrorTraces
		ready.SlowSQLs = h.SlowSQLs
		ready.TxnTraces = h.TxnTraces

		h.Metrics = newMetricTable(maxMetrics, now)
		h.ErrorTraces = newHarvestErrors(maxHarvestErrors)
		h.SlowSQLs = newSlowQueries(maxHarvestSlowSQLs)
		h.TxnTraces = newHarvestTraces()
	}

	return ready
}
// Payloads returns a slice of payload creators, one for each non-nil data
// store held by this harvest.  When splitLargeTxnEvents is true, transaction
// events are divided into posts of at most txnEventPayloadlimit events each.
func (h *harvest) Payloads(splitLargeTxnEvents bool) (ps []payloadCreator) {
	if h == nil {
		return nil
	}
	if h.CustomEvents != nil {
		ps = append(ps, h.CustomEvents)
	}
	if h.LogEvents != nil {
		ps = append(ps, h.LogEvents)
	}
	if h.ErrorEvents != nil {
		ps = append(ps, h.ErrorEvents)
	}
	if h.SpanEvents != nil {
		ps = append(ps, h.SpanEvents)
	}
	if h.Metrics != nil {
		ps = append(ps, h.Metrics)
	}
	if h.ErrorTraces != nil {
		ps = append(ps, h.ErrorTraces)
	}
	if h.TxnTraces != nil {
		ps = append(ps, h.TxnTraces)
	}
	if h.SlowSQLs != nil {
		ps = append(ps, h.SlowSQLs)
	}
	if h.TxnEvents != nil {
		if !splitLargeTxnEvents {
			ps = append(ps, h.TxnEvents)
		} else {
			ps = append(ps, h.TxnEvents.payloads(txnEventPayloadlimit)...)
		}
	}
	return ps
}
// harvestConfig carries the settings needed to build a harvest: per-type
// report periods, event capacity limits, and logging/attribute configuration.
type harvestConfig struct {
	// ReportPeriods maps harvest type bit sets to their report interval.
	ReportPeriods map[harvestTypes]time.Duration
	// CommonAttributes are attached to every log event.
	CommonAttributes commonAttributes
	LoggingConfig    loggingConfig
	// Maximum number of each event type retained per harvest period.
	MaxSpanEvents   int
	MaxCustomEvents int
	MaxErrorEvents  int
	MaxTxnEvents    int
}
// newHarvest returns a new Harvest with empty stores sized according to the
// supplied configuration, and a timer started at now.
func newHarvest(now time.Time, configurer harvestConfig) *harvest {
	h := &harvest{}
	h.timer = newHarvestTimer(now, configurer.ReportPeriods)

	// Metrics, traces, and slow SQL use fixed agent-wide capacities.
	h.Metrics = newMetricTable(maxMetrics, now)
	h.ErrorTraces = newHarvestErrors(maxHarvestErrors)
	h.TxnTraces = newHarvestTraces()
	h.SlowSQLs = newSlowQueries(maxHarvestSlowSQLs)

	// Event stores use the configured per-type limits.
	h.SpanEvents = newSpanEvents(configurer.MaxSpanEvents)
	h.CustomEvents = newCustomEvents(configurer.MaxCustomEvents)
	h.LogEvents = newLogEvents(configurer.CommonAttributes, configurer.LoggingConfig)
	h.TxnEvents = newTxnEvents(configurer.MaxTxnEvents)
	h.ErrorEvents = newErrorEvents(configurer.MaxErrorEvents)
	return h
}
// createTrackUsageMetrics records one forced count for each usage
// supportability metric reported by the internal package.
func createTrackUsageMetrics(metrics *metricTable) {
	usage := internal.GetUsageSupportabilityMetrics()
	for _, name := range usage {
		metrics.addSingleCount(name, forced)
	}
}
// createTraceObserverMetrics adds the trace observer's supportability
// metrics to the metric table.  A nil observer contributes nothing.
func createTraceObserverMetrics(to traceObserver, metrics *metricTable) {
	if to == nil {
		return
	}
	supportability := to.dumpSupportabilityMetrics()
	for name, count := range supportability {
		metrics.addCount(name, count, forced)
	}
}
// createAppLoggingSupportabilityMetrics records the application logging
// configuration's supportability metrics into the metric table.
func createAppLoggingSupportabilityMetrics(cfg *loggingConfig, metrics *metricTable) {
	cfg.connectMetrics(metrics)
}
// CreateFinalMetrics creates extra metrics at harvest time: instance
// reporting, configurable event-harvest supportability metrics, trace
// observer metrics, usage tracking, and logging supportability metrics.
// Finally it applies the collector's metric rename/ignore rules.
//
// It is a no-op when h is nil or when this harvest cycle does not include
// metrics.
func (h *harvest) CreateFinalMetrics(run *appRun, to traceObserver) {
	// Guard clauses first: the original read run.Reply/run.harvestConfig
	// before checking h, so a nil harvest still dereferenced run.  Bail
	// out before touching run at all.
	if h == nil {
		return
	}
	// Metrics will be non-nil when harvesting metrics (regardless of
	// whether or not there are any metrics to send).
	if h.Metrics == nil {
		return
	}

	reply := run.Reply
	hc := run.harvestConfig

	h.Metrics.addSingleCount(instanceReporting, forced)

	// Configurable event harvest supportability metrics:
	// https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#event-harvest-config
	period := reply.ConfigurablePeriod()
	h.Metrics.addDuration(supportReportPeriod, "", period, period, forced)
	h.Metrics.addValue(supportTxnEventLimit, "", float64(hc.MaxTxnEvents), forced)
	h.Metrics.addValue(supportCustomEventLimit, "", float64(hc.MaxCustomEvents), forced)
	h.Metrics.addValue(supportErrorEventLimit, "", float64(hc.MaxErrorEvents), forced)
	h.Metrics.addValue(supportSpanEventLimit, "", float64(hc.MaxSpanEvents), forced)
	h.Metrics.addValue(supportLogEventLimit, "", float64(hc.LoggingConfig.maxLogEvents), forced)

	createTraceObserverMetrics(to, h.Metrics)
	createTrackUsageMetrics(h.Metrics)
	createAppLoggingSupportabilityMetrics(&hc.LoggingConfig, h.Metrics)

	// Apply collector-supplied rename/ignore rules last so they see every
	// metric added above.
	h.Metrics = h.Metrics.ApplyRules(reply.MetricRules)
}
// payloadCreator is a data type in the harvest.
type payloadCreator interface {
	// In the event of a rpm request failure (hopefully simply an
	// intermittent collector issue) the payload may be merged into the next
	// time period's harvest.
	harvestable
	// Data prepares JSON in the format expected by the collector endpoint.
	// This method should return (nil, nil) if the payload is empty and no
	// rpm request is necessary.
	Data(agentRunID string, harvestStart time.Time) ([]byte, error)
	// EndpointMethod is used for the "method" query parameter when posting
	// the data.
	EndpointMethod() string
}
// createTxnMetrics creates metrics for a transaction: duration and total-time
// rollups, better-CAT caller/transport/error metrics, apdex, error counts,
// and queueing time.  Forced vs. unforced controls whether a metric survives
// metric-limit trimming.
func createTxnMetrics(args *txnData, metrics *metricTable) {
	// Rollup metric names drop the first segment of the transaction name.
	withoutFirstSegment := removeFirstSegment(args.FinalName)

	// Duration Metrics
	var durationRollup string
	var totalTimeRollup string
	if args.IsWeb {
		durationRollup = webRollup
		totalTimeRollup = totalTimeWeb
		// Web transactions additionally feed the dispatcher rollup.
		metrics.addDuration(dispatcherMetric, "", args.Duration, 0, forced)
	} else {
		durationRollup = backgroundRollup
		totalTimeRollup = totalTimeBackground
	}
	metrics.addDuration(args.FinalName, "", args.Duration, 0, forced)
	metrics.addDuration(durationRollup, "", args.Duration, 0, forced)
	metrics.addDuration(totalTimeRollup, "", args.TotalTime, args.TotalTime, forced)
	metrics.addDuration(totalTimeRollup+"/"+withoutFirstSegment, "", args.TotalTime, args.TotalTime, unforced)

	// Better CAT Metrics
	if cat := args.BetterCAT; cat.Enabled {
		// Start from the unknown-caller placeholder and fill in whatever
		// inbound distributed tracing information is available.
		caller := callerUnknown
		if nil != cat.Inbound && cat.Inbound.HasNewRelicTraceInfo {
			caller.Type = cat.Inbound.Type
			caller.App = cat.Inbound.App
			caller.Account = cat.Inbound.Account
		}
		if cat.TransportType != "" {
			caller.TransportType = cat.TransportType
		}
		m := durationByCallerMetric(caller)
		metrics.addDuration(m.all, "", args.Duration, args.Duration, unforced)
		metrics.addDuration(m.webOrOther(args.IsWeb), "", args.Duration, args.Duration, unforced)

		// Transport Duration Metric
		if nil != cat.Inbound && cat.Inbound.HasNewRelicTraceInfo {
			d := cat.Inbound.TransportDuration
			m = transportDurationMetric(caller)
			metrics.addDuration(m.all, "", d, d, unforced)
			metrics.addDuration(m.webOrOther(args.IsWeb), "", d, d, unforced)
		}

		// CAT Error Metrics
		if args.HasErrors() {
			m = errorsByCallerMetric(caller)
			metrics.addSingleCount(m.all, unforced)
			metrics.addSingleCount(m.webOrOther(args.IsWeb), unforced)
		}
		args.DistributedTracingSupport.createMetrics(metrics)
	}

	// Apdex Metrics (skipped when apdex does not apply to this transaction)
	if args.Zone != apdexNone {
		metrics.addApdex(apdexRollup, "", args.ApdexThreshold, args.Zone, forced)
		mname := apdexPrefix + withoutFirstSegment
		metrics.addApdex(mname, "", args.ApdexThreshold, args.Zone, unforced)
	}

	// Error Metrics
	if args.NoticeErrors() {
		metrics.addSingleCount(errorsRollupMetric.all, forced)
		metrics.addSingleCount(errorsRollupMetric.webOrOther(args.IsWeb), forced)
		metrics.addSingleCount(errorsPrefix+args.FinalName, forced)
	}
	if args.HasExpectedErrors() {
		metrics.addSingleCount(expectedErrorsRollupMetric.all, forced)
	}

	// Queueing Metrics
	if args.Queuing > 0 {
		metrics.addDuration(queueMetric, "", args.Queuing, args.Queuing, forced)
	}
}
var (
	// This should only be used by harvests in cases where a connect response is unavailable
	dfltHarvestCfgr = harvestConfig{
		// All data types share the single fixed harvest period until the
		// collector supplies per-type periods.
		ReportPeriods:   map[harvestTypes]time.Duration{harvestTypesAll: fixedHarvestPeriod},
		MaxTxnEvents:    internal.MaxTxnEvents,
		MaxSpanEvents:   internal.MaxSpanEvents,
		MaxCustomEvents: internal.MaxCustomEvents,
		MaxErrorEvents:  internal.MaxErrorEvents,
		// NOTE(review): unkeyed struct literal — field meanings depend on
		// loggingConfig's declaration order, which is not visible here;
		// confirm against the loggingConfig definition and prefer keyed
		// fields to guard against reordering.
		LoggingConfig: loggingConfig{
			true,
			false,
			true,
			false,
			internal.MaxLogEvents,
		},
	}
)