monitoring.go (forked from elastic/beats)
package pipeline

import "github.com/elastic/beats/libbeat/monitoring"

// observer is used by many components in the publisher pipeline to report
// internal events. The observer can call registered global event handlers or
// update shared counters/metrics for reporting.
//
// All events required for reporting events/metrics on the pipeline-global
// level are defined by observer. The components themselves only serve
// localized event handlers (e.g. the client-centric event callbacks).
type observer struct {
	metrics *monitoring.Registry

	// clients metrics
	clients *monitoring.Uint

	// events publish/dropped stats
	events, filtered, published, failed *monitoring.Uint
	dropped, retry                      *monitoring.Uint // (retryer) drop/retry counters
	activeEvents                        *monitoring.Uint

	// queue metrics
	ackedQueue *monitoring.Uint
}

func (o *observer) init(metrics *monitoring.Registry) {
	o.metrics = metrics
	reg := metrics.GetRegistry("pipeline")
	if reg == nil {
		reg = metrics.NewRegistry("pipeline")
	}

	*o = observer{
		metrics:      metrics,
		clients:      monitoring.NewUint(reg, "clients"),
		events:       monitoring.NewUint(reg, "events.total"),
		filtered:     monitoring.NewUint(reg, "events.filtered"),
		published:    monitoring.NewUint(reg, "events.published"),
		failed:       monitoring.NewUint(reg, "events.failed"),
		dropped:      monitoring.NewUint(reg, "events.dropped"),
		retry:        monitoring.NewUint(reg, "events.retry"),
		ackedQueue:   monitoring.NewUint(reg, "queue.acked"),
		activeEvents: monitoring.NewUint(reg, "events.active"),
	}
}
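
// The counters above live in the "pipeline" sub-registry of the passed-in
// metrics registry, so when that registry is exported they typically show up
// as pipeline.clients, pipeline.events.total, pipeline.events.active,
// pipeline.queue.acked, and so on (the exact paths depend on how the parent
// registry is published).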

func (o *observer) cleanup() {
	o.metrics.Remove("pipeline") // drop all metrics from registry
}

//
// client connects/disconnects
//

// (pipeline) pipeline did finish creating a new client instance
func (o *observer) clientConnected() { o.clients.Inc() }

// (client) close being called on client
func (o *observer) clientClosing() {}

// (client) client finished processing close
func (o *observer) clientClosed() { o.clients.Dec() }

//
// client publish events
//

// (client) client is trying to publish a new event
func (o *observer) newEvent() {
	o.events.Inc()
	o.activeEvents.Inc()
}

// (client) event is filtered out (on purpose or failed)
func (o *observer) filteredEvent() {
	o.filtered.Inc()
	o.activeEvents.Dec()
}

// (client) managed to push an event into the publisher pipeline
func (o *observer) publishedEvent() {
	o.published.Inc()
}

// (client) client closing down or DropIfFull is set
func (o *observer) failedPublishEvent() {
	o.failed.Inc()
	o.activeEvents.Dec()
}

//
// queue events
//

// (queue) number of events ACKed by the queue/broker in use
func (o *observer) queueACKed(n int) {
	o.ackedQueue.Add(uint64(n))
	o.activeEvents.Sub(uint64(n))
}
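
// Together with newEvent, filteredEvent and failedPublishEvent above, this
// keeps events.active tracking the number of in-flight events: each event
// increments the counter once in newEvent and decrements it exactly once,
// either when the client filters/fails it or when the queue ACKs it here.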

//
// pipeline output events
//

// (controller) new output group is about to be loaded
func (o *observer) updateOutputGroup() {}

// (retryer) new failed batch has been received
func (o *observer) eventsFailed(int) {}

// (retryer) number of events dropped by retryer
func (o *observer) eventsDropped(n int) {
	o.dropped.Add(uint64(n))
}

// (retryer) number of events pushed to the output worker queue
func (o *observer) eventsRetry(n int) {
	o.retry.Add(uint64(n))
}

// (output) number of events to be forwarded to the output client
func (o *observer) outBatchSend(int) {}

// (output) number of events acked by the output batch
func (o *observer) outBatchACKed(int) {}