agent.go
442 lines (378 loc) · 12.8 KB

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package agent

import (
	"bytes"
	"context"
	"encoding/gob"
	"errors"
	"fmt"
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/perf"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"

	"github.com/cilium/cilium/api/v1/models"
	oldBPF "github.com/cilium/cilium/pkg/bpf"
	"github.com/cilium/cilium/pkg/lock"
	"github.com/cilium/cilium/pkg/logging/logfields"
	"github.com/cilium/cilium/pkg/maps/eventsmap"
	"github.com/cilium/cilium/pkg/monitor/agent/consumer"
	"github.com/cilium/cilium/pkg/monitor/agent/listener"
	"github.com/cilium/cilium/pkg/monitor/api"
	"github.com/cilium/cilium/pkg/monitor/payload"
	"github.com/cilium/cilium/pkg/time"
)

// isCtxDone is a utility function that returns true when the context's Done()
// channel is closed. It is intended to simplify goroutines that need to check
// this multiple times in their loop.
func isCtxDone(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return true
	default:
		return false
	}
}

type Agent interface {
	AttachToEventsMap(nPages int) error
	SendEvent(typ int, event interface{}) error
	RegisterNewListener(newListener listener.MonitorListener)
	RemoveListener(ml listener.MonitorListener)
	RegisterNewConsumer(newConsumer consumer.MonitorConsumer)
	RemoveConsumer(mc consumer.MonitorConsumer)
	State() *models.MonitorStatus
}

// Agent structure for centralizing the responsibilities of the main events
// reader.
// There is some racey-ness around perfReaderCancel since it replaces on every
// perf reader start. In the event that a MonitorListener from a previous
// generation calls its cleanup after the start of the new perf reader, we
// might call the new, and incorrect, cancel function. We guard for this by
// checking the number of listeners during the cleanup call. If the perf reader
// is running, it must have at least one MonitorListener (since one started it),
// so no cancel is called. If there are no listeners left, cancelling is the
// correct behavior (the older generation cancel must have been called for us
// to get this far anyway).
type agent struct {
	lock.Mutex
	models.MonitorStatus

	ctx              context.Context
	perfReaderCancel context.CancelFunc

	// listeners are external cilium monitor clients which receive raw
	// gob-encoded payloads
	listeners map[listener.MonitorListener]struct{}
	// consumers are internal clients which receive decoded messages
	consumers map[consumer.MonitorConsumer]struct{}

	events        *ebpf.Map
	monitorEvents *perf.Reader
}

// newAgent starts a new monitor agent instance which distributes monitor events
// to registered listeners. Once the datapath is set up, AttachToEventsMap needs
// to be called to receive events from the perf ring buffer. Otherwise, only
// user space events received via SendEvent are distributed to registered
// listeners. Internally, the agent spawns a singleton goroutine reading events
// from the BPF perf ring buffer and provides an interface to pass in non-BPF
// events. The instance can be stopped by cancelling ctx, which will stop the
// perf reader goroutine and close all registered listeners.
// Note that the perf buffer reader is started only when listeners are
// connected.
func newAgent(ctx context.Context) *agent {
	return &agent{
		ctx:              ctx,
		listeners:        make(map[listener.MonitorListener]struct{}),
		consumers:        make(map[consumer.MonitorConsumer]struct{}),
		perfReaderCancel: func() {}, // no-op to avoid doing null checks everywhere
	}
}
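
// exampleAgentSetup is a minimal sketch, written as a hypothetical caller
// inside this package, of the lifecycle described above: create the agent,
// attach it to the events map once the datapath is ready, and later stop the
// perf reader and close all listeners by invoking the returned cancel
// function. The function and its error handling are illustrative only and are
// not part of the monitor API.
func exampleAgentSetup(parent context.Context, nPages int) (Agent, context.CancelFunc, error) {
	ctx, cancel := context.WithCancel(parent)
	a := newAgent(ctx)

	// Until the events map is attached, only user space events passed to
	// SendEvent reach subscribers.
	if err := a.AttachToEventsMap(nPages); err != nil {
		cancel()
		return nil, nil, err
	}
	return a, cancel, nil
}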

// AttachToEventsMap opens the events perf ring buffer and makes it ready for
// consumption, such that any subscribed consumers may receive events
// from it. This function is to be called once the events map has been set up.
func (a *agent) AttachToEventsMap(nPages int) error {
	a.Lock()
	defer a.Unlock()

	if a.events != nil {
		return errors.New("events map already attached")
	}

	// assert that we can actually connect the monitor
	path := oldBPF.MapPath(eventsmap.MapName)
	eventsMap, err := ebpf.LoadPinnedMap(path, nil)
	if err != nil {
		return err
	}

	a.events = eventsMap
	a.MonitorStatus = models.MonitorStatus{
		Cpus:     int64(eventsMap.MaxEntries()),
		Npages:   int64(nPages),
		Pagesize: int64(os.Getpagesize()),
	}

	// start the perf reader if we already have subscribers
	if a.hasSubscribersLocked() {
		a.startPerfReaderLocked()
	}

	return nil
}

// SendEvent distributes an event to all monitor listeners
func (a *agent) SendEvent(typ int, event interface{}) error {
	if a == nil {
		return fmt.Errorf("monitor agent is not set up")
	}

	// Two types of clients are currently supported: consumers and listeners.
	// The former expect decoded messages, so the notification does not
	// require any additional marshalling operation before sending an event.
	// The latter expect gob-encoded payloads, and the whole marshalling
	// process may be quite expensive.
	// While we want to avoid marshalling events if there are no active
	// listeners, there's no need to check for active consumers ahead of time.

	a.notifyAgentEvent(typ, event)

	// do not marshal notifications if there are no active listeners
	if !a.hasListeners() {
		return nil
	}

	// marshal notifications into JSON format for legacy listeners
	if typ == api.MessageTypeAgent {
		msg, ok := event.(api.AgentNotifyMessage)
		if !ok {
			return errors.New("unexpected event type for MessageTypeAgent")
		}
		var err error
		event, err = msg.ToJSON()
		if err != nil {
			return fmt.Errorf("unable to JSON encode agent notification: %w", err)
		}
	}

	var buf bytes.Buffer
	if err := buf.WriteByte(byte(typ)); err != nil {
		return fmt.Errorf("unable to initialize buffer: %w", err)
	}
	if err := gob.NewEncoder(&buf).Encode(event); err != nil {
		return fmt.Errorf("unable to gob encode: %w", err)
	}

	p := payload.Payload{Data: buf.Bytes(), CPU: 0, Lost: 0, Type: payload.EventSample}
	a.sendToListeners(&p)

	return nil
}
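
// decodeListenerPayload is a hypothetical counterpart to SendEvent, included
// only to document the wire format produced above: the first byte of
// Payload.Data is the message type and the remainder is the gob-encoded event.
// The concrete type passed as out is an assumption of the caller; for
// api.MessageTypeAgent it must match the JSON-converted form produced by
// ToJSON above.
func decodeListenerPayload(p *payload.Payload, out interface{}) (int, error) {
	if len(p.Data) == 0 {
		return 0, errors.New("empty payload")
	}
	typ := int(p.Data[0])
	if err := gob.NewDecoder(bytes.NewReader(p.Data[1:])).Decode(out); err != nil {
		return typ, fmt.Errorf("unable to gob decode: %w", err)
	}
	return typ, nil
}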

// hasSubscribersLocked returns true if there are listeners or consumers
// subscribed to the agent right now.
// Note: it is critical to hold the lock for this operation.
func (a *agent) hasSubscribersLocked() bool {
	return len(a.listeners)+len(a.consumers) != 0
}

// hasListeners returns true if there are listeners subscribed to the
// agent right now.
func (a *agent) hasListeners() bool {
	a.Lock()
	defer a.Unlock()
	return len(a.listeners) != 0
}

// startPerfReaderLocked starts the perf reader. This should only be
// called if there are no other readers already running.
// The goroutine is spawned with a context derived from a.ctx and the
// cancelFunc is assigned to perfReaderCancel. Note that cancelling a.ctx
// (e.g. on program shutdown) will also cancel the derived context.
// Note: it is critical to hold the lock for this operation.
func (a *agent) startPerfReaderLocked() {
	if a.events == nil {
		return // not attached to events map yet
	}

	a.perfReaderCancel() // don't leak any old readers, just in case.

	perfEventReaderCtx, cancel := context.WithCancel(a.ctx)
	a.perfReaderCancel = cancel
	go a.handleEvents(perfEventReaderCtx)
}

// RegisterNewListener adds the new MonitorListener to the global list.
// It also spawns a singleton goroutine to read and distribute the events.
func (a *agent) RegisterNewListener(newListener listener.MonitorListener) {
	if a == nil {
		return
	}

	a.Lock()
	defer a.Unlock()

	if isCtxDone(a.ctx) {
		log.Debug("RegisterNewListener called on stopped monitor")
		newListener.Close()
		return
	}

	// If this is the first listener, start the perf reader
	if !a.hasSubscribersLocked() {
		a.startPerfReaderLocked()
	}

	version := newListener.Version()
	switch newListener.Version() {
	case listener.Version1_2:
		a.listeners[newListener] = struct{}{}
	default:
		newListener.Close()
		log.WithField("version", version).Error("Closing listener from unsupported monitor client version")
	}

	log.WithFields(logrus.Fields{
		"count.listener": len(a.listeners),
		"version":        version,
	}).Debug("New listener connected")
}

// RemoveListener deletes the MonitorListener from the list, closes its queue,
// and stops the perf reader if this is the last subscriber
func (a *agent) RemoveListener(ml listener.MonitorListener) {
	if a == nil {
		return
	}

	a.Lock()
	defer a.Unlock()

	// Remove the listener and close it.
	delete(a.listeners, ml)
	log.WithFields(logrus.Fields{
		"count.listener": len(a.listeners),
		"version":        ml.Version(),
	}).Debug("Removed listener")
	ml.Close()

	// If this was the final listener, shut down the perf reader and unmap our
	// ring buffer readers. This tells the kernel to not emit this data.
	// Note: it is critical to hold the lock and check the number of listeners.
	// This guards against an older generation listener calling the
	// current generation perfReaderCancel
	if !a.hasSubscribersLocked() {
		a.perfReaderCancel()
	}
}
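
// nopListener is a minimal sketch of a MonitorListener as this file uses it:
// the agent calls Version during registration, Enqueue for every distributed
// payload, and Close on removal or shutdown. It assumes the
// listener.MonitorListener interface consists of exactly these three methods;
// a real listener would forward enqueued payloads to a client connection
// instead of dropping them.
type nopListener struct{}

func (nopListener) Enqueue(pl *payload.Payload) {}
func (nopListener) Version() listener.Version   { return listener.Version1_2 }
func (nopListener) Close()                      {}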

// RegisterNewConsumer adds the new MonitorConsumer to the global list.
// It also spawns a singleton goroutine to read and distribute the events.
func (a *agent) RegisterNewConsumer(newConsumer consumer.MonitorConsumer) {
	if a == nil {
		return
	}

	if isCtxDone(a.ctx) {
		log.Debug("RegisterNewConsumer called on stopped monitor")
		return
	}

	a.Lock()
	defer a.Unlock()

	if !a.hasSubscribersLocked() {
		a.startPerfReaderLocked()
	}
	a.consumers[newConsumer] = struct{}{}
}

// RemoveConsumer deletes the MonitorConsumer from the list, closes its queue,
// and stops the perf reader if this is the last subscriber
func (a *agent) RemoveConsumer(mc consumer.MonitorConsumer) {
	if a == nil {
		return
	}

	a.Lock()
	defer a.Unlock()

	delete(a.consumers, mc)
	if !a.hasSubscribersLocked() {
		a.perfReaderCancel()
	}
}
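
// countingConsumer is a minimal sketch of a MonitorConsumer that could be
// passed to RegisterNewConsumer. It assumes the consumer.MonitorConsumer
// interface is made up of the three callbacks invoked by this file:
// NotifyAgentEvent for user space events, NotifyPerfEvent for datapath
// samples, and NotifyPerfEventLost for lost-sample records. A real consumer
// would decode and forward the data instead of just counting it.
type countingConsumer struct {
	events uint64
	lost   uint64
}

func (c *countingConsumer) NotifyAgentEvent(typ int, message interface{}) { c.events++ }
func (c *countingConsumer) NotifyPerfEvent(data []byte, cpu int)          { c.events++ }
func (c *countingConsumer) NotifyPerfEventLost(numLostEvents uint64, cpu int) {
	c.lost += numLostEvents
}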

// handleEvents reads events from the perf buffer and processes them. It
// will exit when stopCtx is done. Note, however, that it will block in the
// Poll call but assumes enough events are generated that these blocks are
// short.
func (a *agent) handleEvents(stopCtx context.Context) {
	scopedLog := log.WithField(logfields.StartTime, time.Now())
	scopedLog.Info("Beginning to read perf buffer")
	defer scopedLog.Info("Stopped reading perf buffer")

	bufferSize := int(a.Pagesize * a.Npages)
	monitorEvents, err := perf.NewReader(a.events, bufferSize)
	if err != nil {
		scopedLog.WithError(err).Fatal("Cannot initialise BPF perf ring buffer sockets")
	}
	defer func() {
		monitorEvents.Close()
		a.Lock()
		a.monitorEvents = nil
		a.Unlock()
	}()

	a.Lock()
	a.monitorEvents = monitorEvents
	a.Unlock()

	for !isCtxDone(stopCtx) {
		record, err := monitorEvents.Read()
		switch {
		case isCtxDone(stopCtx):
			return
		case err != nil:
			if perf.IsUnknownEvent(err) {
				a.Lock()
				a.MonitorStatus.Unknown++
				a.Unlock()
			} else {
				scopedLog.WithError(err).Warn("Error received while reading from perf buffer")
				if errors.Is(err, unix.EBADFD) {
					return
				}
			}
			continue
		}

		a.processPerfRecord(scopedLog, record)
	}
}

// processPerfRecord processes a record from the datapath and sends it to any
// registered subscribers
func (a *agent) processPerfRecord(scopedLog *logrus.Entry, record perf.Record) {
	a.Lock()
	defer a.Unlock()

	if record.LostSamples > 0 {
		a.MonitorStatus.Lost += int64(record.LostSamples)
		a.notifyPerfEventLostLocked(record.LostSamples, record.CPU)
		a.sendToListenersLocked(&payload.Payload{
			CPU:  record.CPU,
			Lost: record.LostSamples,
			Type: payload.RecordLost,
		})
	} else {
		a.notifyPerfEventLocked(record.RawSample, record.CPU)
		a.sendToListenersLocked(&payload.Payload{
			Data: record.RawSample,
			CPU:  record.CPU,
			Type: payload.EventSample,
		})
	}
}
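
// handleEnqueuedPayload sketches how a hypothetical receiver on the listener
// side could tell the two payload kinds produced above apart: RecordLost
// payloads carry only the CPU and Lost counters, while EventSample payloads
// carry the raw datapath sample in Data.
func handleEnqueuedPayload(pl *payload.Payload) {
	switch pl.Type {
	case payload.RecordLost:
		log.WithFields(logrus.Fields{
			"cpu":  pl.CPU,
			"lost": pl.Lost,
		}).Debug("Datapath reported lost samples")
	case payload.EventSample:
		log.WithField("len", len(pl.Data)).Debug("Received datapath sample")
	}
}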

// State returns the current status of the monitor
func (a *agent) State() *models.MonitorStatus {
	if a == nil {
		return nil
	}

	a.Lock()
	defer a.Unlock()

	if a.monitorEvents == nil {
		return nil
	}

	// Shallow-copy the structure, then return the newly allocated copy.
	status := a.MonitorStatus
	return &status
}

// notifyAgentEvent notifies all consumers about an agent event.
func (a *agent) notifyAgentEvent(typ int, message interface{}) {
	a.Lock()
	defer a.Unlock()
	for mc := range a.consumers {
		mc.NotifyAgentEvent(typ, message)
	}
}

// notifyPerfEventLocked notifies all consumers about a perf event.
// The caller must hold the monitor lock.
func (a *agent) notifyPerfEventLocked(data []byte, cpu int) {
	for mc := range a.consumers {
		mc.NotifyPerfEvent(data, cpu)
	}
}

// notifyPerfEventLostLocked notifies all consumers about lost perf events.
// The caller must hold the monitor lock.
func (a *agent) notifyPerfEventLostLocked(numLostEvents uint64, cpu int) {
	for mc := range a.consumers {
		mc.NotifyPerfEventLost(numLostEvents, cpu)
	}
}

// sendToListeners enqueues the payload to all listeners.
func (a *agent) sendToListeners(pl *payload.Payload) {
	a.Lock()
	defer a.Unlock()
	a.sendToListenersLocked(pl)
}

// sendToListenersLocked enqueues the payload to all listeners while holding the
// monitor lock.
func (a *agent) sendToListenersLocked(pl *payload.Payload) {
	for ml := range a.listeners {
		ml.Enqueue(pl)
	}
}