monitor.go
package dockermonitor
import (
"context"
"errors"
"fmt"
"io"
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/aporeto-inc/trireme-lib/utils/contextstore"
"go.uber.org/zap"
"github.com/aporeto-inc/trireme-lib/collector"
"github.com/aporeto-inc/trireme-lib/constants"
"github.com/aporeto-inc/trireme-lib/policy"
"github.com/dchest/siphash"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/aporeto-inc/trireme-lib/internal/monitor/instance"
"github.com/aporeto-inc/trireme-lib/internal/monitor/rpc/registerer"
tevents "github.com/aporeto-inc/trireme-lib/rpc/events"
"github.com/aporeto-inc/trireme-lib/rpc/processor"
"github.com/aporeto-inc/trireme-lib/utils/cgnetcls"
"github.com/aporeto-inc/trireme-lib/utils/portspec"
dockerClient "github.com/docker/docker/client"
)
// Event is the type of various docker events.
type Event string
const (
// EventCreate represents the Docker "create" event.
EventCreate Event = "create"
// EventStart represents the Docker "start" event.
EventStart Event = "start"
// EventDie represents the Docker "die" event.
EventDie Event = "die"
// EventDestroy represents the Docker "destroy" event.
EventDestroy Event = "destroy"
// EventPause represents the Docker "pause" event.
EventPause Event = "pause"
// EventUnpause represents the Docker "unpause" event.
EventUnpause Event = "unpause"
// EventConnect represents the Docker "connect" event.
EventConnect Event = "connect"
// DockerClientVersion is the Docker API version requested when creating the client.
DockerClientVersion = "v1.23"
// dockerPingTimeout is the time to wait for a ping to succeed.
dockerPingTimeout = 2 * time.Second
// dockerRetryTimer is the time after which we will retry to bring docker up.
dockerRetryTimer = 10 * time.Second
// dockerInitializationWait is the maximum time to wait for the docker daemon to come up during initialization.
dockerInitializationWait = 2 * dockerRetryTimer
)
const (
cstorePath = "/var/run/trireme/docker"
)
// StoredContext is the format of the data stored in the context store.
type StoredContext struct {
containerInfo *types.ContainerJSON
Tags *policy.TagStore
}
// An EventHandler is the type of docker event handler functions.
type EventHandler func(event *events.Message) error
// A MetadataExtractor is a function used to extract a *policy.PURuntime from a given
// docker ContainerJSON.
type MetadataExtractor func(*types.ContainerJSON) (*policy.PURuntime, error)
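// The following is an illustrative sketch (not part of the original file) of a custom
// MetadataExtractor that wraps the default extractor and appends one extra tag. The
// tag key "@usr:extracted-by" and the value are hypothetical examples.
func exampleMetadataExtractor(info *types.ContainerJSON) (*policy.PURuntime, error) {
	puRuntime, err := defaultMetadataExtractor(info)
	if err != nil {
		return nil, err
	}
	// Append an extra user tag on top of whatever the default extractor produced.
	if tags := puRuntime.Tags(); tags != nil {
		tags.AppendKeyValue("@usr:extracted-by", "example-extractor")
		puRuntime.SetTags(tags)
	}
	return puRuntime, nil
}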
func contextIDFromDockerID(dockerID string) (string, error) {
if dockerID == "" {
return "", errors.New("unable to generate context id: empty docker id")
}
if len(dockerID) < 12 {
return "", fmt.Errorf("unable to generate context id: dockerid smaller than 12 characters: %s", dockerID)
}
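// Docker itself displays the first 12 characters of the full ID as the short container ID;
// that prefix is reused here as the context ID.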
return dockerID[:12], nil
}
func initDockerClient(socketType string, socketAddress string) (*dockerClient.Client, error) {
var socket string
switch socketType {
case "tcp":
socket = "https://" + socketAddress
case "unix":
// Sanity check that this path exists
if _, oserr := os.Stat(socketAddress); os.IsNotExist(oserr) {
return nil, oserr
}
socket = "unix://" + socketAddress
default:
return nil, fmt.Errorf("bad socket type: %s", socketType)
}
defaultHeaders := map[string]string{"User-Agent": "engine-api-dockerClient-1.0"}
dockerClient, err := dockerClient.NewClient(socket, DockerClientVersion, nil, defaultHeaders)
if err != nil {
return nil, fmt.Errorf("unable to create docker client: %s", err)
}
return dockerClient, nil
}
// defaultMetadataExtractor is the default metadata extractor for Docker
func defaultMetadataExtractor(info *types.ContainerJSON) (*policy.PURuntime, error) {
tags := policy.NewTagStore()
tags.AppendKeyValue("@sys:image", info.Config.Image)
tags.AppendKeyValue("@sys:name", info.Name)
for k, v := range info.Config.Labels {
tags.AppendKeyValue("@usr:"+k, v)
}
ipa := policy.ExtendedMap{
"bridge": info.NetworkSettings.IPAddress,
}
if info.HostConfig.NetworkMode == constants.DockerHostMode {
return policy.NewPURuntime(info.Name, info.State.Pid, "", tags, ipa, constants.LinuxProcessPU, hostModeOptions(info)), nil
}
return policy.NewPURuntime(info.Name, info.State.Pid, "", tags, ipa, constants.ContainerPU, nil), nil
}
// hostModeOptions creates the default options for a host-mode container. This is done
// based on the policy and the metadata extractor logic and can vary by implementation.
func hostModeOptions(dockerInfo *types.ContainerJSON) *policy.OptionsType {
options := policy.OptionsType{
CgroupName: strconv.Itoa(dockerInfo.State.Pid),
CgroupMark: strconv.FormatUint(cgnetcls.MarkVal(), 10),
}
for p := range dockerInfo.Config.ExposedPorts {
if p.Proto() == "tcp" {
s, err := portspec.NewPortSpecFromString(p.Port(), nil)
if err != nil {
continue
}
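// Expose each exposed TCP port of the container as a service; IP protocol number 6 is TCP.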
options.Services = append(options.Services, policy.Service{
Protocol: uint8(6),
Ports: s,
})
}
}
return &options
}
// Config is the configuration options to start a Docker monitor
type Config struct {
EventMetadataExtractor MetadataExtractor
SocketType string
SocketAddress string
SyncAtStart bool
KillContainerOnPolicyError bool
NoProxyMode bool
}
// DefaultConfig provides a default configuration
func DefaultConfig() *Config {
return &Config{
EventMetadataExtractor: defaultMetadataExtractor,
SocketType: string(constants.DefaultDockerSocketType),
SocketAddress: constants.DefaultDockerSocket,
SyncAtStart: true,
KillContainerOnPolicyError: false,
NoProxyMode: false,
}
}
// SetupDefaultConfig adds defaults to a partial configuration
func SetupDefaultConfig(dockerConfig *Config) *Config {
defaultConfig := DefaultConfig()
if dockerConfig.EventMetadataExtractor == nil {
dockerConfig.EventMetadataExtractor = defaultConfig.EventMetadataExtractor
}
if dockerConfig.SocketType == "" {
dockerConfig.SocketType = defaultConfig.SocketType
}
if dockerConfig.SocketAddress == "" {
dockerConfig.SocketAddress = defaultConfig.SocketAddress
}
return dockerConfig
}
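// The following is an illustrative sketch (not part of the original file) showing how a
// caller might build a partial Config and let SetupDefaultConfig fill in the rest. The
// socket path below is an assumption for illustration only.
var exampleDockerConfig = SetupDefaultConfig(&Config{
	SocketType:    "unix",
	SocketAddress: "/var/run/docker.sock",
	SyncAtStart:   true,
})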
// dockerMonitor implements the connection to Docker and monitoring based on events
type dockerMonitor struct {
dockerClient *dockerClient.Client
socketType string
socketAddress string
metadataExtractor MetadataExtractor
handlers map[Event]func(event *events.Message) error
eventnotifications []chan *events.Message
stopprocessor []chan bool
numberOfQueues int
stoplistener chan bool
config *processor.Config
netcls cgnetcls.Cgroupnetcls
// killContainerOnPolicyError, if enabled, kills the container if a policy setting resulted in an error.
killContainerOnPolicyError bool
syncAtStart bool
NoProxyMode bool
cstore contextstore.ContextStore
}
// New returns a new docker monitor
func New() monitorinstance.Implementation {
return &dockerMonitor{}
}
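// A typical lifecycle, as an illustrative sketch (it is assumed here that the
// monitorinstance.Implementation interface exposes these methods, and that the
// processor.Config value comes from the caller; the nil registerer is for illustration):
//
//	m := New()
//	if err := m.SetupConfig(nil, DefaultConfig()); err != nil {
//		// handle configuration error
//	}
//	m.SetupHandlers(processorConfig)
//	if err := m.Start(); err != nil {
//		// handle start error
//	}
//	defer m.Stop() // nolint: errcheck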
// SetupConfig provides a configuration to implementations. Every implementation
// can have its own config type.
func (d *dockerMonitor) SetupConfig(registerer registerer.Registerer, cfg interface{}) (err error) {
defaultConfig := DefaultConfig()
if cfg == nil {
cfg = defaultConfig
}
dockerConfig, ok := cfg.(*Config)
if !ok {
return fmt.Errorf("Invalid configuration specified")
}
// Setup defaults
dockerConfig = SetupDefaultConfig(dockerConfig)
d.socketType = dockerConfig.SocketType
d.socketAddress = dockerConfig.SocketAddress
d.metadataExtractor = dockerConfig.EventMetadataExtractor
d.syncAtStart = dockerConfig.SyncAtStart
d.killContainerOnPolicyError = dockerConfig.KillContainerOnPolicyError
d.handlers = make(map[Event]func(event *events.Message) error)
d.stoplistener = make(chan bool)
d.netcls = cgnetcls.NewDockerCgroupNetController()
d.numberOfQueues = runtime.NumCPU() * 8
d.eventnotifications = make([]chan *events.Message, d.numberOfQueues)
d.stopprocessor = make([]chan bool, d.numberOfQueues)
d.NoProxyMode = dockerConfig.NoProxyMode
d.cstore = contextstore.NewFileContextStore(cstorePath, nil)
for i := 0; i < d.numberOfQueues; i++ {
d.eventnotifications[i] = make(chan *events.Message, 1000)
d.stopprocessor[i] = make(chan bool)
}
// Add handlers for the events that we know how to process
d.addHandler(EventCreate, d.handleCreateEvent)
d.addHandler(EventStart, d.handleStartEvent)
d.addHandler(EventDie, d.handleDieEvent)
d.addHandler(EventDestroy, d.handleDestroyEvent)
d.addHandler(EventPause, d.handlePauseEvent)
d.addHandler(EventUnpause, d.handleUnpauseEvent)
return nil
}
// SetupHandlers sets up handlers for monitors to invoke for various events such as
// processing unit events and synchronization events. This will be called before Start()
// by the consumer of the monitor
func (d *dockerMonitor) SetupHandlers(c *processor.Config) {
d.config = c
}
// addHandler adds a callback handler for the given docker event.
// Interesting event names include 'start' and 'die'. For more on events see
// https://docs.docker.com/engine/reference/api/docker_remote_api/
// under the section 'Docker Events'.
func (d *dockerMonitor) addHandler(event Event, handler EventHandler) {
d.handlers[event] = handler
}
// sendRequestToQueue sends a request to a channel based on a hash function
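// Hashing on the container ID guarantees that all events for a given container land on
// the same queue, preserving per-container ordering while different containers are
// processed in parallel.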
func (d *dockerMonitor) sendRequestToQueue(r *events.Message) {
key0 := uint64(256203161)
key1 := uint64(982451653)
h := siphash.Hash(key0, key1, []byte(r.ID))
d.eventnotifications[int(h%uint64(d.numberOfQueues))] <- r
}
func (d *dockerMonitor) setupDockerDaemon() (err error) {
if d.dockerClient == nil {
// Initialize client
if d.dockerClient, err = initDockerClient(d.socketType, d.socketAddress); err != nil {
return err
}
}
ctx, cancel := context.WithTimeout(context.Background(), dockerPingTimeout)
defer cancel()
_, err = d.dockerClient.Ping(ctx)
return err
}
// waitForDockerDaemon is a blocking call that keeps trying to reach the docker daemon
// and returns an error if the context times out first.
func (d *dockerMonitor) waitForDockerDaemon(ctx context.Context) (err error) {
done := make(chan bool, 1)
go func() {
// Keep retrying until the docker daemon answers a ping. The channel is buffered
// so this goroutine does not leak if the surrounding context times out first.
for errg := d.setupDockerDaemon(); errg != nil; errg = d.setupDockerDaemon() {
zap.L().Debug("Unable to init docker client. Retrying...", zap.Error(errg))
<-time.After(dockerRetryTimer)
}
done <- true
}()
select {
case <-ctx.Done():
err = ctx.Err()
case <-done:
}
if err == nil {
// Start the eventListener and wait on the channel until it is ready.
listenerReady := make(chan struct{})
go d.eventListener(listenerReady)
<-listenerReady
}
return err
}
// Start will start the Docker policy enforcement.
// It applies a policy to each container that is already up and running,
// and it listens to all container events.
func (d *dockerMonitor) Start() error {
if err := d.config.IsComplete(); err != nil {
return fmt.Errorf("docker: %s", err)
}
ctx, cancel := context.WithTimeout(context.Background(), dockerInitializationWait)
defer cancel()
// Start the event processors so that events received during the resync are handled.
go d.eventProcessors()
err := d.waitForDockerDaemon(ctx)
if err == nil {
zap.L().Debug("Docker daemon setup")
// Sync all existing containers, depending on the monitor settings.
if err := d.ReSync(); err != nil {
zap.L().Error("Unable to sync existing containers", zap.Error(err))
}
} else {
zap.L().Info("Docker resync skipped")
}
return nil
}
// Stop monitoring docker events.
func (d *dockerMonitor) Stop() error {
zap.L().Debug("Stopping the docker monitor")
d.stoplistener <- true
for i := 0; i < d.numberOfQueues; i++ {
d.stopprocessor[i] <- true
}
return nil
}
// eventProcessors processes docker events from the event queues.
func (d *dockerMonitor) eventProcessors() {
for i := 0; i < d.numberOfQueues; i++ {
go func(i int) {
for {
select {
case event := <-d.eventnotifications[i]:
if event.Action != "" {
f, ok := d.handlers[Event(event.Action)]
if ok {
err := f(event)
if err != nil {
zap.L().Error("Unable to handle docker event",
zap.String("action", event.Action),
zap.Error(err),
)
}
} else {
zap.L().Debug("Docker event not handled",
zap.String("action", event.Action),
zap.String("ID", event.ID),
)
}
} else {
zap.L().Info("Empty event",
zap.String("ID", event.ID),
)
}
case <-d.stopprocessor[i]:
return
}
}
}(i)
}
}
// eventListener listens to Docker events from the daemon and passes them
// to the processors through buffered channels. This minimizes the chances
// that we will miss events because a processor is delayed.
func (d *dockerMonitor) eventListener(listenerReady chan struct{}) {
options := types.EventsOptions{}
options.Filters = filters.NewArgs()
options.Filters.Add("type", "container")
messages, errs := d.dockerClient.Events(context.Background(), options)
// Once the event channel has been returned by Docker, we report the ready status.
listenerReady <- struct{}{}
for {
select {
case message := <-messages:
zap.L().Debug("Got message from docker client",
zap.String("action", message.Action),
zap.String("ID", message.ID),
)
d.sendRequestToQueue(&message)
case err := <-errs:
if err != nil && err != io.EOF {
zap.L().Warn("Received docker event error",
zap.Error(err),
)
}
case stop := <-d.stoplistener:
if stop {
return
}
}
}
}
// ReSync resyncs all the existing containers on the host, using the
// same process as when a container is initially spawned.
func (d *dockerMonitor) ReSync() error {
if !d.syncAtStart {
zap.L().Debug("No synchronization of containers performed")
return nil
}
zap.L().Debug("Syncing all existing containers")
options := types.ContainerListOptions{All: true}
containers, err := d.dockerClient.ContainerList(context.Background(), options)
if err != nil {
return fmt.Errorf("unable to get container list: %s", err)
}
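// First pass: if a sync handler is configured, replay the current state of every
// container to it.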
if d.config.SyncHandler != nil {
for _, c := range containers {
container, err := d.dockerClient.ContainerInspect(context.Background(), c.ID)
if err != nil {
zap.L().Error("unable to sync existing container",
zap.String("dockerID", c.ID),
zap.Error(err),
)
continue
}
contextID, _ := contextIDFromDockerID(container.ID)
if d.NoProxyMode {
storedContext := &StoredContext{}
if err = d.cstore.Retrieve(contextID, &storedContext); err == nil {
container.Config.Labels["storedTags"] = strings.Join(storedContext.Tags.GetSlice(), ",")
} else {
if err = d.startDockerContainer(&container); err != nil {
zap.L().Debug("Could Not restart docker container", zap.String("ID", container.ID), zap.Error(err))
}
continue
}
}
PURuntime, _ := d.extractMetadata(&container)
var state tevents.State
if container.State.Running {
if !container.State.Paused {
state = tevents.StateStarted
} else {
state = tevents.StatePaused
}
} else {
state = tevents.StateStopped
}
if d.config.SyncHandler != nil {
if d.NoProxyMode {
storedContext := &StoredContext{}
if err = d.cstore.Retrieve(contextID, &storedContext); err != nil {
// We don't know about this container; skip syncing it.
continue
}
t := PURuntime.Tags()
if t != nil && storedContext.Tags != nil {
t.Merge(storedContext.Tags)
PURuntime.SetTags(t)
}
}
if err := d.config.SyncHandler.HandleSynchronization(
contextID,
state,
PURuntime,
processor.SynchronizationTypeInitial,
); err != nil {
zap.L().Error("Unable to sync existing Container",
zap.String("dockerID", c.ID),
zap.Error(err),
)
}
}
}
}
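// Second pass: re-run the start logic for every running container so that policy and
// host-mode cgroups are re-applied.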
for _, c := range containers {
container, err := d.dockerClient.ContainerInspect(context.Background(), c.ID)
if err != nil {
zap.L().Error("Unable to sync existing container during inspect",
zap.String("dockerID", c.ID),
zap.Error(err),
)
continue
}
contextID, _ := contextIDFromDockerID(container.ID)
if d.NoProxyMode {
storedContext := &StoredContext{}
if err = d.cstore.Retrieve(contextID, &storedContext); err == nil {
container.Config.Labels["storedTags"] = strings.Join(storedContext.Tags.GetSlice(), ",")
}
}
if err := d.startDockerContainer(&container); err != nil {
zap.L().Error("Unable to sync existing container during start handling",
zap.String("dockerID", c.ID),
zap.Error(err),
)
continue
}
zap.L().Debug("Successfully synced container", zap.String("dockerID", container.ID))
}
return nil
}
// setupHostMode sets up the net_cls cgroup for the host mode
func (d *dockerMonitor) setupHostMode(contextID string, runtimeInfo *policy.PURuntime, dockerInfo *types.ContainerJSON) error {
if err := d.netcls.Creategroup(contextID); err != nil {
return err
}
markval := runtimeInfo.Options().CgroupMark
if markval == "" {
if derr := d.netcls.DeleteCgroup(contextID); derr != nil {
zap.L().Warn("Failed to clean cgroup",
zap.String("contextID", contextID),
zap.Error(derr),
)
}
return errors.New("mark value not found")
}
mark, _ := strconv.ParseUint(markval, 10, 32)
if err := d.netcls.AssignMark(contextID, mark); err != nil {
if derr := d.netcls.DeleteCgroup(contextID); derr != nil {
zap.L().Warn("Failed to clean cgroup",
zap.String("contextID", contextID),
zap.Error(derr),
)
}
return err
}
if err := d.netcls.AddProcess(contextID, dockerInfo.State.Pid); err != nil {
if derr := d.netcls.DeleteCgroup(contextID); derr != nil {
zap.L().Warn("Failed to clean cgroup",
zap.String("contextID", contextID),
zap.Error(derr),
)
}
return err
}
return nil
}
func (d *dockerMonitor) startDockerContainer(dockerInfo *types.ContainerJSON) error {
if !dockerInfo.State.Running {
return nil
}
contextID, err := contextIDFromDockerID(dockerInfo.ID)
if err != nil {
return err
}
storedContext := &StoredContext{}
if d.cstore != nil {
if err = d.cstore.Retrieve(contextID, &storedContext); err == nil {
if storedContext.Tags != nil {
dockerInfo.Config.Labels["storedTags"] = strings.Join(storedContext.Tags.GetSlice(), ",")
}
}
}
runtimeInfo, err := d.extractMetadata(dockerInfo)
if err != nil {
return err
}
t := runtimeInfo.Tags()
if t != nil && storedContext.Tags != nil {
t.Merge(storedContext.Tags)
runtimeInfo.SetTags(t)
}
if err = d.config.PUHandler.CreatePURuntime(contextID, runtimeInfo); err != nil {
return err
}
var event tevents.Event
switch dockerInfo.State.Status {
case "paused":
event = tevents.EventPause
case "running":
event = tevents.EventStart
case "dead":
event = tevents.EventStop
default:
// We are restarting. Feed a start event here; it might as well be a stop, since we will
// get a start notification when the container finishes restarting.
event = tevents.EventStart
}
if err = d.config.PUHandler.HandlePUEvent(contextID, event); err != nil {
if d.killContainerOnPolicyError {
if derr := d.dockerClient.ContainerRemove(context.Background(), dockerInfo.ID, types.ContainerRemoveOptions{Force: true}); derr != nil {
return fmt.Errorf("unable to set policy: unable to remove container %s: %s, %s", contextID, err, derr)
}
return fmt.Errorf("unable to set policy: removed container %s: %s", contextID, err)
}
return fmt.Errorf("unable to set policy: container %s kept alive per policy: %s", contextID, err)
}
if dockerInfo.HostConfig.NetworkMode == constants.DockerHostMode {
if err = d.setupHostMode(contextID, runtimeInfo, dockerInfo); err != nil {
return fmt.Errorf("unable to setup host mode for container %s: %s", contextID, err)
}
}
storedContext = &StoredContext{
containerInfo: dockerInfo,
Tags: runtimeInfo.Tags(),
}
return d.cstore.Store(contextID, storedContext)
}
func (d *dockerMonitor) stopDockerContainer(dockerID string) error {
contextID, err := contextIDFromDockerID(dockerID)
if err != nil {
return err
}
if err = d.cstore.Remove(contextID); err != nil {
return err
}
return d.config.PUHandler.HandlePUEvent(contextID, tevents.EventStop)
}
// extractMetadata generates the RuntimeInfo based on the Docker primitives.
func (d *dockerMonitor) extractMetadata(dockerInfo *types.ContainerJSON) (*policy.PURuntime, error) {
if dockerInfo == nil {
return nil, errors.New("docker info is empty")
}
if d.metadataExtractor != nil {
return d.metadataExtractor(dockerInfo)
}
return defaultMetadataExtractor(dockerInfo)
}
// handleCreateEvent generates a create event type.
func (d *dockerMonitor) handleCreateEvent(event *events.Message) error {
contextID, err := contextIDFromDockerID(event.ID)
if err != nil {
return err
}
return d.config.PUHandler.HandlePUEvent(contextID, tevents.EventCreate)
}
// handleStartEvent will notify the agent immediately about the event in order
// to start the enforcement functions. The agent must query
// the policy engine for details on what to do with this container.
func (d *dockerMonitor) handleStartEvent(event *events.Message) error {
timeout := time.Second * 0
contextID, err := contextIDFromDockerID(event.ID)
if err != nil {
return err
}
info, err := d.dockerClient.ContainerInspect(context.Background(), event.ID)
if err != nil {
// If we see errors, we will kill the container for security reasons if DockerMonitor was configured to do so.
if d.killContainerOnPolicyError {
if err1 := d.dockerClient.ContainerStop(context.Background(), event.ID, &timeout); err1 != nil {
zap.L().Warn("Unable to stop illegal container",
zap.String("dockerID", contextID),
zap.Error(err1),
)
}
d.config.Collector.CollectContainerEvent(&collector.ContainerRecord{
ContextID: contextID,
IPAddress: nil,
Tags: nil,
Event: collector.ContainerFailed,
})
return fmt.Errorf("unable to read container information: container %s killed: %s", contextID, err)
}
return fmt.Errorf("unable to read container information: container %s kept alive per policy: %s", contextID, err)
}
return d.startDockerContainer(&info)
}
// handleDieEvent is called when a container dies. It generates a stop event.
func (d *dockerMonitor) handleDieEvent(event *events.Message) error {
return d.stopDockerContainer(event.ID)
}
// handleDestroyEvent handles destroy events from Docker. It generates a destroy event.
func (d *dockerMonitor) handleDestroyEvent(event *events.Message) error {
contextID, err := contextIDFromDockerID(event.ID)
if err != nil {
return err
}
err = d.config.PUHandler.HandlePUEvent(contextID, tevents.EventDestroy)
if err != nil {
zap.L().Error("Failed to handle delete event",
zap.Error(err),
)
}
if err := d.netcls.DeleteCgroup(contextID); err != nil {
zap.L().Warn("Failed to clean netcls group",
zap.String("contextID", contextID),
zap.Error(err),
)
}
return nil
}
// handlePauseEvent generates a pause event type.
func (d *dockerMonitor) handlePauseEvent(event *events.Message) error {
zap.L().Info("UnPause Event for nativeID", zap.String("ID", event.ID))
contextID, err := contextIDFromDockerID(event.ID)
if err != nil {
return err
}
return d.config.PUHandler.HandlePUEvent(contextID, tevents.EventPause)
}
// handleUnpauseEvent generates an unpause event type.
func (d *dockerMonitor) handleUnpauseEvent(event *events.Message) error {
contextID, err := contextIDFromDockerID(event.ID)
if err != nil {
return err
}
return d.config.PUHandler.HandlePUEvent(contextID, tevents.EventUnpause)
}