monitor.go

package k8smonitor

import (
	"context"
	"errors"
	"fmt"
	"sync"

	"go.aporeto.io/enforcerd/internal/extractors/containermetadata"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/config"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/constants"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/extractors"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/registerer"
	"k8s.io/client-go/kubernetes"
	listersv1 "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	criapi "k8s.io/cri-api/pkg/apis"
)

// K8sMonitor is the monitor for Kubernetes.
type K8sMonitor struct {
	nodename          string
	handlers          *config.ProcessorConfig
	metadataExtractor extractors.PodMetadataExtractor
	kubeClient        kubernetes.Interface
	podLister         listersv1.PodLister
	criRuntimeService criapi.RuntimeService
	podCache          podCacheInterface
	runtimeCache      runtimeCacheInterface
	startEventRetry   startEventRetryFunc

	// cniInstalledOrRuncProxyStartedCh signals that either the CNI plugin has
	// been installed or the runc proxy has started; the boolean mirrors that
	// state and is guarded by extMonitorStartedLock.
	cniInstalledOrRuncProxyStartedCh chan struct{}
	cniInstalledOrRuncProxyStarted   bool
	extMonitorStartedLock            sync.RWMutex
}

// New returns a new Kubernetes monitor.
func New(ctx context.Context) *K8sMonitor {
	m := &K8sMonitor{}
	m.podCache = newPodCache(m.updateEvent)
	m.runtimeCache = newRuntimeCache(ctx, m.stopEvent)
	m.cniInstalledOrRuncProxyStartedCh = make(chan struct{})
	return m
}
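
// A minimal construction sketch (hypothetical caller code; the enforcerd
// wiring that actually instantiates this monitor lives elsewhere):
//
//	ctx := context.Background()
//	m := New(ctx)
//	// m still needs SetupConfig and SetupHandlers before Run is called.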

// SetupConfig provides a configuration to implementations. Every implementation
// can have its own config type.
func (m *K8sMonitor) SetupConfig(_ registerer.Registerer, cfg interface{}) error {
	if cfg == nil {
		cfg = DefaultConfig()
	}
	kubernetesconfig, ok := cfg.(*Config)
	if !ok {
		return fmt.Errorf("invalid configuration specified (type '%T')", cfg)
	}
	kubernetesconfig = SetupDefaultConfig(kubernetesconfig)

	// simple config checks
	if kubernetesconfig.MetadataExtractor == nil {
		return errors.New("missing metadata extractor")
	}
	if kubernetesconfig.CRIRuntimeService == nil {
		return errors.New("missing CRIRuntimeService implementation")
	}

	// initialize most of our monitor
	m.nodename = kubernetesconfig.Nodename
	m.metadataExtractor = kubernetesconfig.MetadataExtractor
	m.criRuntimeService = kubernetesconfig.CRIRuntimeService

	// build the Kubernetes client config: use the provided kubeconfig if set,
	// otherwise fall back to the in-cluster configuration
	var (
		kubeCfg *rest.Config
		err     error
	)
	if len(kubernetesconfig.Kubeconfig) > 0 {
		kubeCfg, err = clientcmd.BuildConfigFromFlags("", kubernetesconfig.Kubeconfig)
	} else {
		kubeCfg, err = rest.InClusterConfig()
	}
	if err != nil {
		return err
	}

	// and initialize the client from it
	m.kubeClient, err = kubernetes.NewForConfig(kubeCfg)
	return err
}
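
// A configuration sketch (hypothetical values; the concrete metadata
// extractor and CRI runtime service come from other parts of enforcerd):
//
//	cfg := &Config{
//		Nodename:          "worker-1",   // name of the node we run on
//		Kubeconfig:        "",           // empty means in-cluster config
//		MetadataExtractor: podExtractor, // an extractors.PodMetadataExtractor
//		CRIRuntimeService: criRuntime,   // a criapi.RuntimeService
//	}
//	if err := m.SetupConfig(nil, cfg); err != nil {
//		// handle the configuration error
//	}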

// SetupHandlers sets up the handlers that the monitor invokes for various
// events, such as processing unit events and synchronization events. The
// consumer of the monitor calls this before Run().
func (m *K8sMonitor) SetupHandlers(c *config.ProcessorConfig) {
	m.handlers = c
}

// Run starts the monitor implementation.
func (m *K8sMonitor) Run(ctx context.Context) error {
	m.startEventRetry = newStartEventRetryFunc(ctx, containermetadata.AutoDetect(), m.startEvent)
	if m.kubeClient == nil {
		return errors.New("K8sMonitor: missing Kubernetes client")
	}
	if err := m.handlers.IsComplete(); err != nil {
		return fmt.Errorf("K8sMonitor: handlers are not complete: %w", err)
	}
	if m.handlers.ExternalEventSender == nil {
		return errors.New("K8sMonitor: external event sender option must be used together with this monitor")
	}

	// set up the informer for update events (this also starts the informer);
	// the returned pod lister shares the informer's underlying cache
	m.podLister = m.podCache.SetupInformer(ctx, m.kubeClient, m.nodename, defaultNeedsUpdate)

	// register ourselves with the gRPC server to receive events
	var registered bool
	for _, evs := range m.handlers.ExternalEventSender {
		if evs.SenderName() == constants.MonitorExtSenderName {
			if err := evs.Register(constants.K8sMonitorRegistrationName, m); err != nil {
				return fmt.Errorf("K8sMonitor: failed to register with the grpcMonitorServer external events sender: %w", err)
			}
			registered = true
			break
		}
	}
	if !registered {
		return errors.New("K8sMonitor: failed to register with the grpcMonitorServer external events sender: unavailable")
	}

	// get the list of pods on this node and generate events for them
	if err := m.onStartup(ctx, m.startEvent); err != nil {
		return fmt.Errorf("K8sMonitor: failed to get the list of pods running sandboxes from CRI and generate events for them: %w", err)
	}
	return nil
}
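
// A full lifecycle sketch (hypothetical caller code; processorConfig stands
// in for a fully populated *config.ProcessorConfig whose ExternalEventSender
// list includes the grpcMonitorServer sender):
//
//	m := New(ctx)
//	if err := m.SetupConfig(nil, cfg); err != nil {
//		return err
//	}
//	m.SetupHandlers(processorConfig)
//	if err := m.Run(ctx); err != nil {
//		return err
//	}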

// Resync should resynchronize PUs. This should be done while starting up.
// For this monitor it is a no-op: pod state is kept up to date by the informer.
func (m *K8sMonitor) Resync(_ context.Context) error {
	return nil
}