forked from keel-hq/keel
/
watcher.go
125 lines (105 loc) · 3.5 KB
/
watcher.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
package k8s
import (
"time"
"github.com/alwinius/bow/internal/workgroup"
"github.com/sirupsen/logrus"
apps_v1 "k8s.io/api/apps/v1"
v1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
// WatchDeployments creates a SharedInformer for apps/v1.Deployments and registers it with g.
func WatchDeployments(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
	c := client.AppsV1().RESTClient()
	watch(g, c, log, "deployments", new(apps_v1.Deployment), rs...)
}
// WatchStatefulSets creates a SharedInformer for apps/v1.StatefulSet and registers it with g.
func WatchStatefulSets(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
	c := client.AppsV1().RESTClient()
	watch(g, c, log, "statefulsets", new(apps_v1.StatefulSet), rs...)
}
// WatchDaemonSets creates a SharedInformer for apps/v1.DaemonSet and registers it with g.
func WatchDaemonSets(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
	c := client.AppsV1().RESTClient()
	watch(g, c, log, "daemonsets", new(apps_v1.DaemonSet), rs...)
}
// WatchCronJobs creates a SharedInformer for v1beta1.CronJob and registers it with g.
func WatchCronJobs(g *workgroup.Group, client *kubernetes.Clientset, log logrus.FieldLogger, rs ...cache.ResourceEventHandler) {
	c := client.BatchV1beta1().RESTClient()
	watch(g, c, log, "cronjobs", new(v1beta1.CronJob), rs...)
}
// watch wires a ListWatch for the named resource (across all namespaces)
// into a SharedInformer with a 30-minute resync, attaches every supplied
// event handler, and runs the informer under g until its stop channel closes.
func watch(g *workgroup.Group, c cache.Getter, log logrus.FieldLogger, resource string, objType runtime.Object, rs ...cache.ResourceEventHandler) {
	lw := cache.NewListWatchFromClient(c, resource, v1.NamespaceAll, fields.Everything())
	inf := cache.NewSharedInformer(lw, objType, 30*time.Minute)
	for _, handler := range rs {
		inf.AddEventHandler(handler)
	}
	g.Add(func(stop <-chan struct{}) {
		l := log.WithField("resource", resource)
		l.Println("started")
		defer l.Println("stopped")
		inf.Run(stop)
	})
}
// buffer is a cache.ResourceEventHandler that decouples informer callbacks
// from a downstream handler: OnAdd/OnUpdate/OnDelete enqueue events on ev,
// and loop replays them serially on rh (see NewBuffer).
type buffer struct {
	ev chan interface{} // buffered queue of *addEvent/*updateEvent/*deleteEvent
	logrus.StdLogger // embedded logger; NewBuffer tags it with context=buffer
	rh cache.ResourceEventHandler // handler the buffered events are replayed on
}
// addEvent carries the payload of an OnAdd callback through the buffer.
type addEvent struct {
	obj interface{}
}
// updateEvent carries the payload of an OnUpdate callback through the buffer.
type updateEvent struct {
	oldObj, newObj interface{}
}
// deleteEvent carries the payload of an OnDelete callback through the buffer.
type deleteEvent struct {
	obj interface{}
}
// NewBuffer returns a ResourceEventHandler which buffers and serialises ResourceEventHandler events.
// Events are queued on a channel of the given size and replayed one at a
// time on rh by a goroutine registered with g.
func NewBuffer(g *workgroup.Group, rh cache.ResourceEventHandler, log logrus.FieldLogger, size int) cache.ResourceEventHandler {
	b := &buffer{
		rh:        rh,
		ev:        make(chan interface{}, size),
		StdLogger: log.WithField("context", "buffer"),
	}
	g.Add(b.loop)
	return b
}
// loop drains the event queue and replays each event, in order, on the
// wrapped handler until stop is closed.
func (b *buffer) loop(stop <-chan struct{}) {
	b.Println("started")
	defer b.Println("stopped")
	for {
		select {
		case <-stop:
			return
		case e := <-b.ev:
			b.dispatch(e)
		}
	}
}
// dispatch replays a single queued event on the wrapped handler.
func (b *buffer) dispatch(e interface{}) {
	switch e := e.(type) {
	case *addEvent:
		b.rh.OnAdd(e.obj)
	case *updateEvent:
		b.rh.OnUpdate(e.oldObj, e.newObj)
	case *deleteEvent:
		b.rh.OnDelete(e.obj)
	default:
		// Only the three event types above are ever enqueued; anything
		// else indicates a programming error, so log and carry on.
		b.Printf("unhandled event type: %T: %v", e, e)
	}
}
// OnAdd enqueues an add notification for asynchronous, serialised delivery.
func (b *buffer) OnAdd(obj interface{}) {
	b.send(&addEvent{obj: obj})
}
// OnUpdate enqueues an update notification for asynchronous, serialised delivery.
func (b *buffer) OnUpdate(oldObj, newObj interface{}) {
	b.send(&updateEvent{oldObj: oldObj, newObj: newObj})
}
// OnDelete enqueues a delete notification for asynchronous, serialised delivery.
func (b *buffer) OnDelete(obj interface{}) {
	b.send(&deleteEvent{obj: obj})
}
// send enqueues ev, preferring a non-blocking send. When the queue is
// full it logs a warning and then blocks until the consumer drains an
// event, so no notification is ever dropped.
func (b *buffer) send(ev interface{}) {
	select {
	case b.ev <- ev:
		return
	default:
	}
	// Queue is full: warn, then fall back to a blocking send.
	b.Printf("event channel is full, len: %v, cap: %v", len(b.ev), cap(b.ev))
	b.ev <- ev
}