forked from kyma-project/kyma
/
controller.go
302 lines (253 loc) · 8.95 KB
/
controller.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
package labeler
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/kyma-project/kyma/components/remote-environment-broker/internal"
"github.com/kyma-project/kyma/components/remote-environment-broker/internal/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
	// maxEnvironmentMappingProcessRetries is the number of times an EnvironmentMapping CR
	// will be retried before it is dropped out of the queue.
	//
	// With the rate-limiter in use the delay before requeue number r is 5ms*2^(r-1),
	// so the first retries happen after 5ms, 10ms, 20ms, 40ms, 80ms, ... and the delay
	// keeps doubling up to retry 15 (NOTE(review): the original comment listed only the
	// first 5 delays, which did not match this value of 15).
	maxEnvironmentMappingProcessRetries = 15
)
//go:generate mockery -name=reGetter -output=automock -outpkg=automock -case=underscore

// reGetter provides read access to RemoteEnvironments stored by the broker.
type reGetter interface {
	Get(internal.RemoteEnvironmentName) (*internal.RemoteEnvironment, error)
}
//go:generate mockery -name=nsPatcher -output=automock -outpkg=automock -case=underscore

// nsPatcher applies a patch to the named Kubernetes Namespace object.
// The signature mirrors the client-go Namespace Patch method.
type nsPatcher interface {
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Namespace, err error)
}
// Controller populates local storage with all EnvironmentMapping custom resources created in k8s cluster.
type Controller struct {
	log        logrus.FieldLogger              // logger tagged with "service: labeler:controller"
	queue      workqueue.RateLimitingInterface // rate-limited work queue of EnvironmentMapping keys
	emInformer cache.SharedIndexInformer       // informer/cache for EnvironmentMapping custom resources
	nsInformer cache.SharedIndexInformer       // informer/cache for Namespaces
	nsPatcher  nsPatcher                       // used to patch the "accessLabel" label onto namespaces
	reGetter   reGetter                        // used to look up the RemoteEnvironment's access label
}
// New creates new environment mapping controller. The returned controller
// enqueues a work item for every add/update/delete of an EnvironmentMapping.
func New(emInformer cache.SharedIndexInformer, nsInformer cache.SharedIndexInformer, nsPatcher nsPatcher, reGetter reGetter, log logrus.FieldLogger) *Controller {
	ctrl := &Controller{
		log:        log.WithField("service", "labeler:controller"),
		queue:      workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		emInformer: emInformer,
		nsInformer: nsInformer,
		nsPatcher:  nsPatcher,
		reGetter:   reGetter,
	}

	// React to every EnvironmentMapping event by pushing its key onto the queue.
	emInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    ctrl.addEM,
		UpdateFunc: ctrl.updateEM,
		DeleteFunc: ctrl.deleteEM,
	})

	return ctrl
}
// addEM enqueues the key of a newly created EnvironmentMapping.
func (c *Controller) addEM(obj interface{}) {
	if key, err := cache.MetaNamespaceKeyFunc(obj); err != nil {
		c.log.Errorf("while handling adding event: while adding new environment mapping custom resource to queue: couldn't get key: %v", err)
	} else {
		c.queue.Add(key)
	}
}
// updateEM enqueues the key of the current version of an updated EnvironmentMapping.
func (c *Controller) updateEM(old, cur interface{}) {
	if key, err := cache.MetaNamespaceKeyFunc(cur); err != nil {
		c.log.Errorf("while handling update event: while adding new environment mapping custom resource to queue: couldn't get key: %v", err)
	} else {
		c.queue.Add(key)
	}
}
// deleteEM enqueues the key of a deleted EnvironmentMapping. The deletion-handling
// key func also copes with DeletedFinalStateUnknown tombstones.
func (c *Controller) deleteEM(obj interface{}) {
	if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil {
		c.log.Errorf("while handling deletion event: while adding new environment mapping custom resource to queue: couldn't get key: %v", err)
	} else {
		c.queue.Add(key)
	}
}
// Run starts the controller. It blocks until stopCh is closed: first it waits for
// both informer caches to sync (aborting on timeout), then runs a single worker
// loop that processes queued EnvironmentMapping keys.
func (c *Controller) Run(stopCh <-chan struct{}) {
	go c.shutdownQueueOnStop(stopCh)

	c.log.Info("Starting Environment Mappings controller")
	defer c.log.Infof("Shutting down Environment Mappings controller")

	// Both caches must be warm before processing; bail out if either times out.
	syncChecks := []struct {
		hasSynced cache.InformerSynced
		errMsg    string
	}{
		{c.emInformer.HasSynced, "Timeout occurred on waiting for EM informer caches to sync. Shutdown the controller."},
		{c.nsInformer.HasSynced, "Timeout occurred on waiting for NS informer caches to sync. Shutdown the controller."},
	}
	for _, check := range syncChecks {
		if !cache.WaitForCacheSync(stopCh, check.hasSynced) {
			c.log.Error(check.errMsg)
			return
		}
	}

	c.log.Info("EM controller synced and ready")

	wait.Until(c.runWorker, time.Second, stopCh)
}
// shutdownQueueOnStop blocks until the stop channel is closed, then shuts the
// work queue down so processNextItem unblocks and the worker loop exits.
func (c *Controller) shutdownQueueOnStop(stop <-chan struct{}) {
	<-stop
	c.queue.ShutDown()
}
// runWorker drains the queue, processing items until the queue is shut down.
func (c *Controller) runWorker() {
	for {
		if !c.processNextItem() {
			return
		}
	}
}
// processNextItem pops one key off the queue and processes it. It returns false
// only when the queue has been shut down. Temporary errors are requeued with
// rate limiting up to maxEnvironmentMappingProcessRetries; anything else is
// logged and forgotten.
func (c *Controller) processNextItem() bool {
	item, shutdown := c.queue.Get()
	if shutdown {
		return false
	}
	defer c.queue.Done(item)

	err := c.processItem(item.(string))
	if err == nil {
		c.queue.Forget(item)
		return true
	}

	if isTemporaryError(err) && c.queue.NumRequeues(item) < maxEnvironmentMappingProcessRetries {
		c.log.Errorf("Error processing %q (will retry): %v", item, err)
		c.queue.AddRateLimited(item)
		return true
	}

	// Non-temporary error, or too many retries: give up on this key.
	c.log.Errorf("Error processing %q (giving up): %v", item, err)
	c.queue.Forget(item)
	return true
}
// processItem reconciles the "accessLabel" label on the namespace referenced by
// the given EnvironmentMapping key ("namespace/name"). If the mapping no longer
// exists, the label is removed from the namespace; otherwise the access label is
// read from the RemoteEnvironment of the same name and applied to the namespace.
// Errors marked temporary (see tmpError) are retried by the caller.
func (c *Controller) processItem(key string) error {
	// TODO: In prometheus-operator they use exists to check if we should delete resources, see:
	// https://github.com/coreos/prometheus-operator/blob/master/pkg/alertmanager/operator.go#L364
	// but in k8s they use Lister to check if event should be delete, see:
	// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/service/service_controller.go#L725
	// We need to check the guarantees of such solutions and choose the best one.
	_, exists, err := c.emInformer.GetIndexer().GetByKey(key)
	if err != nil {
		return errors.Wrapf(err, "while getting object with key %q from the store", key)
	}

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return errors.Wrapf(err, "while getting name and namespace from key %q", key)
	}

	nsObj, nsExist, nsErr := c.nsInformer.GetIndexer().GetByKey(namespace)
	// BUG FIX: previously this wrapped `err` (always nil at this point) instead of
	// `nsErr`, so errors.Wrapf returned nil and lookup failures were silently
	// reported as success. Wrap the actual error, and report a missing namespace
	// explicitly since there is no underlying error to wrap in that case.
	if nsErr != nil {
		return errors.Wrapf(nsErr, "cannot get the namespace: %q", namespace)
	}
	if !nsExist {
		return errors.Errorf("cannot get the namespace: %q", namespace)
	}

	reNs, ok := nsObj.(*corev1.Namespace)
	if !ok {
		return errors.New("cannot cast received object to corev1.Namespace type")
	}

	// The EnvironmentMapping was deleted: drop the access label from its namespace.
	if !exists {
		if err := c.deleteNsAccLabel(reNs); err != nil {
			return errors.Wrapf(err, "cannot delete AccessLabel from the namespace: %q", namespace)
		}
		return nil
	}

	label, err := c.getReAccLabel(name)
	if err != nil {
		return errors.Wrapf(err, "cannot get AccessLabel from RE: %q", name)
	}

	if err := c.applyNsAccLabel(reNs, label); err != nil {
		return errors.Wrapf(err, "cannot apply AccessLabel to the namespace: %q", namespace)
	}
	return nil
}
// deleteNsAccLabel removes the "accessLabel" label from the given namespace by
// patching it against an unmodified deep copy.
func (c *Controller) deleteNsAccLabel(ns *corev1.Namespace) error {
	modified := ns.DeepCopy()
	c.log.Infof("Deleting AccessLabel: %q, from the namespace - %q", modified.Labels["accessLabel"], modified.Name)
	delete(modified.Labels, "accessLabel")

	if err := c.patchNs(ns, modified); err != nil {
		return fmt.Errorf("failed to delete AccessLabel from the namespace: %q, %v", modified.Name, err)
	}
	return nil
}
// applyNsAccLabel sets the "accessLabel" label on the given namespace to the
// provided value by patching it against an unmodified deep copy.
func (c *Controller) applyNsAccLabel(ns *corev1.Namespace, label string) error {
	modified := ns.DeepCopy()
	// The namespace may have no labels yet; the map must exist before assignment.
	if modified.Labels == nil {
		modified.Labels = map[string]string{}
	}
	modified.Labels["accessLabel"] = label

	c.log.Infof("Applying AccessLabel: %q to namespace - %q", label, modified.Name)

	if err := c.patchNs(ns, modified); err != nil {
		return fmt.Errorf("failed to apply AccessLabel: %q to the namespace: %q, %v", label, modified.Name, err)
	}
	return nil
}
// patchNs computes a strategic two-way merge patch between the original and the
// modified namespace and applies it via the nsPatcher.
//
// BUG FIX: the marshal-modified and create-patch branches previously wrapped the
// first `err` variable (nil at those points) instead of their own errors (`err2`,
// `err3`); errors.Wrapf(nil, ...) returns nil, so those failures were silently
// swallowed. Each step now shadows `err` and wraps the error it actually checks.
func (c *Controller) patchNs(nsOrig, nsMod *corev1.Namespace) error {
	oldData, err := json.Marshal(nsOrig)
	if err != nil {
		return errors.Wrap(err, "while marshalling original namespace")
	}

	newData, err := json.Marshal(nsMod)
	if err != nil {
		return errors.Wrap(err, "while marshalling modified namespace")
	}

	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, corev1.Namespace{})
	if err != nil {
		return errors.Wrap(err, "while creating patch")
	}

	if _, err := c.nsPatcher.Patch(nsMod.Name, types.StrategicMergePatchType, patch); err != nil {
		return fmt.Errorf("failed to patch namespace: %q: %v", nsMod.Name, err)
	}
	return nil
}
// getReAccLabel returns the access label of the RemoteEnvironment with the given
// name. A storage not-found error is wrapped as temporary, because an
// EnvironmentMapping may reference an RE that exists but has not been stored
// yet; in that case the controller should retry processing. An empty access
// label is reported as a permanent error.
func (c *Controller) getReAccLabel(name string) (string, error) {
	re, err := c.reGetter.Get(internal.RemoteEnvironmentName(name))
	switch {
	case err == nil:
		// fall through to the access-label check below
	case storage.IsNotFoundError(err):
		return "", errors.Wrapf(&tmpError{err}, "while getting remote environment with name: %q", name)
	default:
		return "", errors.Wrapf(err, "while getting remote environment with name: %q", name)
	}

	if re.AccessLabel == "" {
		return "", fmt.Errorf("RE %q access label is empty", name)
	}
	return re.AccessLabel, nil
}
// closeChanOnCtxCancellation blocks until ctx is cancelled and then closes ch.
//
// Simplified from a `for { select { <single case> } }` construct: a select with
// one blocking case inside an infinite loop is equivalent to a plain receive
// (flagged by staticcheck S1000), and the loop could never iterate twice anyway
// because the case always returns.
func (c *Controller) closeChanOnCtxCancellation(ctx context.Context, ch chan<- struct{}) {
	<-ctx.Done()
	close(ch)
}
// isTemporaryError reports whether the root cause of err implements a
// Temporary() method and that method returns true. Otherwise false is returned.
func isTemporaryError(err error) bool {
	type temporary interface {
		Temporary() bool
	}
	if te, ok := errors.Cause(err).(temporary); ok {
		return te.Temporary()
	}
	return false
}
// tmpError wraps another error and marks it as temporary, so that
// isTemporaryError recognizes it and the controller requeues the work item.
type tmpError struct {
	err error
}

// Error returns the message of the wrapped error.
func (t *tmpError) Error() string { return t.err.Error() }

// Temporary always reports true; tmpError exists solely to mark retryable errors.
func (t *tmpError) Temporary() bool { return true }