/
controller.go
247 lines (203 loc) · 7.4 KB
/
controller.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
package syncer
import (
"context"
"time"
appTypes "github.com/kyma-project/kyma/components/application-operator/pkg/apis/applicationconnector/v1alpha1"
informers "github.com/kyma-project/kyma/components/application-operator/pkg/client/informers/externalversions/applicationconnector/v1alpha1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/kyma-project/kyma/components/application-broker/internal"
)
const (
	// maxApplicationProcessRetries is the number of times an application CR will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
	// a deployment is going to be requeued:
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms
	maxApplicationProcessRetries = 5
)
//go:generate mockery -name=applicationUpserter -output=automock -outpkg=automock -case=underscore
//go:generate mockery -name=applicationRemover -output=automock -outpkg=automock -case=underscore
//go:generate mockery -name=applicationCRValidator -output=automock -outpkg=automock -case=underscore
//go:generate mockery -name=applicationCRMapper -output=automock -outpkg=automock -case=underscore
//go:generate mockery -name=scRelistRequester -output=automock -outpkg=automock -case=underscore
type (
	// applicationUpserter inserts or updates an application in storage.
	// The bool result reports whether an existing entry was replaced (see its use in processItem).
	applicationUpserter interface {
		Upsert(app *internal.Application) (bool, error)
	}
	// applicationRemover deletes an application from storage by name.
	applicationRemover interface {
		Remove(name internal.ApplicationName) error
	}
	// applicationCRValidator validates an Application custom resource before it is mapped and stored.
	applicationCRValidator interface {
		Validate(dto *appTypes.Application) error
	}
	// applicationCRMapper converts an Application custom resource into the internal model.
	applicationCRMapper interface {
		ToModel(dto *appTypes.Application) (*internal.Application, error)
	}
	// scRelistRequester triggers a relist after a successful sync
	// ("sc" presumably refers to the Service Catalog — verify against the implementation).
	scRelistRequester interface {
		RequestRelist()
	}
)
// Controller populates local storage with all Application custom resources created in k8s cluster.
type Controller struct {
	log logrus.FieldLogger
	// queue holds namespace/name keys of Application CRs awaiting processing;
	// its rate limiter drives the retry backoff described at maxApplicationProcessRetries.
	queue    workqueue.RateLimitingInterface
	informer informers.ApplicationInformer
	// appUpserter and appRemover persist/delete applications in local storage.
	appUpserter appilicationUpserterPlaceholder
	appRemover  applicationRemover
	// appCRValidator and appCRMapper are chosen in New based on the apiPackagesSupport flag.
	appCRValidator    applicationCRValidator
	appCRMapper       applicationCRMapper
	scRelistRequester scRelistRequester
}
// New creates new application controller
func New(applicationInformer informers.ApplicationInformer, appUpserter applicationUpserter, appRemover applicationRemover, scRelistRequester scRelistRequester, log logrus.FieldLogger, apiPackagesSupport bool) *Controller {
	ctrl := &Controller{
		log:               log.WithField("service", "syncer:controller"),
		queue:             workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		informer:          applicationInformer,
		appUpserter:       appUpserter,
		appRemover:        appRemover,
		scRelistRequester: scRelistRequester,
	}

	// Pick the CR validation/mapping strategy depending on API Packages support.
	if apiPackagesSupport {
		ctrl.appCRValidator = &appCRValidatorV2{}
		ctrl.appCRMapper = &appCRMapperV2{}
	} else {
		ctrl.appCRValidator = &appCRValidator{}
		ctrl.appCRMapper = &appCRMapper{}
	}

	// Every add/update/delete of an Application CR lands on the work queue.
	applicationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    ctrl.addApp,
		UpdateFunc: ctrl.updateApp,
		DeleteFunc: ctrl.deleteApp,
	})

	return ctrl
}
// addApp enqueues the key of a newly observed Application custom resource.
func (c *Controller) addApp(obj interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err == nil {
		c.queue.Add(key)
		return
	}
	c.log.Errorf("while handling adding event: while adding new application custom resource to queue: couldn't get key: %v", err)
}
// deleteApp enqueues the key of a deleted Application custom resource.
// DeletionHandlingMetaNamespaceKeyFunc also copes with tombstone objects.
func (c *Controller) deleteApp(obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err == nil {
		c.queue.Add(key)
		return
	}
	c.log.Errorf("while handling deletion event: while adding new application custom resource to queue: couldn't get key: %v", err)
}
// updateApp enqueues the key of an updated Application custom resource.
// The previous object state is not needed, so the first argument is ignored.
func (c *Controller) updateApp(_, cur interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(cur)
	if err == nil {
		c.queue.Add(key)
		return
	}
	c.log.Errorf("while handling update event: while adding new application custom resource to queue: couldn't get key: %v", err)
}
// Run starts the controller
func (c *Controller) Run(stopCh <-chan struct{}) {
	// Make sure the queue is shut down when stopCh closes, so the worker loop can exit.
	go c.shutdownQueueOnStop(stopCh)

	c.log.Info("Starting application CR sync-controller")
	defer c.log.Infof("Shutting down application CR sync-controller")

	synced := cache.WaitForCacheSync(stopCh, c.informer.Informer().HasSynced)
	if !synced {
		c.log.Error("Timeout occurred on waiting for caches to sync. Shutdown the controller.")
		return
	}
	c.log.Info("Application controller synced and ready")

	// Restart the worker every second should it ever return, until stopCh closes.
	wait.Until(c.runWorker, time.Second, stopCh)
}
// shutdownQueueOnStop blocks until stopCh is closed, then shuts down the work
// queue so workers blocked in queue.Get can drain and exit. Run in a goroutine.
func (c *Controller) shutdownQueueOnStop(stopCh <-chan struct{}) {
	<-stopCh
	c.queue.ShutDown()
}
// runWorker drains the queue until it is shut down.
func (c *Controller) runWorker() {
	for {
		if !c.processNextItem() {
			return
		}
	}
}
// processNextItem pops one key off the queue and processes it.
// It returns false only when the queue has been shut down.
func (c *Controller) processNextItem() bool {
	item, shutdown := c.queue.Get()
	if shutdown {
		return false
	}
	defer c.queue.Done(item)

	name := item.(string)
	if err := c.processItem(name); err == nil {
		// Success: clear retry bookkeeping and ask for a Service Catalog relist.
		c.queue.Forget(item)
		c.scRelistRequester.RequestRelist()
		c.log.Infof("Relist requested after successful processing of the %q", name)
	} else if isTemporaryError(err) && c.queue.NumRequeues(item) < maxApplicationProcessRetries {
		// Transient failure within the retry budget: requeue with backoff.
		c.log.Errorf("Error processing %q (will retry): %v", item, err)
		c.queue.AddRateLimited(item)
	} else {
		// Permanent failure, or retry budget exhausted: drop the item.
		c.log.Errorf("Error processing %q (giving up): %v", item, err)
		c.queue.Forget(item)
	}
	return true
}
// processItem syncs a single Application CR (identified by its cache key) into
// local storage: a missing object triggers removal, an existing one is
// validated, mapped to the internal model and upserted.
func (c *Controller) processItem(key string) error {
	// TODO: In prometheus-operator they use exists to check if we should delete resources, see:
	// https://github.com/coreos/prometheus-operator/blob/master/pkg/alertmanager/operator.go#L364
	// but in k8s they use Lister to check if event should be delete, see:
	// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/service/service_controller.go#L725
	// We need to check the guarantees of such solutions and choose the best one.
	obj, exists, err := c.informer.Informer().GetIndexer().GetByKey(key)
	if err != nil {
		return errors.Wrapf(err, "while getting object with key %q from store", key)
	}

	// Object gone from the cache means the CR was deleted: mirror that in storage.
	if !exists {
		if removeErr := c.appRemover.Remove(internal.ApplicationName(key)); removeErr != nil {
			return errors.Wrapf(removeErr, "while removing application with name %q from storage", key)
		}
		c.log.Infof("Application %q was removed from storage", key)
		return nil
	}

	app, ok := obj.(*appTypes.Application)
	if !ok {
		return errors.New("cannot cast received object to v1alpha1.Application type")
	}

	if validationErr := c.appCRValidator.Validate(app); validationErr != nil {
		return errors.Wrapf(validationErr, "while validating application %q", key)
	}

	model, mappingErr := c.appCRMapper.ToModel(app)
	if mappingErr != nil {
		return errors.Wrap(mappingErr, "while mapping Application CR to model")
	}

	replaced, upsertErr := c.appUpserter.Upsert(model)
	if upsertErr != nil {
		return errors.Wrapf(upsertErr, "while upserting application with name %q into storage", key)
	}
	c.log.Infof("Application %q was added into storage (replaced: %v)", key, replaced)
	return nil
}
// closeChanOnCtxCancellation closes ch once ctx is cancelled. It blocks until
// cancellation, so it is intended to be run in its own goroutine.
func (c *Controller) closeChanOnCtxCancellation(ctx context.Context, ch chan<- struct{}) {
	// The original for { select { case <-ctx.Done(): ... } } is a redundant
	// single-case select loop (staticcheck S1000); a plain blocking receive
	// on ctx.Done() is equivalent and idiomatic.
	<-ctx.Done()
	close(ch)
}
// isTemporaryError reports whether err exposes a method set of the form:
//
//	type temporary interface {
//		Temporary() bool
//	}
//
// and that Temporary() call returns true. Any other error (including nil)
// yields false.
func isTemporaryError(err error) bool {
	type temporary interface {
		Temporary() bool
	}
	if te, ok := err.(temporary); ok {
		return te.Temporary()
	}
	return false
}