forked from openshift/library-go
-
Notifications
You must be signed in to change notification settings - Fork 0
/
remove_stale_conditions.go
116 lines (90 loc) · 2.95 KB
/
remove_stale_conditions.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
package staleconditions
import (
"fmt"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
operatorv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/v1helpers"
)
// workQueueKey is the single fixed key enqueued for every event: this
// controller reconciles one logical object, so one key is sufficient.
const workQueueKey = "key"

// RemoveStaleConditions is a controller that strips a fixed set of
// no-longer-produced condition types from the operator's status.
type RemoveStaleConditions struct {
	// conditions lists the condition types removed from status on each sync.
	conditions []string

	// operatorClient provides access to the operator resource whose
	// status conditions are pruned.
	operatorClient v1helpers.OperatorClient
	// cachesToSync must all report synced before workers start.
	cachesToSync []cache.InformerSynced
	// eventRecorder is stored for event emission; not used by the code
	// visible in this file.
	eventRecorder events.Recorder

	// queue only ever has one item, but it has nice error handling backoff/retry semantics
	queue workqueue.RateLimitingInterface
}
// NewRemoveStaleConditions returns a controller that deletes the given
// condition types from the operator status whenever the operator
// resource changes.
func NewRemoveStaleConditions(
	conditions []string,
	operatorClient v1helpers.OperatorClient,
	eventRecorder events.Recorder,
) *RemoveStaleConditions {
	controller := &RemoveStaleConditions{
		conditions:     conditions,
		operatorClient: operatorClient,
		eventRecorder:  eventRecorder,

		cachesToSync: []cache.InformerSynced{operatorClient.Informer().HasSynced},
		queue:        workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RemoveStaleConditions"),
	}

	// Any change to the operator resource triggers a resync.
	operatorClient.Informer().AddEventHandler(controller.eventHandler())

	return controller
}
// sync removes every condition type listed in c.conditions from the
// operator's status. Removing an already-absent condition is a no-op, so
// the operation is idempotent and safe to retry.
//
// NOTE: the receiver is a pointer for consistency with the other methods
// of RemoveStaleConditions (Run, runWorker, processNextWorkItem); the
// original value receiver copied the whole struct on every call.
func (c *RemoveStaleConditions) sync() error {
	removeStaleConditionsFn := func(status *operatorv1.OperatorStatus) error {
		for _, condition := range c.conditions {
			v1helpers.RemoveOperatorCondition(&status.Conditions, condition)
		}
		return nil
	}

	// Only the error matters here; the updated status and changed flag
	// are not needed by this controller.
	_, _, err := v1helpers.UpdateStatus(c.operatorClient, removeStaleConditionsFn)
	return err
}
// Run starts the RemoveStaleConditions controller and blocks until
// stopCh is closed. The workers argument is ignored: a single worker is
// always started.
func (c *RemoveStaleConditions) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	klog.Infof("Starting RemoveStaleConditions")
	defer klog.Infof("Shutting down RemoveStaleConditions")

	if ok := cache.WaitForCacheSync(stopCh, c.cachesToSync...); !ok {
		utilruntime.HandleError(fmt.Errorf("caches did not sync"))
		return
	}

	// doesn't matter what workers say, only start one.
	go wait.Until(c.runWorker, time.Second, stopCh)

	<-stopCh
}
// runWorker processes queue items until the queue is shut down.
func (c *RemoveStaleConditions) runWorker() {
	for {
		if !c.processNextWorkItem() {
			return
		}
	}
}
// processNextWorkItem pulls one key off the queue and runs sync for it.
// On failure the key is re-queued with rate-limited backoff; on success
// its failure history is forgotten. It returns false only when the queue
// has been shut down.
func (c *RemoveStaleConditions) processNextWorkItem() bool {
	key, shutdown := c.queue.Get()
	if shutdown {
		return false
	}
	defer c.queue.Done(key)

	if err := c.sync(); err != nil {
		utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err))
		c.queue.AddRateLimited(key)
		return true
	}

	c.queue.Forget(key)
	return true
}
// eventHandler returns an informer handler that enqueues the single
// work-queue key on any add, update, or delete of the operator resource.
func (c *RemoveStaleConditions) eventHandler() cache.ResourceEventHandler {
	enqueue := func(_ interface{}) { c.queue.Add(workQueueKey) }
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		UpdateFunc: func(_, _ interface{}) { enqueue(nil) },
		DeleteFunc: enqueue,
	}
}