controller.go
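// Package controller keeps the scheduler extender's cache in sync with the
// cluster: it watches GPU-sharing pods through shared informers and applies
// add/update/delete events to the scheduler cache via a rate-limited work queue.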
package controller
import (
	"fmt"
	"log"
	"time"

	"github.com/YoYoContainerService/xpu-scheduler-extender/pkg/cache"
	"github.com/YoYoContainerService/xpu-scheduler-extender/pkg/utils"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	clientgocache "k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
)
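// KeyFunc maps an object (or a DeletedFinalStateUnknown tombstone) to its
// "namespace/name" work-queue key.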
var (
KeyFunc = clientgocache.DeletionHandlingMetaNamespaceKeyFunc
)
type Controller struct {
clientset *kubernetes.Clientset
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// nodeLister can list/get nodes from the shared informer's store.
nodeLister corelisters.NodeLister
// podQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
podQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// podInformerSynced returns true if the pod store has been synced at least once.
podInformerSynced clientgocache.InformerSynced
	// nodeInformerSynced returns true if the node store has been synced at least once.
	nodeInformerSynced clientgocache.InformerSynced
schedulerCache *cache.SchedulerCache
	// removePodCache stashes pods that have been deleted from the API server,
	// keyed by "namespace/name", until syncPod evicts them from the scheduler cache.
	removePodCache map[string]*v1.Pod
}
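// A minimal wiring sketch (the clientset, informer factory, and stop channel
// are assumed to be built by the caller; names below are illustrative):
//
//	clientset := kubernetes.NewForConfigOrDie(restConfig)
//	factory := kubeinformers.NewSharedInformerFactory(clientset, 30*time.Second)
//	c, err := NewController(clientset, factory, stopCh)
//	if err != nil {
//		log.Fatalf("failed to create controller: %v", err)
//	}
//	if err := c.BuildCache(); err != nil {
//		log.Fatalf("failed to build scheduler cache: %v", err)
//	}
//	go c.Run(2, stopCh)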
func NewController(clientset *kubernetes.Clientset, kubeInformerFactory kubeinformers.SharedInformerFactory, stopCh <-chan struct{}) (*Controller, error) {
log.Printf("info: creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
	// eventBroadcaster.StartLogging(log.Printf)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: clientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "xpu-scheduler-extender"})
c := &Controller{
clientset: clientset,
podQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "podQueue"),
recorder: recorder,
removePodCache: map[string]*v1.Pod{},
}
// Create pod informer.
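	// The filter below admits only GPU-sharing pods (as decided by
	// utils.IsGPUsharingPod); all other pods never enter the work queue.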
podInformer := kubeInformerFactory.Core().V1().Pods()
podInformer.Informer().AddEventHandler(clientgocache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Pod:
return utils.IsGPUsharingPod(t)
case clientgocache.DeletedFinalStateUnknown:
if pod, ok := t.Obj.(*v1.Pod); ok {
log.Printf("debug: delete pod %s in namespace %s", pod.Name, pod.Namespace)
return utils.IsGPUsharingPod(pod)
}
runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c))
return false
default:
runtime.HandleError(fmt.Errorf("unable to handle object in %T: %T", c, obj))
return false
}
},
Handler: clientgocache.ResourceEventHandlerFuncs{
AddFunc: c.addPodToCache,
UpdateFunc: c.updatePodInCache,
DeleteFunc: c.deletePodFromCache,
},
})
c.podLister = podInformer.Lister()
c.podInformerSynced = podInformer.Informer().HasSynced
// Create node informer
nodeInformer := kubeInformerFactory.Core().V1().Nodes()
c.nodeLister = nodeInformer.Lister()
c.nodeInformerSynced = nodeInformer.Informer().HasSynced
// Create configMap informer
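	// The configmap lister and sync state are exposed as package-level
	// variables on the cache package so other parts of the extender can read them.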
cmInformer := kubeInformerFactory.Core().V1().ConfigMaps()
cache.ConfigMapLister = cmInformer.Lister()
cache.ConfigMapInformerSynced = cmInformer.Informer().HasSynced
	// Start the informers. Start is non-blocking: it launches one goroutine
	// per registered informer and returns immediately.
	kubeInformerFactory.Start(stopCh)
	// Create the scheduler cache.
c.schedulerCache = cache.NewSchedulerCache(c.nodeLister, c.podLister)
log.Println("info: begin to wait for cache")
if ok := clientgocache.WaitForCacheSync(stopCh, c.nodeInformerSynced); !ok {
return nil, fmt.Errorf("failed to wait for node caches to sync")
} else {
log.Println("info: init the node cache successfully")
}
if ok := clientgocache.WaitForCacheSync(stopCh, c.podInformerSynced); !ok {
return nil, fmt.Errorf("failed to wait for pod caches to sync")
} else {
log.Println("info: init the pod cache successfully")
}
if ok := clientgocache.WaitForCacheSync(stopCh, cache.ConfigMapInformerSynced); !ok {
return nil, fmt.Errorf("failed to wait for configmap caches to sync")
} else {
log.Println("info: init the configmap cache successfully")
}
log.Println("info: end to wait for cache")
return c, nil
}
// BuildCache populates the scheduler cache from the current informer state.
func (c *Controller) BuildCache() error {
return c.schedulerCache.BuildCache()
}
// GetSchedulerCache returns the controller's scheduler cache.
func (c *Controller) GetSchedulerCache() *cache.SchedulerCache {
return c.schedulerCache
}
// Run starts the requested number of workers and blocks until stopCh is closed.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer runtime.HandleCrash()
	defer c.podQueue.ShutDown()
	log.Println("info: starting controller")
	log.Printf("info: starting %v workers.", threadiness)
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
log.Println("info: started workers")
<-stopCh
log.Println("info: shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// syncPod reconciles the scheduler cache with the current state of the pod
// identified by key: deleted and completed pods are removed from the cache,
// all others are added or updated. It is not meant to be invoked concurrently
// with the same key.
func (c *Controller) syncPod(key string) (forget bool, err error) {
	ns, name, err := clientgocache.SplitMetaNamespaceKey(key)
	if err != nil {
		return false, err
	}
	log.Printf("debug: begin to sync pod [%s] in namespace [%s]", name, ns)
pod, err := c.podLister.Pods(ns).Get(name)
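	// Three cases follow: the pod is gone (release it via the stashed copy),
	// the lister errored (report and retry), or the pod still exists
	// (remove it from the cache if completed, otherwise add/update it).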
switch {
	case errors.IsNotFound(err):
		log.Printf("debug: pod [%s] in namespace [%s] has been deleted", name, ns)
		if pod, found := c.removePodCache[key]; found {
			c.schedulerCache.RemovePod(pod)
			delete(c.removePodCache, key)
		}
case err != nil:
log.Printf("warn: unable to retrieve pod %v from the store: %v", key, err)
default:
if utils.IsCompletePod(pod) {
log.Printf("debug: pod %s in namespace [%s] has completed.", name, ns)
c.schedulerCache.RemovePod(pod)
} else {
err := c.schedulerCache.AddOrUpdatePod(pod)
if err != nil {
return false, err
}
}
}
return true, nil
}
// processNextWorkItem reads a single work item off the podQueue and attempts
// to process it, requeueing with rate limiting on failure. It returns false
// only when the queue is shutting down.
func (c *Controller) processNextWorkItem() bool {
	log.Println("debug: begin to process next work item")
	key, quit := c.podQueue.Get()
	if quit {
		return false
	}
	defer c.podQueue.Done(key)
	defer log.Println("debug: end of processing work item")
	forget, err := c.syncPod(key.(string))
	if err == nil {
		if forget {
			c.podQueue.Forget(key)
		}
		// Keep this worker's loop running; returning false would stop it.
		return true
	}
	runtime.HandleError(fmt.Errorf("error syncing pod %v: %v", key, err))
	c.podQueue.AddRateLimited(key)
	return true
}
func (c *Controller) addPodToCache(obj interface{}) {
pod, ok := obj.(*v1.Pod)
if !ok {
log.Printf("warn: cannot convert to *v1.Pod: %v", obj)
return
}
// if !assignedNonTerminatedPod(t) {
// log.Printf("debug: skip pod %s due to it's terminated.", pod.Name)
// return
// }
podKey, err := KeyFunc(pod)
if err != nil {
log.Printf("warn: failed to get the jobkey: %v", err)
return
}
c.podQueue.Add(podKey)
// NOTE: Updating equivalence cache of addPodToCache has been
// handled optimistically in: pkg/scheduler/scheduler.go#assume()
}
func (c *Controller) updatePodInCache(oldObj, newObj interface{}) {
oldPod, ok := oldObj.(*v1.Pod)
if !ok {
log.Printf("warn: cannot convert oldObj to *v1.Pod: %v", oldObj)
return
}
newPod, ok := newObj.(*v1.Pod)
if !ok {
log.Printf("warn: cannot convert newObj to *v1.Pod: %v", newObj)
return
}
needUpdate := false
podUID := oldPod.UID
// 1. Need update when pod is turned to complete or failed
if c.schedulerCache.KnownPod(podUID) && utils.IsCompletePod(newPod) {
needUpdate = true
}
// 2. Need update when it's unknown pod, and GPU annotation has been set
if !c.schedulerCache.KnownPod(podUID) && utils.GetGPUIDFromAnnotation(newPod) >= 0 {
needUpdate = true
}
if needUpdate {
podKey, err := KeyFunc(newPod)
if err != nil {
log.Printf("warn: failed to get the jobkey: %v", err)
return
}
log.Printf("info: need to update pod [%s] in namespace [%s] and old status is %v, new status is %v; its old annotation %v and new annotation %v",
newPod.Name,
newPod.Namespace,
oldPod.Status.Phase,
newPod.Status.Phase,
oldPod.Annotations,
newPod.Annotations)
c.podQueue.Add(podKey)
}
/*else {
log.Printf("debug: No need to update pod name %s in ns %s and old status is %v, new status is %v; its old annotation %v and new annotation %v",
newPod.Name,
newPod.Namespace,
oldPod.Status.Phase,
newPod.Status.Phase,
oldPod.Annotations,
newPod.Annotations)
}*/
}
func (c *Controller) deletePodFromCache(obj interface{}) {
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case clientgocache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
log.Printf("warn: cannot convert to *v1.Pod: %v", t.Obj)
return
}
default:
log.Printf("warn: cannot convert to *v1.Pod: %v", t)
return
}
log.Printf("debug: delete pod [%s] in namespace [%s]", pod.Name, pod.Namespace)
podKey, err := KeyFunc(pod)
if err != nil {
log.Printf("warn: failed to get the jobkey: %v", err)
return
}
c.podQueue.Add(podKey)
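	// Stash the deleted pod so syncPod can still release its resources from
	// the scheduler cache once the lister no longer returns it.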
c.removePodCache[podKey] = pod
}