-
Notifications
You must be signed in to change notification settings - Fork 0
/
leaderelection.go
80 lines (70 loc) · 1.65 KB
/
leaderelection.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
package common
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/klog"
"os"
"os/signal"
"syscall"
"time"
)
// LeaderElectionRunOrDie runs Kubernetes leader election using a Lease lock
// named leaseName in the package-level Namespace, identifying this instance
// by the package-level Hostname. It blocks for the lifetime of the process:
// while this instance leads it simply waits on the election context, and a
// SIGINT/SIGTERM cancels that context so the lease is released
// (ReleaseOnCancel) before shutdown. The package-level IsLeader flag is
// updated from the OnNewLeader callback. On loss of leadership the process
// exits with status 0.
//
// It must run in-cluster (rest.InClusterConfig); on any setup error it logs
// the error and returns without starting an election.
func LeaderElectionRunOrDie(leaseName string) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Error(err)
		return
	}
	// NewForConfig instead of NewForConfigOrDie: NewForConfigOrDie panics on
	// error and never returns nil (the old `client == nil` check was dead
	// code); this function's convention is to log and return instead.
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		klog.Error(err)
		return
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Translate SIGINT/SIGTERM into context cancellation so the election
	// loop can release the lease before the process exits.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-ch
		klog.Info("Received termination, signaling shutdown")
		cancel()
	}()

	// There is no leader-only work to perform here; just block until the
	// election context is cancelled. (A bare `select {}` would ignore ctx,
	// leaking this goroutine and defeating graceful shutdown.)
	run := func(ctx context.Context) {
		<-ctx.Done()
	}

	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      leaseName,
			Namespace: Namespace,
		},
		Client: client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: Hostname,
		},
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true, // release the lease promptly when ctx is cancelled
		LeaseDuration:   60 * time.Second,
		RenewDeadline:   15 * time.Second,
		RetryPeriod:     5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				run(ctx)
			},
			OnStoppedLeading: func() {
				// Exit so the orchestrator restarts us as a follower rather
				// than continuing with stale leader-only state.
				klog.Infof("leader lost: %s", Hostname)
				os.Exit(0)
			},
			OnNewLeader: func(identity string) {
				IsLeader = identity == Hostname
				if IsLeader {
					return
				}
				klog.Infof("new leader elected: %s", identity)
			},
		},
	})
}