main.go
package main

import (
	"context"
	"flag"
	"sync/atomic"
	"time"

	"github.com/apex/log"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)
func main() {
	var kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	var nodeID = flag.String("node-id", "", "node id used for leader election")
	flag.Parse()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	clientset, err := newClientset(*kubeconfig)
	if err != nil {
		log.WithError(err).Fatal("failed to connect to cluster")
	}

	// Use a Lease object in the default namespace as the lock; each candidate
	// identifies itself with the node ID passed on the command line.
	var lock = &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      "my-lock",
			Namespace: "default",
		},
		Client: clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: *nodeID,
		},
	}

	var ticker = time.NewTicker(time.Second)
	defer ticker.Stop()

	// leading is 1 while this process holds the lease, 0 otherwise.
	var leading int32

	// RunOrDie blocks, acquiring and renewing the lease and invoking the
	// callbacks as leadership changes.
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// Do the work while we are the leader, stopping as soon as
				// OnStoppedLeading flips the flag back to 0.
				atomic.StoreInt32(&leading, 1)
				log.WithField("id", *nodeID).Info("started leading")
				for range ticker.C {
					if atomic.LoadInt32(&leading) == 0 {
						log.Info("stopped working")
						return
					}
					log.Info("working...")
					time.Sleep(time.Second)
				}
			},
			OnStoppedLeading: func() {
				atomic.StoreInt32(&leading, 0)
				log.WithField("id", *nodeID).Info("stopped leading")
			},
			OnNewLeader: func(identity string) {
				// Only log leadership changes for other nodes.
				if identity == *nodeID {
					return
				}
				log.WithField("id", *nodeID).
					WithField("leader", identity).
					Info("new leader")
			},
		},
	})
}
// newClientset builds a Kubernetes clientset from the given kubeconfig path,
// falling back to in-cluster configuration when the path is empty.
func newClientset(filename string) (*kubernetes.Clientset, error) {
	config, err := getConfig(filename)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}

func getConfig(cfg string) (*rest.Config, error) {
	if cfg == "" {
		return rest.InClusterConfig()
	}
	return clientcmd.BuildConfigFromFlags("", cfg)
}