-
Notifications
You must be signed in to change notification settings - Fork 5
/
leaderelection.go
120 lines (105 loc) · 3.84 KB
/
leaderelection.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
package leaderelection
// This package draws heavily from the controller-runtime's leaderelection package
// (https://github.com/kubernetes-sigs/controller-runtime/tree/v0.12.3/pkg/leaderelection)
// but has some changes to bring it in line with my package style
import (
	"context"
	"errors"
	"fmt"
	"os"
	"strings"

	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
)
const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
// RunOrDie builds a leader election configuration from the given rest
// config and options, then blocks running the election loop. It panics
// if the configuration cannot be constructed, so it is intended to be
// called from a main goroutine at process startup.
func RunOrDie(ctx context.Context, config *rest.Config, options Options) {
	cfg, err := NewLeaderElectionConfig(config, options)
	if err != nil {
		panic(err)
	}
	leaderelection.RunOrDie(ctx, *cfg)
}
// NewLeaderElectionConfig assembles a client-go LeaderElectionConfig from
// the supplied rest config and options. The underlying resource lock is
// created without an event recorder.
func NewLeaderElectionConfig(config *rest.Config, options Options) (*leaderelection.LeaderElectionConfig, error) {
	resourceLock, err := NewResourceLock(config, nil, options)
	if err != nil {
		return nil, err
	}

	callbacks := leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) { options.OnStartedLeading() },
		OnStoppedLeading: options.OnStoppedLeading,
		OnNewLeader:      options.OnNewLeader,
	}

	return &leaderelection.LeaderElectionConfig{
		Lock: resourceLock,
		// IMPORTANT: any code protected by the lease MUST terminate
		// **before** cancel is called. Otherwise a background loop could
		// still be running while another process gets elected, violating
		// the stated goal of the lease.
		ReleaseOnCancel: true,
		LeaseDuration:   options.LeaseDuration,
		RenewDeadline:   options.RenewDeadline,
		RetryPeriod:     options.RetryPeriod,
		Callbacks:       callbacks,
	}, nil
}
// NewResourceLock creates a resourcelock.Interface backed by the Leases
// resource, defaulting the namespace (via in-cluster lookup) and the lock
// identity (hostname plus a random UUID) when they are not provided.
func NewResourceLock(config *rest.Config, eventRecorder record.EventRecorder, options Options) (resourcelock.Interface, error) {
	// A lock name is mandatory so unrelated deployments cannot clash.
	if options.LeaderElectionName == "" {
		return nil, errors.New("LeaderElectionName must be configured")
	}

	// Default the namespace from the in-cluster mount when not set.
	if options.LeaderElectionNamespace == "" {
		ns, err := getInClusterNamespace()
		if err != nil {
			return nil, fmt.Errorf("unable to find leader election namespace: %w", err)
		}
		options.LeaderElectionNamespace = ns
	}

	// Default the identity to "<hostname>_<uuid>" so it is unique per process.
	if options.LeaderElectionID == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return nil, err
		}
		options.LeaderElectionID = hostname + "_" + string(uuid.NewUUID())
	}

	// Construct a clientset dedicated to leader election; tag its user agent
	// so election traffic is identifiable in apiserver logs.
	rest.AddUserAgent(config, "leader-election")
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return resourcelock.New(
		resourcelock.LeasesResourceLock,
		options.LeaderElectionNamespace,
		options.LeaderElectionName,
		clientset.CoreV1(),
		clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{
			Identity:      options.LeaderElectionID,
			EventRecorder: eventRecorder,
		})
}
// getInClusterNamespace returns the namespace of the currently running
// election node (a Kubernetes pod), read from the service account mount
// at inClusterNamespacePath. It returns a descriptive error when not
// running inside a cluster or when the file cannot be read.
func getInClusterNamespace() (string, error) {
	// Check whether the namespace file exists. errors.Is is the
	// recommended replacement for the older os.IsNotExist helper.
	if _, err := os.Stat(inClusterNamespacePath); errors.Is(err, os.ErrNotExist) {
		return "", errors.New("not running in-cluster, please specify LeaderElectionNamespace")
	} else if err != nil {
		return "", fmt.Errorf("error checking namespace file: %w", err)
	}

	// Load the namespace file and return its content.
	namespace, err := os.ReadFile(inClusterNamespacePath)
	if err != nil {
		return "", fmt.Errorf("error reading namespace file: %w", err)
	}

	// Trim surrounding whitespace defensively: a trailing newline in the
	// mounted file would otherwise yield an invalid namespace value in
	// subsequent API requests.
	return strings.TrimSpace(string(namespace)), nil
}