helpers.go
package main

import (
	"math/rand"
	"os"
	"time"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
)
const (
	// LeaseDuration is the default duration for the leader election lease.
	LeaseDuration = 90 * time.Second
	// RenewDeadline is the default deadline by which the acting leader must renew the lease.
	RenewDeadline = 60 * time.Second
	// RetryPeriod is the default duration between leader election retries.
	RetryPeriod = 30 * time.Second

	// minResyncPeriod is the lower bound for the jittered informer resync interval.
	minResyncPeriod = 10 * time.Minute
)
// resyncPeriod returns a function that computes a jittered resync interval in
// [minResyncPeriod, 2*minResyncPeriod), so that informers started at the same
// time do not all resync in lockstep.
func resyncPeriod() func() time.Duration {
	return func() time.Duration {
		factor := rand.Float64() + 1
		return time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor)
	}
}
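
// Example usage (a minimal sketch, not part of this file): the returned
// closure is typically handed to a shared informer factory so that caches
// resync at the randomized interval. The `kubeClient` name below is a
// hypothetical kubernetes.Interface supplied by the caller.
//
//	factory := informers.NewSharedInformerFactory(kubeClient, resyncPeriod()())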
// CreateResourceLock returns a ConfigMap-based resource lock for leader election.
func CreateResourceLock(cb *ClientBuilder, componentNamespace, componentName string) resourcelock.Interface {
	recorder := record.
		NewBroadcaster().
		NewRecorder(scheme.Scheme, v1.EventSource{Component: componentName})

	id, err := os.Hostname()
	if err != nil {
		glog.Fatalf("error getting hostname: %v", err)
	}

	// Add a uniquifier so that two processes on the same host don't
	// accidentally both become active.
	id = id + "_" + string(uuid.NewUUID())

	return &resourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{
			Namespace: componentNamespace,
			Name:      componentName,
		},
		Client: cb.KubeClientOrDie("leader-election").CoreV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		},
	}
}
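
// Example usage (a minimal sketch, not part of this file): wiring the lock
// into client-go's leader election loop with the timeouts defined above. The
// namespace, component name, and `cb` values are placeholders for whatever
// the operator's startup code provides; the exact RunOrDie signature varies
// slightly by client-go version (newer versions take a context, as shown).
//
//	lock := CreateResourceLock(cb, "openshift-machine-api", "machine-api-operator")
//	leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
//		Lock:          lock,
//		LeaseDuration: LeaseDuration,
//		RenewDeadline: RenewDeadline,
//		RetryPeriod:   RetryPeriod,
//		Callbacks: leaderelection.LeaderCallbacks{
//			OnStartedLeading: func(ctx context.Context) { /* start controllers */ },
//			OnStoppedLeading: func() { glog.Fatal("leader election lost") },
//		},
//	})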