main.go
package purging

import (
    "context"
    "os"
    "time"

    "github.com/chroma-core/chroma/go/pkg/log/repository"
    "github.com/pingcap/log"
    "go.uber.org/zap"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
)
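
// RunPurging starts the log purging process for this pod. Because only one
// replica should purge records at a time, the pod first joins a Kubernetes
// leader election; the elected leader then runs the periodic purging loop.
// It returns early if the required environment variables are missing or the
// Kubernetes client cannot be created.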
func RunPurging(ctx context.Context, lg *repository.LogRepository) {
    log.Info("starting purging")
    podName := os.Getenv("POD_NAME")
    if podName == "" {
        log.Error("POD_NAME environment variable is not set")
        return
    }
    namespace := os.Getenv("POD_NAMESPACE")
    if namespace == "" {
        log.Error("POD_NAMESPACE environment variable is not set")
        return
    }
    client, err := createKubernetesClient()
    if err != nil {
        log.Error("failed to create kubernetes client", zap.Error(err))
        return
    }
    elector, err := setupLeaderElection(client, namespace, podName, lg)
    if err != nil {
        log.Error("failed to setup leader election", zap.Error(err))
        return
    }
    elector.Run(ctx)
}
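
// createKubernetesClient builds a Clientset from the in-cluster service
// account configuration, so this code must run inside a Kubernetes pod.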
func createKubernetesClient() (*kubernetes.Clientset, error) {
    config, err := rest.InClusterConfig()
    if err != nil {
        return nil, err
    }
    return kubernetes.NewForConfig(config)
}
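
// setupLeaderElection configures a LeaseLock named "log-purging-lock" in the
// pod's namespace, using the pod name as the candidate identity. The returned
// LeaderElector runs the purging loop while this pod holds the lease and
// releases the lease when the context is cancelled.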
func setupLeaderElection(client *kubernetes.Clientset, namespace, podName string, lg *repository.LogRepository) (lr *leaderelection.LeaderElector, err error) {
    lock := &resourcelock.LeaseLock{
        LeaseMeta: metav1.ObjectMeta{
            Name:      "log-purging-lock",
            Namespace: namespace,
        },
        Client: client.CoordinationV1(),
        LockConfig: resourcelock.ResourceLockConfig{
            Identity: podName,
        },
    }
    // The named return value lets the OnStartedLeading callback close over
    // lr, which is assigned by NewLeaderElector below, before the callback
    // can ever fire.
    lr, err = leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
        Lock:            lock,
        ReleaseOnCancel: true,
        LeaseDuration:   15 * time.Second,
        RenewDeadline:   10 * time.Second,
        RetryPeriod:     2 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                log.Info("started leading")
                performPurgingLoop(ctx, lr, lg)
            },
            OnStoppedLeading: func() {
                log.Info("stopped leading")
            },
        },
    })
    return
}
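
// performPurgingLoop ticks every 10 seconds and, as long as this pod is still
// the leader, purges old records through the log repository. It exits when
// the context is cancelled.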
func performPurgingLoop(ctx context.Context, le *leaderelection.LeaderElector, lg *repository.LogRepository) {
    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            log.Info("checking leader status")
            if !le.IsLeader() {
                // Leadership can be lost between ticks; skip this round and
                // keep waiting rather than purging without the lease.
                log.Info("leader is inactive")
                continue
            }
            log.Info("leader is active")
            if err := lg.PurgeRecords(ctx); err != nil {
                log.Error("failed to purge records", zap.Error(err))
                continue
            }
            log.Info("purged records")
        }
    }
}
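
For context, this is roughly how a service entrypoint might invoke RunPurging. The sketch below is illustrative, not taken from this file: the signal wiring and the purging package's import path are assumptions, and construction of the *repository.LogRepository is elided since its constructor is not defined here.

    package main

    import (
        "context"
        "os/signal"
        "syscall"

        "github.com/chroma-core/chroma/go/pkg/log/purging"
        "github.com/chroma-core/chroma/go/pkg/log/repository"
    )

    func main() {
        // Cancel the context on SIGTERM so ReleaseOnCancel lets the pod give
        // up the lease promptly during a rolling restart (assumed wiring).
        ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM)
        defer stop()

        // Repository construction is elided; in the real service this wraps
        // the log database connection.
        var lg *repository.LogRepository
        purging.RunPurging(ctx, lg)
    }

Because RunPurging blocks inside elector.Run until the context is cancelled, the caller is expected to run it on the main goroutine (as above) or in a dedicated goroutine alongside the service's other loops.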