-
Notifications
You must be signed in to change notification settings - Fork 216
/
cluster.go
155 lines (129 loc) · 3.48 KB
/
cluster.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
package cluster
import (
	"context"
	"encoding/json"
	"sort"
	"time"

	fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
	fleetcontrollers "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1"
	corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
	"github.com/rancher/wrangler/pkg/ticker"
	"github.com/sirupsen/logrus"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
)
// handler reports the local cluster's node status back to the fleet
// Cluster resource that represents this cluster.
type handler struct {
	agentNamespace   string                          // namespace the fleet agent runs in; reported in AgentStatus
	clusterName      string                          // name of the fleet Cluster resource to patch
	clusterNamespace string                          // namespace of the fleet Cluster resource to patch
	nodes            corecontrollers.NodeCache       // cache used to list this cluster's nodes
	clusters         fleetcontrollers.ClusterClient  // client used to patch the Cluster status subresource
	reported         fleet.AgentStatus               // last status successfully patched; used to skip no-op updates
}
// Register starts the background reporting of this cluster's node status to
// the fleet Cluster resource identified by clusterNamespace/clusterName.
// It launches two goroutines: one that sends an initial report shortly after
// startup, and one that re-reports every checkinInterval (defaulting to
// 15 minutes when zero). Both stop when ctx is cancelled.
func Register(ctx context.Context,
	agentNamespace string,
	clusterNamespace string,
	clusterName string,
	checkinInterval time.Duration,
	nodes corecontrollers.NodeCache,
	clusters fleetcontrollers.ClusterClient) {
	h := handler{
		agentNamespace:   agentNamespace,
		clusterName:      clusterName,
		clusterNamespace: clusterNamespace,
		nodes:            nodes,
		clusters:         clusters,
	}

	go func() {
		// Give caches a short grace period before the first report, but
		// honor context cancellation instead of sleeping unconditionally
		// so shutdown does not trigger a stray report (or leak this
		// goroutine for up to 15 seconds).
		select {
		case <-ctx.Done():
			return
		case <-time.After(15 * time.Second):
		}
		if err := h.Update(); err != nil {
			logrus.Errorf("failed to report cluster node status: %v", err)
		}
	}()

	go func() {
		if checkinInterval == 0 {
			checkinInterval = 15 * time.Minute
		}
		// ticker.Context stops delivering ticks when ctx is cancelled,
		// which ends this loop and the goroutine.
		for range ticker.Context(ctx, checkinInterval) {
			if err := h.Update(); err != nil {
				logrus.Errorf("failed to report cluster node status: %v", err)
			}
		}
	}()
}
// Update lists this cluster's nodes, summarizes their readiness into a
// fleet.AgentStatus, and merge-patches it onto the Cluster's status
// subresource. The patch is skipped when the status is semantically equal
// to the last successfully reported one.
func (h *handler) Update() error {
	nodeList, err := h.nodes.List(labels.Everything())
	if err != nil {
		return err
	}

	readyNames, nonReadyNames := sortReadyUnready(nodeList)

	status := fleet.AgentStatus{
		LastSeen:      metav1.Now(),
		Namespace:     h.agentNamespace,
		NonReadyNodes: len(nonReadyNames),
		ReadyNodes:    len(readyNames),
	}

	// Only a sample of up to three names per list is reported upstream;
	// the counts above still reflect every node.
	const maxNames = 3
	if len(readyNames) > maxNames {
		readyNames = readyNames[:maxNames]
	}
	if len(nonReadyNames) > maxNames {
		nonReadyNames = nonReadyNames[:maxNames]
	}
	status.ReadyNodeNames = readyNames
	status.NonReadyNodeNames = nonReadyNames

	if equality.Semantic.DeepEqual(h.reported, status) {
		return nil
	}

	patch, err := json.Marshal(fleet.Cluster{
		Status: fleet.ClusterStatus{
			Agent: status,
		},
	})
	if err != nil {
		return err
	}

	if _, err := h.clusters.Patch(h.clusterNamespace, h.clusterName, types.MergePatchType, patch, "status"); err != nil {
		return err
	}

	// Remember what we sent only after the patch succeeds, so a failed
	// patch is retried on the next tick.
	h.reported = status
	return nil
}
// sortReadyUnready partitions nodes into ready and non-ready name lists.
// Within each list, control-plane (master) node names come first, and each
// group is sorted alphabetically.
func sortReadyUnready(nodes []*corev1.Node) (ready []string, nonReady []string) {
	var (
		readyMasters    []string
		nonReadyMasters []string
		readyWorkers    []string
		nonReadyWorkers []string
	)

	for _, node := range nodes {
		// A node counts as ready when its NodeReady condition is True.
		isReady := false
		for _, cond := range node.Status.Conditions {
			if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue {
				isReady = true
				break
			}
		}

		// BUG FIX: node roles are published as *labels*, not annotations.
		// k3s/RKE set "node-role.kubernetes.io/master=true", while kubeadm
		// marks the role with an empty-valued label (and newer clusters use
		// "node-role.kubernetes.io/control-plane"), so test for label
		// presence. The original annotation check is kept as a fallback for
		// backward compatibility.
		_, hasMasterLabel := node.Labels["node-role.kubernetes.io/master"]
		_, hasControlPlaneLabel := node.Labels["node-role.kubernetes.io/control-plane"]
		isMaster := hasMasterLabel || hasControlPlaneLabel ||
			node.Annotations["node-role.kubernetes.io/master"] == "true"

		switch {
		case isMaster && isReady:
			readyMasters = append(readyMasters, node.Name)
		case isMaster:
			nonReadyMasters = append(nonReadyMasters, node.Name)
		case isReady:
			readyWorkers = append(readyWorkers, node.Name)
		default:
			nonReadyWorkers = append(nonReadyWorkers, node.Name)
		}
	}

	sort.Strings(readyMasters)
	sort.Strings(nonReadyMasters)
	sort.Strings(readyWorkers)
	sort.Strings(nonReadyWorkers)

	return append(readyMasters, readyWorkers...), append(nonReadyMasters, nonReadyWorkers...)
}