forked from rancher/rancher
-
Notifications
You must be signed in to change notification settings - Fork 0
/
labels.go
112 lines (89 loc) · 2.28 KB
/
labels.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
package node
import (
"context"
"github.com/rancher/rancher/pkg/librke"
"github.com/rancher/rke/services"
"github.com/rancher/types/apis/management.cattle.io/v3"
)
// checkLabels reconciles a worker-only RKE node's desired labels and
// annotations with the labels/annotations produced by its generated node
// plan. It returns the (possibly mutated) node and any lookup/plan error.
func (m *Lifecycle) checkLabels(node *v3.Node) (*v3.Node, error) {
	// Only act on nodes that have reported status and are pure workers.
	if !hasCheckedIn(node) || !isWorkerOnlyNode(node) {
		return node, nil
	}

	cluster, err := m.clusterLister.Get("", node.Namespace)
	if err != nil {
		return node, err
	}

	// Only RKE-driven clusters with an applied RKE config are handled.
	if cluster.Status.Driver != v3.ClusterDriverRKE || cluster.Status.AppliedSpec.RancherKubernetesEngineConfig == nil {
		return node, nil
	}

	// A non-empty desired set means a sync is already pending; don't stomp it.
	if len(node.Spec.DesiredNodeAnnotations) > 0 || len(node.Spec.DesiredNodeLabels) > 0 {
		return node, nil
	}

	nodePlan, err := getNodePlan(cluster, node)
	if err != nil || nodePlan == nil {
		// err is nil when nodePlan is nil, so this covers both exits.
		return node, err
	}

	// needsUpdate reports whether any entry of want is missing or stale in have.
	needsUpdate := func(want, have map[string]string) bool {
		for key, val := range want {
			if have[key] != val {
				return true
			}
		}
		return false
	}
	if !needsUpdate(nodePlan.Labels, node.Status.NodeLabels) &&
		!needsUpdate(nodePlan.Annotations, node.Status.NodeAnnotations) {
		return node, nil
	}

	// Start from the currently observed sets, then overlay the plan's values.
	node.Spec.DesiredNodeLabels = copyMap(node.Status.NodeLabels)
	node.Spec.DesiredNodeAnnotations = copyMap(node.Status.NodeAnnotations)
	for key, val := range nodePlan.Labels {
		node.Spec.DesiredNodeLabels[key] = val
	}
	for key, val := range nodePlan.Annotations {
		node.Spec.DesiredNodeAnnotations[key] = val
	}
	return node, nil
}
// copyMap returns a shallow copy of in. The result is always non-nil, even
// for a nil input, because callers (checkLabels) write into it directly.
// The map is pre-sized to the input length to avoid incremental growth.
func copyMap(in map[string]string) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		out[k] = v
	}
	return out
}
// getNodePlan generates the RKE plan for the cluster and returns a pointer
// to the plan entry whose address matches the node, or (nil, nil) when no
// entry matches.
func getNodePlan(cluster *v3.Cluster, node *v3.Node) (*v3.RKEConfigNodePlan, error) {
	dockerInfo, err := librke.GetDockerInfo(node)
	if err != nil {
		return nil, err
	}

	plan, err := librke.New().GeneratePlan(context.Background(), cluster.Status.AppliedSpec.RancherKubernetesEngineConfig, dockerInfo)
	if err != nil {
		return nil, err
	}

	target := node.Status.NodeConfig.Address
	for i := range plan.Nodes {
		if plan.Nodes[i].Address == target {
			// Return a pointer to a copy, detached from plan.Nodes.
			entry := plan.Nodes[i]
			return &entry, nil
		}
	}
	return nil, nil
}
// hasCheckedIn reports whether the node has reported any annotations in its
// status, which is used as the signal that the node has checked in at least
// once.
func hasCheckedIn(node *v3.Node) bool {
	return len(node.Status.NodeAnnotations) != 0
}
// isWorkerOnlyNode reports whether the node's RKE config assigns it exactly
// one role and that role is the worker role. A nil NodeConfig counts as
// not-worker-only.
func isWorkerOnlyNode(node *v3.Node) bool {
	// Return the condition directly instead of an if/return-false/return-true chain.
	return node.Status.NodeConfig != nil &&
		len(node.Status.NodeConfig.Role) == 1 &&
		node.Status.NodeConfig.Role[0] == services.WorkerRole
}