-
Notifications
You must be signed in to change notification settings - Fork 18
/
status.go
101 lines (84 loc) · 3.58 KB
/
status.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
package controllers
import (
"context"
"sort"
"strings"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// updateStatus refreshes the status subresource of the EtcdadmCluster from the
// set of machines it currently owns: the scale-subresource label selector,
// replica counts, readiness, and the resize/endpoints conditions. When the set
// of etcd client endpoints has changed, it also rewrites the init Secret that
// joining etcdadm members read their join address from.
//
// It returns nil early (leaving Ready false where applicable) while the cluster
// is being deleted, is still scaling up or down, or while a member is not yet
// listening; it returns the member's health error when a listening member is
// unhealthy, and propagates any error from reading or updating the init Secret.
func (r *EtcdadmClusterReconciler) updateStatus(ctx context.Context, ec *etcdv1.EtcdadmCluster, cluster *clusterv1.Cluster, ownedMachines etcdMachines) error {
	log := r.Log.WithName(ec.Name)
	selector := EtcdMachinesSelectorForCluster(cluster.Name, ec.Name)
	// Copy label selector to its status counterpart in string format.
	// This is necessary for CRDs including scale subresources.
	ec.Status.Selector = selector.String()
	machines := make([]*clusterv1.Machine, 0, len(ownedMachines))
	for _, machine := range ownedMachines {
		machines = append(machines, machine.Machine)
	}
	log.Info("Following machines owned by this etcd cluster", "machines", klog.KObjSlice(machines))
	desiredReplicas := *ec.Spec.Replicas
	ec.Status.ReadyReplicas = int32(len(ownedMachines))
	// Skip all further status reconciliation while the cluster is being torn down.
	if !ec.DeletionTimestamp.IsZero() {
		return nil
	}
	readyReplicas := ec.Status.ReadyReplicas
	if readyReplicas < desiredReplicas {
		conditions.MarkFalse(ec, etcdv1.EtcdClusterResizeCompleted, etcdv1.EtcdScaleUpInProgressReason, clusterv1.ConditionSeverityWarning, "Scaling up etcd cluster to %d replicas (actual %d)", desiredReplicas, readyReplicas)
		ec.Status.Ready = false
		return nil
	}
	if readyReplicas > desiredReplicas {
		// Fix: this is the scale-down branch; the message previously said "Scaling up",
		// copy-pasted from the branch above.
		conditions.MarkFalse(ec, etcdv1.EtcdClusterResizeCompleted, etcdv1.EtcdScaleDownInProgressReason, clusterv1.ConditionSeverityWarning, "Scaling down etcd cluster to %d replicas (actual %d)", desiredReplicas, readyReplicas)
		ec.Status.Ready = false
		return nil
	}
	for _, m := range ownedMachines {
		if !m.healthy() {
			if m.listening {
				// The machine is listening but not ready/unhealthy
				ec.Status.Ready = false
				return m.healthError
			} else {
				// The machine is not listening, probably transient while etcd starts
				return nil
			}
		}
	}
	conditions.MarkTrue(ec, etcdv1.EtcdClusterResizeCompleted)
	// etcd ready when all machines have address set
	ec.Status.Ready = true
	conditions.MarkTrue(ec, etcdv1.EtcdEndpointsAvailable)
	// Sort for a deterministic, comparable representation of the endpoint set.
	endpoints := ownedMachines.endpoints()
	sort.Strings(endpoints)
	currEndpoints := strings.Join(endpoints, ",")
	log.Info("Comparing current and previous endpoints", "current endpoints", currEndpoints, "previous endpoints", ec.Status.Endpoints)
	// Checking if endpoints have changed. This avoids unnecessary client calls
	// to get and update the Secret containing the endpoints
	if ec.Status.Endpoints != currEndpoints {
		log.Info("Updating endpoints annotation, and the Secret containing etcdadm join address")
		ec.Status.Endpoints = currEndpoints
		// NOTE(review): the Secret's name is taken from Status.InitMachineAddress —
		// presumably the secret is named after the init machine's address; verify
		// against the code that creates it.
		secretNameNs := client.ObjectKey{Name: ec.Status.InitMachineAddress, Namespace: cluster.Namespace}
		secretInitAddress := &corev1.Secret{}
		if err := r.Client.Get(ctx, secretNameNs, secretInitAddress); err != nil {
			return err
		}
		// Fix: a Secret fetched with no data fields has a nil Data map, and
		// assigning into a nil map panics. Initialize it before writing.
		if secretInitAddress.Data == nil {
			secretInitAddress.Data = make(map[string][]byte)
		}
		if len(endpoints) > 0 {
			secretInitAddress.Data["address"] = []byte(getEtcdMachineAddressFromClientURL(endpoints[0]))
		} else {
			secretInitAddress.Data["address"] = []byte("")
		}
		secretInitAddress.Data["clientUrls"] = []byte(ec.Status.Endpoints)
		r.Log.Info("Updating init secret with endpoints")
		if err := r.Client.Update(ctx, secretInitAddress); err != nil {
			return err
		}
	}
	// set creationComplete to true, this is only set once after the first set of endpoints are ready and never unset, to indicate that the cluster has been created
	ec.Status.CreationComplete = true
	return nil
}