cluster_restart.go
/*
Copyright 2021 The Cockroach Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package actor
import (
"context"
"fmt"
"strings"
"time"
api "github.com/cockroachdb/cockroach-operator/apis/v1alpha1"
"github.com/cockroachdb/cockroach-operator/pkg/condition"
"github.com/cockroachdb/cockroach-operator/pkg/features"
"github.com/cockroachdb/cockroach-operator/pkg/healthchecker"
"github.com/cockroachdb/cockroach-operator/pkg/resource"
"github.com/cockroachdb/cockroach-operator/pkg/scale"
"github.com/cockroachdb/cockroach-operator/pkg/utilfeature"
"github.com/go-logr/logr"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/apps/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
kubetypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
)
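// sleepDuration is how long to wait after a full cluster restart before probing cluster health.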
const sleepDuration = 1 * time.Minute
func newClusterRestart(scheme *runtime.Scheme, cl client.Client, config *rest.Config) Actor {
return &clusterRestart{
action: newAction("Crdb Cluster Restart", scheme, cl),
config: config,
}
}
// clusterRestart restarts the CRDB cluster in one of two ways: a rolling restart, or a
// full cluster restart used when the CA certificate is renewed.
type clusterRestart struct {
action
config *rest.Config
}
// GetActionType returns api.ClusterRestartAction, which is used to set the cluster status errors.
func (r *clusterRestart) GetActionType() api.ActionType {
return api.ClusterRestartAction
}
// Handles returns true if the prerequisites for a restart are met: the cluster's
// initialized condition has been set (true or false), its version has been checked,
// and the ClusterRestart feature gate is enabled.
func (r *clusterRestart) Handles(conds []api.ClusterCondition) bool {
return utilfeature.DefaultMutableFeatureGate.Enabled(features.ClusterRestart) &&
(condition.True(api.InitializedCondition, conds) || condition.False(api.InitializedCondition, conds)) &&
condition.True(api.CrdbVersionChecked, conds)
}
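// Act reads the restart-type annotation from the cluster and performs either a rolling
// restart or a full cluster restart. Once the restart completes, it clears the
// annotation and cancels the reconcile loop.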
func (r *clusterRestart) Act(ctx context.Context, cluster *resource.Cluster) error {
log := r.log.WithValues("CrdbCluster", cluster.ObjectKey())
log.V(DEBUGLEVEL).Info("starting cluster restart action")
restartType := cluster.GetAnnotationRestartType()
if restartType == "" {
log.V(DEBUGLEVEL).Info("No restart cluster action")
return nil
}
// Get the sts and compare the sts size to the size in the CR
key := kubetypes.NamespacedName{
Namespace: cluster.Namespace(),
Name: cluster.StatefulSetName(),
}
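// A typed clientset is needed for the direct StatefulSet updates and pod deletions below.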
clientset, err := kubernetes.NewForConfig(r.config)
if err != nil {
return errors.Wrapf(err, "failed to create kubernetes clientset")
}
statefulSet := &appsv1.StatefulSet{}
if err := r.client.Get(ctx, key, statefulSet); err != nil {
return errors.Wrap(err, "failed to fetch statefulset")
}
// TODO: statefulSetIsUpdating is not quite working as expected, so we also check the
// status below. We should look at the update code in partition update to address this.
if statefulSetIsUpdating(statefulSet) {
return NotReadyErr{Err: errors.New("restart statefulset is updating, waiting for the update to finish")}
}
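// Require all replicas to be up before starting the restart.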
status := &statefulSet.Status
if status.CurrentReplicas == 0 || status.CurrentReplicas < status.Replicas {
log.Info("restart statefulset does not have all replicas up")
return NotReadyErr{Err: errors.New("restart cluster statefulset does not have all replicas up")}
}
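// The health checker verifies that the cluster is healthy between pod restarts.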
healthChecker := healthchecker.NewHealthChecker(cluster, clientset, r.scheme, r.config)
if strings.EqualFold(restartType, api.ClusterRestartType(api.RollingRestart).String()) {
log.V(DEBUGLEVEL).Info("initiating rolling restart action")
if err := r.rollingSts(ctx, statefulSet.DeepCopy(), clientset, r.log, healthChecker); err != nil {
return errors.Wrapf(err, "error restarting statefulset %s.%s", cluster.Namespace(), cluster.StatefulSetName())
}
log.V(DEBUGLEVEL).Info("completed rolling cluster restart")
} else if strings.EqualFold(restartType, api.ClusterRestartType(api.FullCluster).String()) {
if err := r.fullClusterRestart(ctx, statefulSet, log, clientset); err != nil {
return errors.Wrapf(err, "error reseting statefulset %s.%s to 0 replicas", cluster.Namespace(), cluster.StatefulSetName())
}
// Sleep to give the crdb pods time to come up before probing cluster health.
log.V(DEBUGLEVEL).Info("sleeping", "duration", sleepDuration.String(), "label", "after full cluster restart")
time.Sleep(sleepDuration)
if err := healthChecker.Probe(ctx, log, fmt.Sprintf("waiting after restart for cluster %s", cluster.Name()), 0); err != nil {
return err
}
log.V(DEBUGLEVEL).Info("completed full cluster restart")
} else {
err := ValidationError{Err: errors.New("invalid restart annotation value; use Rolling or FullCluster")}
log.V(DEBUGLEVEL).Info("invalid annotation for cluster restart")
return err
}
// Force the updated status to be saved on the cluster, then cancel the reconcile loop.
fetcher := resource.NewKubeFetcher(ctx, cluster.Namespace(), r.client)
cr := resource.ClusterPlaceholder(cluster.Name())
if err := fetcher.Fetch(cr); err != nil {
log.Error(err, "failed to retrieve CrdbCluster resource on restart action")
return err
}
refreshedCluster := resource.NewCluster(cr)
// Delete the restart annotation so the restart does not run again.
refreshedCluster.DeleteRestartTypeAnnotation()
// TODO: use patch for annotations
if err := r.client.Update(ctx, refreshedCluster.Unwrap()); err != nil {
log.Error(err, "failed reseting the restart cluster field")
}
log.V(DEBUGLEVEL).Info("completed cluster restart")
CancelLoop(ctx)
return nil
}
// rollingSts performs a rolling update on the cluster.
func (r *clusterRestart) rollingSts(ctx context.Context, sts *appsv1.StatefulSet,
clientset *kubernetes.Clientset,
l logr.Logger,
healthChecker healthchecker.HealthChecker) error {
timeNow := metav1.Now()
// When a StatefulSet's partition number is set to `n`, only StatefulSet pods
// numbered greater or equal to `n` will be updated. The rest will remain untouched.
// https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
for partition := *sts.Spec.Replicas - 1; partition >= 0; partition-- {
stsName := sts.Name
stsNamespace := sts.Namespace
replicas := sts.Spec.Replicas
refreshedSts, err := clientset.AppsV1().StatefulSets(stsNamespace).Get(ctx, stsName, metav1.GetOptions{})
if err != nil {
return handleStsError(err, l, stsName, stsNamespace)
}
sts := refreshedSts.DeepCopy()
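// Stamping the restart time onto the pod template annotations changes the template,
// which triggers the StatefulSet controller to recreate the pods.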
if sts.Annotations == nil {
sts.Annotations = make(map[string]string)
}
sts.Annotations[resource.CrdbRestartAnnotation] = timeNow.Format(time.RFC3339)
if sts.Spec.Template.Annotations == nil {
sts.Spec.Template.Annotations = make(map[string]string)
}
sts.Spec.Template.Annotations[resource.CrdbRestartAnnotation] = timeNow.Format(time.RFC3339)
sts.Spec.UpdateStrategy.RollingUpdate = &v1.RollingUpdateStatefulSetStrategy{
Partition: &partition,
}
_, err = clientset.AppsV1().StatefulSets(stsNamespace).Update(ctx, sts, metav1.UpdateOptions{})
if err != nil {
return handleStsError(err, l, stsName, stsNamespace)
}
// Wait until the StatefulSet reports all replicas ready before moving on to the
// next partition.
l.V(DEBUGLEVEL).Info("waiting until partition is done restarting", "partition", partition)
if err := scale.WaitUntilStatefulSetIsReadyToServe(ctx, clientset, stsNamespace, stsName, *replicas); err != nil {
return errors.Wrapf(err, "error rolling update stategy on pod %d", int(partition))
}
// wait for all replicas to be up
if err := healthChecker.Probe(ctx, l, "between restarting pods", int(partition)); err != nil {
return errors.Wrapf(err, "error health checker for rolling restart on pod %d", int(partition))
}
}
return nil
}
// fullClusterRestart deletes all of the StatefulSet's pods to force each pod to
// reload its certificates. It is used during CA certificate rotation.
func (r *clusterRestart) fullClusterRestart(ctx context.Context, sts *appsv1.StatefulSet, l logr.Logger, clientset *kubernetes.Clientset) error {
timeNow := metav1.Now()
stsName := sts.Name
stsNamespace := sts.Namespace
if sts.Annotations == nil {
sts.Annotations = make(map[string]string)
}
sts.Annotations[resource.CrdbRestartAnnotation] = timeNow.Format(time.RFC3339)
_, err := clientset.AppsV1().StatefulSets(stsNamespace).Update(ctx, sts, metav1.UpdateOptions{})
if err != nil {
return handleStsError(err, l, stsName, stsNamespace)
}
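// Delete every pod matching the StatefulSet's selector in a single call; foreground
// propagation ensures dependents are removed before the pods are considered deleted.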
dp := metav1.DeletePropagationForeground
err = clientset.CoreV1().Pods(sts.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{
PropagationPolicy: &dp,
}, metav1.ListOptions{
LabelSelector: labels.Set(sts.Spec.Selector.MatchLabels).AsSelector().String(),
})
if err != nil {
l.Error(err, "failed to delete the pods for sts")
return err
}
// Wait for the StatefulSet to recreate the deleted pods and become ready again.
return scale.WaitUntilStatefulSetIsReadyToServe(ctx, clientset, stsNamespace, stsName, *sts.Spec.Replicas)
}
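// handleStsError logs a StatefulSet API error and wraps it with context, distinguishing
// not-found errors from other status errors.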
func handleStsError(err error, l logr.Logger, stsName string, ns string) error {
if k8sErrors.IsNotFound(err) {
l.Error(err, "sts is not found", "stsName", stsName, "namespace", ns)
return errors.Wrapf(err, "sts is not found: %s ns: %s", stsName, ns)
} else if statusError, isStatus := err.(*k8sErrors.StatusError); isStatus {
l.Error(statusError, fmt.Sprintf("Error getting statefulset %v", statusError.ErrStatus.Message), "stsName", stsName, "namespace", ns)
return statusError
}
l.Error(err, "error getting statefulset", "stsName", stsName, "namspace", ns)
return err
}