/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package instancegroups

import (
	"bufio"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/golang/glog"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/cloudinstances"
	"k8s.io/kops/pkg/featureflag"
	"k8s.io/kops/pkg/validation"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kubernetes/pkg/kubectl/cmd"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
)
// RollingUpdateInstanceGroup performs a rolling update on the cloud instance group (for example, an AWS ASG) backing an InstanceGroup.
type RollingUpdateInstanceGroup struct {
// Cloud is the kops cloud provider
Cloud fi.Cloud
// CloudGroup is the kops cloud provider groups
CloudGroup *cloudinstances.CloudInstanceGroup
// TODO should remove the need to have rollingupdate struct and add:
// TODO - the kubernetes client
// TODO - the cluster name
// TODO - the client config
// TODO - fail on validate
// TODO - fail on drain
// TODO - cloudonly
}
// NewRollingUpdateInstanceGroup creates a new RollingUpdateInstanceGroup, checking that the cloud and cloud group are set.
func NewRollingUpdateInstanceGroup(cloud fi.Cloud, cloudGroup *cloudinstances.CloudInstanceGroup) (*RollingUpdateInstanceGroup, error) {
if cloud == nil {
return nil, fmt.Errorf("cloud provider is required")
}
if cloudGroup == nil {
return nil, fmt.Errorf("cloud group is required")
}
// TODO check more values in cloudGroup that they are set properly
return &RollingUpdateInstanceGroup{
Cloud: cloud,
CloudGroup: cloudGroup,
}, nil
}
// promptInteractive asks the user to continue, mostly copied from vendor/google.golang.org/api/examples/gmail.go.
func promptInteractive(upgradedHostId, upgradedHostName string) (stopPrompting bool, err error) {
stopPrompting = false
scanner := bufio.NewScanner(os.Stdin)
	if upgradedHostName != "" {
		glog.Infof("Pausing after finishing %q, node %q", upgradedHostId, upgradedHostName)
	} else {
		glog.Infof("Pausing after finishing %q", upgradedHostId)
	}
fmt.Print("Continue? (Y)es, (N)o, (A)lwaysYes: [Y] ")
scanner.Scan()
err = scanner.Err()
if err != nil {
glog.Infof("unable to interpret input: %v", err)
return stopPrompting, err
}
val := scanner.Text()
val = strings.TrimSpace(val)
val = strings.ToLower(val)
switch val {
case "n":
glog.Info("User signaled to stop")
os.Exit(3)
case "a":
glog.Info("Always Yes, stop prompting for rest of hosts")
stopPrompting = true
}
return stopPrompting, err
}
// TODO: Temporarily increase size of ASG?
// TODO: Remove from ASG first so status is immediately updated?
// TODO: Batch termination, like a rolling-update
// RollingUpdate performs a rolling update on the instances in the cloud instance group, draining and validating between replacements as configured.
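// A minimal, illustrative sketch of how this method is typically driven; the
// caller-side names (cloud, cloudGroup, rollingUpdateData, cluster,
// instanceGroupList) and the concrete durations are assumptions, not part of
// this file:
//
//	group, err := NewRollingUpdateInstanceGroup(cloud, cloudGroup)
//	if err != nil {
//		return err
//	}
//	// not a bastion group; wait 30s after each termination; allow 5m for validation
//	err = group.RollingUpdate(rollingUpdateData, cluster, instanceGroupList, false, 30*time.Second, 5*time.Minute)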
func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpdateCluster, cluster *api.Cluster, instanceGroupList *api.InstanceGroupList, isBastion bool, sleepAfterTerminate time.Duration, validationTimeout time.Duration) (err error) {
	// We should never be called with a nil rollingUpdateData, but check defensively.
if rollingUpdateData == nil {
return fmt.Errorf("rollingUpdate cannot be nil")
}
	// A k8s client is not needed when doing a cloud-only update.
if rollingUpdateData.K8sClient == nil && !rollingUpdateData.CloudOnly {
return fmt.Errorf("rollingUpdate is missing a k8s client")
}
if instanceGroupList == nil {
return fmt.Errorf("rollingUpdate is missing the InstanceGroupList")
}
update := r.CloudGroup.NeedUpdate
if rollingUpdateData.Force {
update = append(update, r.CloudGroup.Ready...)
}
if len(update) == 0 {
return nil
}
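	// Validate the cluster once before touching any instances, unless this is a
	// bastion group, validation is skipped via cloud-only, or the
	// DrainAndValidateRollingUpdate feature flag is disabled.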
if isBastion {
glog.V(3).Info("Not validating the cluster as instance is a bastion.")
} else if rollingUpdateData.CloudOnly {
glog.V(3).Info("Not validating cluster as validation is turned off via the cloud-only flag.")
} else if featureflag.DrainAndValidateRollingUpdate.Enabled() {
if err = r.ValidateCluster(rollingUpdateData, cluster, instanceGroupList); err != nil {
if rollingUpdateData.FailOnValidate {
return fmt.Errorf("error validating cluster: %v", err)
} else {
glog.V(2).Infof("Ignoring cluster validation error: %v", err)
glog.Info("Cluster validation failed, but proceeding since fail-on-validate-error is set to false")
}
}
}
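	// Replace instances one at a time: drain the node (when possible), delete its
	// Node object from the Kubernetes API, terminate the cloud instance, wait,
	// then re-validate the cluster before moving on.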
for _, u := range update {
instanceId := u.ID
nodeName := ""
if u.Node != nil {
nodeName = u.Node.Name
}
if isBastion {
			// We don't want to drain bastions - they aren't part of the cluster
} else if rollingUpdateData.CloudOnly {
glog.Warning("Not draining cluster nodes as 'cloudonly' flag is set.")
} else if featureflag.DrainAndValidateRollingUpdate.Enabled() {
if u.Node != nil {
glog.Infof("Draining the node: %q.", nodeName)
if err = r.DrainNode(u, rollingUpdateData); err != nil {
if rollingUpdateData.FailOnDrainError {
return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
} else {
glog.Infof("Ignoring error draining node %q: %v", nodeName, err)
}
}
} else {
glog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
}
}
// We unregister the node before deleting it; if the replacement comes up with the same name it would otherwise still be cordoned
// (It often seems like GCE tries to re-use names)
if !isBastion && !rollingUpdateData.CloudOnly {
if u.Node == nil {
glog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId)
} else {
glog.Infof("deleting node %q from kubernetes", nodeName)
if err := r.deleteNode(u.Node, rollingUpdateData); err != nil {
return fmt.Errorf("error deleting node %q: %v", nodeName, err)
}
}
}
if err = r.DeleteInstance(u); err != nil {
glog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err)
return err
}
// Wait for the minimum interval
glog.Infof("waiting for %v after terminating instance", sleepAfterTerminate)
time.Sleep(sleepAfterTerminate)
if isBastion {
glog.Infof("Deleted a bastion instance, %s, and continuing with rolling-update.", instanceId)
continue
} else if rollingUpdateData.CloudOnly {
glog.Warningf("Not validating cluster as cloudonly flag is set.")
} else if featureflag.DrainAndValidateRollingUpdate.Enabled() {
glog.Info("Validating the cluster.")
if err = r.ValidateClusterWithDuration(rollingUpdateData, cluster, instanceGroupList, validationTimeout); err != nil {
if rollingUpdateData.FailOnValidate {
glog.Errorf("Cluster did not validate within %s", validationTimeout)
return fmt.Errorf("error validating cluster after removing a node: %v", err)
}
glog.Warningf("Cluster validation failed after removing instance, proceeding since fail-on-validate is set to false: %v", err)
}
}
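		// In interactive mode, pause after each instance so the user can confirm,
		// abort, or switch off further prompting.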
if rollingUpdateData.Interactive {
stopPrompting, err := promptInteractive(u.ID, nodeName)
if err != nil {
return err
}
if stopPrompting {
				// rollingUpdateData is a pointer to a struct; changes here propagate back to the caller
rollingUpdateData.Interactive = false
}
}
}
return nil
}
// ValidateClusterWithDuration runs validation.ValidateCluster until either we get a positive result or the timeout expires
func (r *RollingUpdateInstanceGroup) ValidateClusterWithDuration(rollingUpdateData *RollingUpdateCluster, cluster *api.Cluster, instanceGroupList *api.InstanceGroupList, duration time.Duration) error {
// TODO should we expose this to the UI?
tickDuration := 30 * time.Second
	// Try to validate the cluster at least once; this handles durations that are
	// shorter than our tick time
if r.tryValidateCluster(rollingUpdateData, cluster, instanceGroupList, duration, tickDuration) {
return nil
}
timeout := time.After(duration)
tick := time.Tick(tickDuration)
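	// Note: time.Tick never stops its underlying Ticker; that is acceptable here
	// because a rolling update is a bounded, one-shot operation.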
	// Keep trying until we time out or the cluster validates
for {
select {
case <-timeout:
			// Timed out; fail with a timeout error
return fmt.Errorf("cluster did not validate within a duration of %q", duration)
case <-tick:
// Got a tick, validate cluster
if r.tryValidateCluster(rollingUpdateData, cluster, instanceGroupList, duration, tickDuration) {
return nil
}
			// Validation has not passed yet; loop and try again on the next tick
}
}
}
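// tryValidateCluster runs a single validation pass and reports whether the cluster
// currently validates; failures are logged, not returned.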
func (r *RollingUpdateInstanceGroup) tryValidateCluster(rollingUpdateData *RollingUpdateCluster, cluster *api.Cluster, instanceGroupList *api.InstanceGroupList, duration time.Duration, tickDuration time.Duration) bool {
result, err := validation.ValidateCluster(cluster, instanceGroupList, rollingUpdateData.K8sClient)
if err != nil {
glog.Infof("Cluster did not validate, will try again in %q until duration %q expires: %v.", tickDuration, duration, err)
return false
} else if len(result.Failures) > 0 {
glog.Infof("Cluster did not pass validation, will try again in %q until duration %q expires: %v.", tickDuration, duration, result.Failures[0].Message)
return false
} else {
glog.Info("Cluster validated.")
return true
}
}
// ValidateCluster runs our validation methods on the K8s Cluster.
func (r *RollingUpdateInstanceGroup) ValidateCluster(rollingUpdateData *RollingUpdateCluster, cluster *api.Cluster, instanceGroupList *api.InstanceGroupList) error {
if _, err := validation.ValidateCluster(cluster, instanceGroupList, rollingUpdateData.K8sClient); err != nil {
return fmt.Errorf("cluster %q did not pass validation: %v", cluster.Name, err)
}
return nil
}
// DeleteInstance deletes a cloud instance.
func (r *RollingUpdateInstanceGroup) DeleteInstance(u *cloudinstances.CloudInstanceGroupMember) error {
id := u.ID
nodeName := ""
if u.Node != nil {
nodeName = u.Node.Name
}
if nodeName != "" {
glog.Infof("Stopping instance %q, node %q, in group %q (this may take a while).", id, nodeName, r.CloudGroup.HumanName)
} else {
glog.Infof("Stopping instance %q, in group %q (this may take a while).", id, r.CloudGroup.HumanName)
}
if err := r.Cloud.DeleteInstance(u); err != nil {
if nodeName != "" {
return fmt.Errorf("error deleting instance %q, node %q: %v", id, nodeName, err)
} else {
return fmt.Errorf("error deleting instance %q: %v", id, err)
}
}
return nil
}
// DrainNode drains a K8s node.
func (r *RollingUpdateInstanceGroup) DrainNode(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster) error {
if rollingUpdateData.ClientGetter == nil {
return fmt.Errorf("ClientGetter not set")
}
if u.Node.Name == "" {
return fmt.Errorf("node name not set")
}
f := cmdutil.NewFactory(rollingUpdateData.ClientGetter)
streams := genericclioptions.IOStreams{
Out: os.Stdout,
ErrOut: os.Stderr,
}
drain := cmd.NewCmdDrain(f, streams)
args := []string{u.Node.Name}
options := cmd.NewDrainOptions(f, streams)
	// Override the drain defaults: ignore DaemonSet-managed pods, evict pods
	// without a controller, delete local (emptyDir) data, and use each pod's
	// own grace period
options.IgnoreDaemonsets = true
options.Force = true
options.DeleteLocalData = true
options.GracePeriodSeconds = -1
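	// A grace period of -1 means each pod's own terminationGracePeriodSeconds is used.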
err := options.Complete(f, drain, args)
if err != nil {
return fmt.Errorf("error setting up drain: %v", err)
}
err = options.RunCordonOrUncordon(true)
if err != nil {
return fmt.Errorf("error cordoning node node: %v", err)
}
err = options.RunDrain()
if err != nil {
return fmt.Errorf("error draining node: %v", err)
}
if rollingUpdateData.PostDrainDelay > 0 {
glog.Infof("Waiting for %s for pods to stabilize after draining.", rollingUpdateData.PostDrainDelay)
time.Sleep(rollingUpdateData.PostDrainDelay)
}
return nil
}
// deleteNode deletes a node from the k8s API. It does not delete the underlying instance.
func (r *RollingUpdateInstanceGroup) deleteNode(node *corev1.Node, rollingUpdateData *RollingUpdateCluster) error {
k8sclient := rollingUpdateData.K8sClient
var options metav1.DeleteOptions
err := k8sclient.CoreV1().Nodes().Delete(node.Name, &options)
if err != nil {
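		// A node that is already gone is not an error.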
if apierrors.IsNotFound(err) {
return nil
}
return fmt.Errorf("error deleting node: %v", err)
}
return nil
}
// Delete deletes the cloud instance group backing this RollingUpdateInstanceGroup.
func (r *RollingUpdateInstanceGroup) Delete() error {
if r.CloudGroup == nil {
return fmt.Errorf("group has to be set")
}
	// TODO: Leaving this func in place in order to cordon and drain nodes
return r.Cloud.DeleteGroup(r.CloudGroup)
}