rollback.go
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"bytes"
"fmt"
"sort"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
kapps "k8s.io/kubernetes/pkg/kubectl/apps"
"k8s.io/kubernetes/pkg/kubectl/scheme"
deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
)
const (
rollbackSuccess = "rolled back"
rollbackSkipped = "skipped rollback"
)
// Rollbacker provides an interface for resources that can be rolled back.
type Rollbacker interface {
Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error)
}
type RollbackVisitor struct {
clientset kubernetes.Interface
result Rollbacker
}
func (v *RollbackVisitor) VisitDeployment(elem kapps.GroupKindElement) {
v.result = &DeploymentRollbacker{v.clientset}
}
func (v *RollbackVisitor) VisitStatefulSet(kind kapps.GroupKindElement) {
v.result = &StatefulSetRollbacker{v.clientset}
}
func (v *RollbackVisitor) VisitDaemonSet(kind kapps.GroupKindElement) {
v.result = &DaemonSetRollbacker{v.clientset}
}
func (v *RollbackVisitor) VisitJob(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitPod(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitReplicationController(kind kapps.GroupKindElement) {}
func (v *RollbackVisitor) VisitCronJob(kind kapps.GroupKindElement) {}
// RollbackerFor returns an implementation of the Rollbacker interface for the given schema kind.
func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, error) {
elem := kapps.GroupKindElement(kind)
visitor := &RollbackVisitor{
clientset: c,
}
err := elem.Accept(visitor)
if err != nil {
return nil, fmt.Errorf("error retrieving rollbacker for %q, %v", kind.String(), err)
}
if visitor.result == nil {
return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind)
}
return visitor.result, nil
}
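// Hypothetical usage sketch, not part of the original file: obtain a Rollbacker
// for a Deployment and perform a dry-run rollback to revision 2. The obj and
// clientset values are assumed to be supplied by the caller.
func exampleRollbackDeployment(obj runtime.Object, clientset kubernetes.Interface) (string, error) {
	rollbacker, err := RollbackerFor(schema.GroupKind{Group: "apps", Kind: "Deployment"}, clientset)
	if err != nil {
		return "", err
	}
	// toRevision selects the target revision (0 would mean "previous revision");
	// dryRun=true only prints the template that would be restored.
	return rollbacker.Rollback(obj, nil, 2, true)
}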
type DeploymentRollbacker struct {
c kubernetes.Interface
}
func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) {
if toRevision < 0 {
return "", revisionNotFoundErr(toRevision)
}
accessor, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
}
name := accessor.GetName()
namespace := accessor.GetNamespace()
// TODO: Fix this after kubectl has been removed from core. It is not possible to convert the runtime.Object
// to the external appsv1 Deployment without round-tripping through an internal version of Deployment. We're
// currently getting rid of all internal versions of resources. So we specifically request the appsv1 version
// here. This follows the same pattern as for DaemonSet and StatefulSet.
deployment, err := r.c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err)
}
rsForRevision, err := deploymentRevision(deployment, r.c, toRevision)
if err != nil {
return "", err
}
if dryRun {
return printTemplate(&rsForRevision.Spec.Template)
}
if deployment.Spec.Paused {
return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", name)
}
// Skip if the revision already matches current Deployment
if equalIgnoreHash(&rsForRevision.Spec.Template, &deployment.Spec.Template) {
return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil
}
// remove hash label before patching back into the deployment
delete(rsForRevision.Spec.Template.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
// compute deployment annotations
annotations := map[string]string{}
for k := range annotationsToSkip {
if v, ok := deployment.Annotations[k]; ok {
annotations[k] = v
}
}
for k, v := range rsForRevision.Annotations {
if !annotationsToSkip[k] {
annotations[k] = v
}
}
// make patch to restore
patchType, patch, err := getDeploymentPatch(&rsForRevision.Spec.Template, annotations)
if err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
// Restore revision
if _, err = r.c.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
return rollbackSuccess, nil
}
// equalIgnoreHash returns true if the two given PodTemplateSpecs are equal, ignoring any difference in the value of the pod-template-hash label.
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
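// Hypothetical illustration, not part of the original file: two templates that
// differ only in the pod-template-hash label compare as equal.
func exampleEqualIgnoreHash() bool {
	base := corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
	}
	withHash := base.DeepCopy()
	withHash.Labels[appsv1.DefaultDeploymentUniqueLabelKey] = "5d59d67564"
	return equalIgnoreHash(&base, withHash) // true
}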
// annotationsToSkip lists the annotations that should be preserved from the deployment and not
// copied from the replicaset when rolling a deployment back
var annotationsToSkip = map[string]bool{
corev1.LastAppliedConfigAnnotation: true,
deploymentutil.RevisionAnnotation: true,
deploymentutil.RevisionHistoryAnnotation: true,
deploymentutil.DesiredReplicasAnnotation: true,
deploymentutil.MaxReplicasAnnotation: true,
appsv1.DeprecatedRollbackTo: true,
}
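// For example (hypothetical illustration, not part of the original file): during a
// rollback, kubectl.kubernetes.io/last-applied-configuration is kept from the
// Deployment itself, while an annotation such as a change-cause recorded on the
// revision's ReplicaSet is copied over.
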
// getDeploymentPatch returns a JSON patch that can be applied to restore a Deployment to a
// previous version. If the returned error is nil the patch is valid.
func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) {
// Create a patch of the Deployment that replaces spec.template
patch, err := json.Marshal([]interface{}{
map[string]interface{}{
"op": "replace",
"path": "/spec/template",
"value": podTemplate,
},
map[string]interface{}{
"op": "replace",
"path": "/metadata/annotations",
"value": annotations,
},
})
return types.JSONPatchType, patch, err
}
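// Hypothetical sketch, not part of the original file: getDeploymentPatch emits an
// RFC 6902 JSON patch with two "replace" operations, one for /spec/template and
// one for /metadata/annotations, which Rollback then submits with
// types.JSONPatchType. A caller could inspect the payload like this:
func exampleInspectDeploymentPatch(rs *appsv1.ReplicaSet) error {
	patchType, patch, err := getDeploymentPatch(&rs.Spec.Template, rs.Annotations)
	if err != nil {
		return err
	}
	// patch looks like:
	// [{"op":"replace","path":"/spec/template","value":{...}},
	//  {"op":"replace","path":"/metadata/annotations","value":{...}}]
	fmt.Printf("patch type %s, body: %s\n", patchType, patch)
	return nil
}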
func deploymentRevision(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (revision *appsv1.ReplicaSet, err error) {
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
if err != nil {
return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err)
}
allRSs := allOldRSs
if newRS != nil {
allRSs = append(allRSs, newRS)
}
var (
latestReplicaSet *appsv1.ReplicaSet
latestRevision = int64(-1)
previousReplicaSet *appsv1.ReplicaSet
previousRevision = int64(-1)
)
for _, rs := range allRSs {
if v, err := deploymentutil.Revision(rs); err == nil {
if toRevision == 0 {
if latestRevision < v {
// newest one we've seen so far
previousRevision = latestRevision
previousReplicaSet = latestReplicaSet
latestRevision = v
latestReplicaSet = rs
} else if previousRevision < v {
// second newest one we've seen so far
previousRevision = v
previousReplicaSet = rs
}
} else if toRevision == v {
return rs, nil
}
}
}
if toRevision > 0 {
return nil, revisionNotFoundErr(toRevision)
}
if previousReplicaSet == nil {
return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name)
}
return previousReplicaSet, nil
}
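// Worked example (hypothetical, not part of the original file): given ReplicaSets
// annotated with revisions 1, 2 and 3, deploymentRevision returns the revision-2
// ReplicaSet when toRevision is 0 ("roll back to the previous revision"), the
// revision-3 ReplicaSet when toRevision is 3, and revisionNotFoundErr when
// toRevision is 4.
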
type DaemonSetRollbacker struct {
c kubernetes.Interface
}
func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) {
if toRevision < 0 {
return "", revisionNotFoundErr(toRevision)
}
accessor, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
}
ds, history, err := daemonSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName())
if err != nil {
return "", err
}
if toRevision == 0 && len(history) <= 1 {
return "", fmt.Errorf("no last revision to roll back to")
}
toHistory := findHistory(toRevision, history)
if toHistory == nil {
return "", revisionNotFoundErr(toRevision)
}
if dryRun {
appliedDS, err := applyDaemonSetHistory(ds, toHistory)
if err != nil {
return "", err
}
return printPodTemplate(&appliedDS.Spec.Template)
}
// Skip if the revision already matches current DaemonSet
done, err := daemonSetMatch(ds, toHistory)
if err != nil {
return "", err
}
if done {
return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil
}
// Restore revision
if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw); err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
return rollbackSuccess, nil
}
// daemonSetMatch checks whether the given DaemonSet's template matches the template stored in the given history.
func daemonSetMatch(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (bool, error) {
patch, err := getDaemonSetPatch(ds)
if err != nil {
return false, err
}
return bytes.Equal(patch, history.Data.Raw), nil
}
// getDaemonSetPatch returns a strategic merge patch that can be applied to restore a DaemonSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodTemplateSpec. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getDaemonSetPatch(ds *appsv1.DaemonSet) ([]byte, error) {
dsBytes, err := json.Marshal(ds)
if err != nil {
return nil, err
}
var raw map[string]interface{}
err = json.Unmarshal(dsBytes, &raw)
if err != nil {
return nil, err
}
objCopy := make(map[string]interface{})
specCopy := make(map[string]interface{})
// Create a patch of the DaemonSet that replaces spec.template
spec := raw["spec"].(map[string]interface{})
template := spec["template"].(map[string]interface{})
specCopy["template"] = template
template["$patch"] = "replace"
objCopy["spec"] = specCopy
patch, err := json.Marshal(objCopy)
return patch, err
}
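// Hypothetical sketch, not part of the original file: the strategic merge patch
// produced by getDaemonSetPatch keeps only spec.template and tags it with
// "$patch": "replace", so applying it swaps the whole pod template instead of
// merging it field by field:
//
//	{"spec":{"template":{"$patch":"replace","metadata":{...},"spec":{...}}}}
//
// These are the same bytes that daemonSetMatch compares against history.Data.Raw.
func exampleInspectDaemonSetPatch(ds *appsv1.DaemonSet) error {
	patch, err := getDaemonSetPatch(ds)
	if err != nil {
		return err
	}
	fmt.Printf("strategic merge patch for %s: %s\n", ds.Name, patch)
	return nil
}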
type StatefulSetRollbacker struct {
c kubernetes.Interface
}
// toRevision is a non-negative integer, with 0 reserved to indicate rolling back to the previous configuration.
func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) {
if toRevision < 0 {
return "", revisionNotFoundErr(toRevision)
}
accessor, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error())
}
sts, history, err := statefulSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName())
if err != nil {
return "", err
}
if toRevision == 0 && len(history) <= 1 {
return "", fmt.Errorf("no last revision to roll back to")
}
toHistory := findHistory(toRevision, history)
if toHistory == nil {
return "", revisionNotFoundErr(toRevision)
}
if dryRun {
appliedSS, err := applyRevision(sts, toHistory)
if err != nil {
return "", err
}
return printPodTemplate(&appliedSS.Spec.Template)
}
// Skip if the revision already matches current StatefulSet
done, err := statefulsetMatch(sts, toHistory)
if err != nil {
return "", err
}
if done {
return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil
}
// Restore revision
if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil {
return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err)
}
return rollbackSuccess, nil
}
var appsCodec = scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion)
// applyRevision returns a new StatefulSet constructed by restoring the state in revision to set. If the returned error
// is nil, the returned StatefulSet is valid.
func applyRevision(set *appsv1.StatefulSet, revision *appsv1.ControllerRevision) (*appsv1.StatefulSet, error) {
clone := set.DeepCopy()
patched, err := strategicpatch.StrategicMergePatch([]byte(runtime.EncodeOrDie(appsCodec, clone)), revision.Data.Raw, clone)
if err != nil {
return nil, err
}
err = json.Unmarshal(patched, clone)
if err != nil {
return nil, err
}
return clone, nil
}
// statefulsetMatch checks whether the given StatefulSet's template matches the template stored in the given history.
func statefulsetMatch(ss *appsv1.StatefulSet, history *appsv1.ControllerRevision) (bool, error) {
patch, err := getStatefulSetPatch(ss)
if err != nil {
return false, err
}
return bytes.Equal(patch, history.Data.Raw), nil
}
// getStatefulSetPatch returns a strategic merge patch that can be applied to restore a StatefulSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodTemplateSpec. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getStatefulSetPatch(set *appsv1.StatefulSet) ([]byte, error) {
str, err := runtime.Encode(appsCodec, set)
if err != nil {
return nil, err
}
var raw map[string]interface{}
if err := json.Unmarshal([]byte(str), &raw); err != nil {
return nil, err
}
objCopy := make(map[string]interface{})
specCopy := make(map[string]interface{})
spec := raw["spec"].(map[string]interface{})
template := spec["template"].(map[string]interface{})
specCopy["template"] = template
template["$patch"] = "replace"
objCopy["spec"] = specCopy
patch, err := json.Marshal(objCopy)
return patch, err
}
// findHistory returns the ControllerRevision with the specified revision from the given ControllerRevisions.
// It returns nil if no such ControllerRevision exists.
// If toRevision is 0, the previously used revision is returned.
func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision {
if toRevision == 0 && len(allHistory) <= 1 {
return nil
}
// Find the history to rollback to
var toHistory *appsv1.ControllerRevision
if toRevision == 0 {
// If toRevision == 0, find the previously used revision (the second-highest)
sort.Sort(historiesByRevision(allHistory))
toHistory = allHistory[len(allHistory)-2]
} else {
for _, h := range allHistory {
if h.Revision == toRevision {
// If toRevision != 0, find the history with matching revision
return h
}
}
}
return toHistory
}
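// Hypothetical illustration, not part of the original file: with history at
// revisions 1, 2 and 3, findHistory(0, history) returns the revision-2 entry
// (the previously used one), findHistory(3, history) returns the revision-3
// entry, and findHistory(4, history) returns nil.
func exampleFindPreviousHistory() *appsv1.ControllerRevision {
	history := []*appsv1.ControllerRevision{
		{Revision: 1},
		{Revision: 3},
		{Revision: 2},
	}
	return findHistory(0, history) // the entry with Revision == 2
}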
// printPodTemplate converts a given pod template into a human-readable string.
func printPodTemplate(specTemplate *corev1.PodTemplateSpec) (string, error) {
podSpec, err := printTemplate(specTemplate)
if err != nil {
return "", err
}
return fmt.Sprintf("will roll back to %s", podSpec), nil
}
func revisionNotFoundErr(r int64) error {
return fmt.Errorf("unable to find specified revision %v in history", r)
}
// TODO: copied from daemon controller, should extract to a library
type historiesByRevision []*appsv1.ControllerRevision
func (h historiesByRevision) Len() int { return len(h) }
func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h historiesByRevision) Less(i, j int) bool {
return h[i].Revision < h[j].Revision
}