forked from kubernetes-retired/contrib
-
Notifications
You must be signed in to change notification settings - Fork 0
/
drain.go
192 lines (171 loc) · 6.12 KB
/
drain.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drain
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/runtime"
)
// GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node
// drain as well as some extra information about possibly problematic pods
// (unreplicated pods and daemon set pods).
//
// Parameters:
//   - podList: pods currently scheduled on the node being drained.
//   - decoder: unused; kept only for backward compatibility with callers.
//   - skipNodesWithSystemPods: if true, fail when a non-mirror, non-daemon-set
//     kube-system pod is present.
//   - skipNodesWithLocalStorage: if true, fail when a pod using local storage
//     (HostPath or EmptyDir) is present.
//   - checkReferences: if true, verify each pod's controller still exists via
//     the API; requires client to be non-nil.
//   - client: API client used for reference checks.
//   - minReplica: minimum replica count a controller must declare for its
//     pods to be considered safely deletable.
//
// On any pod that cannot be safely deleted, it returns an empty list and a
// non-nil error describing the offending pod.
func GetPodsForDeletionOnNodeDrain(
	podList []*api.Pod,
	decoder runtime.Decoder,
	skipNodesWithSystemPods bool,
	skipNodesWithLocalStorage bool,
	checkReferences bool, // Setting this to true requires client to be not-null.
	client *client.Client,
	minReplica int32) ([]*api.Pod, error) {

	pods := []*api.Pod{}
	for _, pod := range podList {
		// Mirror pods are managed directly by the kubelet and are recreated
		// automatically; they are never candidates for deletion.
		if IsMirrorPod(pod) {
			continue
		}

		daemonsetPod := false
		replicated := false

		sr, err := CreatorRef(pod)
		if err != nil {
			return []*api.Pod{}, fmt.Errorf("failed to obtain refkind: %v", err)
		}
		refKind := ""
		if sr != nil {
			refKind = sr.Reference.Kind
		}

		switch refKind {
		case "ReplicationController":
			if checkReferences {
				rc, err := client.ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume a reason for an error is because the RC is either
				// gone/missing or that the rc has too few replicas configured.
				// TODO: replace the minReplica check with pod disruption budget.
				if err == nil && rc != nil {
					if rc.Spec.Replicas < minReplica {
						return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, rc.Spec.Replicas, minReplica)
					}
					replicated = true
				} else {
					return []*api.Pod{}, fmt.Errorf("replication controller for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		case "DaemonSet":
			if checkReferences {
				ds, err := client.DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for an error is because the DaemonSet is
				// gone/missing, not for any other cause. TODO(mml): something more
				// sophisticated than this
				if err == nil && ds != nil {
					// Otherwise, treat daemonset-managed pods as unmanaged since
					// DaemonSet Controller currently ignores the unschedulable bit.
					// FIXME(mml): Add link to the issue concerning a proper way to drain
					// daemonset pods, probably using taints.
					daemonsetPod = true
				} else {
					return []*api.Pod{}, fmt.Errorf("daemonset for %s/%s is not present, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				daemonsetPod = true
			}
		case "Job":
			if checkReferences {
				job, err := client.ExtensionsClient.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for an error is because the Job is
				// gone/missing, not for any other cause. TODO(mml): something more
				// sophisticated than this
				if err == nil && job != nil {
					replicated = true
				} else {
					return []*api.Pod{}, fmt.Errorf("job for %s/%s is not available: err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		case "ReplicaSet":
			if checkReferences {
				rs, err := client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
				// Assume the only reason for an error is because the RS is
				// gone/missing, not for any other cause. TODO(mml): something more
				// sophisticated than this
				if err == nil && rs != nil {
					if rs.Spec.Replicas < minReplica {
						return []*api.Pod{}, fmt.Errorf("replica set for %s/%s has too few replicas spec: %d min: %d",
							pod.Namespace, pod.Name, rs.Spec.Replicas, minReplica)
					}
					replicated = true
				} else {
					return []*api.Pod{}, fmt.Errorf("replica set for %s/%s is not available, err: %v", pod.Namespace, pod.Name, err)
				}
			} else {
				replicated = true
			}
		}

		// Daemon-set pods are recreated on the node by their controller and
		// are therefore skipped rather than deleted.
		if daemonsetPod {
			continue
		}
		if !replicated {
			return []*api.Pod{}, fmt.Errorf("%s/%s is not replicated", pod.Namespace, pod.Name)
		}
		if pod.Namespace == "kube-system" && skipNodesWithSystemPods {
			return []*api.Pod{}, fmt.Errorf("non-daemon-set, non-mirrored, kube-system pod present: %s", pod.Name)
		}
		if HasLocalStorage(pod) && skipNodesWithLocalStorage {
			return []*api.Pod{}, fmt.Errorf("pod with local storage present: %s", pod.Name)
		}
		pods = append(pods, pod)
	}
	return pods, nil
}
// CreatorRefKind returns the kind of the creator of the pod. It returns an
// empty string when the pod has no creator reference.
func CreatorRefKind(pod *api.Pod) (string, error) {
	ref, err := CreatorRef(pod)
	if err != nil || ref == nil {
		return "", err
	}
	return ref.Reference.Kind, nil
}
// CreatorRef returns the serialized reference to the controller that created
// the pod, decoded from the created-by annotation. It returns nil (with no
// error) when the pod carries no such annotation.
func CreatorRef(pod *api.Pod) (*api.SerializedReference, error) {
	annotation, ok := pod.ObjectMeta.Annotations[controller.CreatedByAnnotation]
	if !ok {
		return nil, nil
	}
	ref := api.SerializedReference{}
	if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(annotation), &ref); err != nil {
		return nil, err
	}
	return &ref, nil
}
// IsMirrorPod checks whether the pod is a mirror pod, i.e. one carrying the
// kubelet's config-mirror annotation.
func IsMirrorPod(pod *api.Pod) bool {
	if _, ok := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]; ok {
		return true
	}
	return false
}
// HasLocalStorage returns true if pod has any local storage.
func HasLocalStorage(pod *api.Pod) bool {
	volumes := pod.Spec.Volumes
	for i := range volumes {
		if isLocalVolume(&volumes[i]) {
			return true
		}
	}
	return false
}
// isLocalVolume reports whether the volume is backed by node-local storage
// (a HostPath or EmptyDir source).
func isLocalVolume(volume *api.Volume) bool {
	if volume.HostPath != nil {
		return true
	}
	return volume.EmptyDir != nil
}