controller.go
package deployment
import (
"fmt"
"github.com/golang/glog"
kapi "k8s.io/kubernetes/pkg/api"
kerrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/record"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
deployapi "github.com/openshift/origin/pkg/deploy/api"
deployutil "github.com/openshift/origin/pkg/deploy/util"
"github.com/openshift/origin/pkg/util"
)
// DeploymentController starts a deployment by creating a deployer pod which
// implements a deployment strategy. The status of the deployment will follow
// the status of the deployer pod. The deployer pod is correlated to the
// deployment with annotations.
//
// When the deployment enters a terminal status:
//
// 1. If the deployment finished normally, the deployer pod is deleted.
// 2. If the deployment failed, the deployer pod is not deleted.
//
// Use the DeploymentControllerFactory to create this controller.
type DeploymentController struct {
// serviceAccount to create deployment pods with
serviceAccount string
// deploymentClient provides access to deployments.
deploymentClient deploymentClient
// podClient provides access to pods.
podClient podClient
// makeContainer knows how to make a container appropriate to execute a deployment strategy.
makeContainer func(strategy *deployapi.DeploymentStrategy) (*kapi.Container, error)
// decodeConfig knows how to decode the deploymentConfig from a deployment's annotations.
decodeConfig func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error)
// recorder is used to record events.
recorder record.EventRecorder
}
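
// The wiring below is an illustrative sketch only; production wiring is done
// by the DeploymentControllerFactory, and the anonymous funcs shown are
// hypothetical placeholders for calls to real Kubernetes/OpenShift clients:
//
//	controller := &DeploymentController{
//		serviceAccount: "deployer",
//		deploymentClient: &deploymentClientImpl{
//			getDeploymentFunc:    func(ns, name string) (*kapi.ReplicationController, error) { /* fetch the RC */ return nil, nil },
//			updateDeploymentFunc: func(ns string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) { /* persist the RC */ return nil, nil },
//		},
//		podClient:     &podClientImpl{ /* getPodFunc, createPodFunc, deletePodFunc, updatePodFunc, getDeployerPodsForFunc */ },
//		makeContainer: makeContainer, // maps a DeploymentStrategy to a deployer container
//		decodeConfig:  decodeConfig,  // decodes the DeploymentConfig from the deployment's annotations
//		recorder:      recorder,
//	}
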
// fatalError is an error which can't be retried.
type fatalError string
func (e fatalError) Error() string { return "fatal error handling deployment: " + string(e) }
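
// Callers that retry Handle can use a type assertion on the returned error to
// stop retrying once a fatal error is seen. A minimal sketch (the surrounding
// retry/requeue machinery is assumed and is not part of this file):
//
//	if err := controller.Handle(deployment); err != nil {
//		if _, isFatal := err.(fatalError); isFatal {
//			// Give up; retrying will not help (e.g. the deployer pod spec could not be built).
//		} else {
//			// Transient failure; requeue the deployment and try again on the next sync.
//		}
//	}
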
// Handle processes a deployment and either creates a deployer pod or responds
// to a terminal deployment status.
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
currentStatus := deployutil.DeploymentStatusFor(deployment)
nextStatus := currentStatus
deploymentScaled := false
switch currentStatus {
case deployapi.DeploymentStatusNew:
// If the deployment has been cancelled, don't create a deployer pod.
// Transition the deployment to Pending so that re-syncs will check
// up on the deployer pods and so that the deployment config controller
// continues to see the deployment as in-flight (which it is until we
// have deployer pod outcomes).
if deployutil.IsDeploymentCancelled(deployment) {
nextStatus = deployapi.DeploymentStatusPending
if err := c.cancelDeployerPods(deployment); err != nil {
return err
}
break
}
// Generate a deployer pod spec.
podTemplate, err := c.makeDeployerPod(deployment)
if err != nil {
return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
}
// Create the deployer pod.
deploymentPod, err := c.podClient.createPod(deployment.Namespace, podTemplate)
if err == nil {
deployment.Annotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
nextStatus = deployapi.DeploymentStatusPending
glog.V(4).Infof("Created pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))
break
}
// Retry on error.
if !kerrors.IsAlreadyExists(err) {
if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil {
c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedCreate", "Error creating deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
} else {
c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCreate", "Error creating deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
}
return fmt.Errorf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
}
// If the pod already exists, it's possible that a previous CreatePod
// succeeded but the deployment state update failed and now we're re-
// entering. Ensure that the pod is the one we created by verifying the
// annotation on it, and return a retryable error.
existingPod, err := c.podClient.getPod(deployment.Namespace, deployutil.DeployerPodNameForDeployment(deployment.Name))
if err != nil {
if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil {
c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedCreate", "Error getting existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
} else {
c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCreate", "Error getting existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
}
return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
}
// Do a stronger check to validate that the existing deployer pod is
// actually for this deployment, and if not, fail this deployment.
//
// TODO: Investigate checking the container image of the running pod and
// comparing with the intended deployer pod image. If we do so, we'll need
// to ensure that changes to 'unrelated' pods don't result in updates to
// the deployment. So, the image check will have to be done in other areas
// of the code as well.
if deployutil.DeploymentNameFor(existingPod) != deployment.Name {
nextStatus = deployapi.DeploymentStatusFailed
deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil {
c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedCreate", "Error creating deployer pod for %s since another pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
} else {
c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCreate", "Error creating deployer pod for %s since another pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
}
glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
break
}
// Update to pending relative to the existing validated deployer pod.
deployment.Annotations[deployapi.DeploymentPodAnnotation] = existingPod.Name
nextStatus = deployapi.DeploymentStatusPending
glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", existingPod.Name, deployutil.LabelForDeployment(deployment))
case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
// If the deployer pod has vanished, consider the deployment a failure.
deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
if _, err := c.podClient.getPod(deployment.Namespace, deployerPodName); err != nil {
if kerrors.IsNotFound(err) {
nextStatus = deployapi.DeploymentStatusFailed
deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil {
c.recorder.Eventf(config, kapi.EventTypeWarning, "Failed", "Deployer pod %q has gone missing", deployerPodName)
} else {
c.recorder.Eventf(deployment, kapi.EventTypeWarning, "Failed", "Deployer pod %q has gone missing", deployerPodName)
}
glog.V(4).Infof("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
break
} else {
// We'll try again later on resync. Continue to process cancellations.
glog.V(2).Infof("Error getting deployer pod %s for deployment %s: %#v", deployerPodName, deployutil.LabelForDeployment(deployment), err)
}
}
// If the deployment is cancelled, terminate any deployer/hook pods.
// NOTE: Do not mark the deployment as Failed just yet.
// The deployment will be marked as Failed by the deployer pod controller
// when the deployer pod failure state is picked up.
// Then, the deployment config controller will scale down the failed deployment
// and scale back up the last successful completed deployment.
if deployutil.IsDeploymentCancelled(deployment) {
if err := c.cancelDeployerPods(deployment); err != nil {
return err
}
}
case deployapi.DeploymentStatusFailed:
// If this is a test deployment, ensure it is scaled down to zero replicas.
if config, err := c.decodeConfig(deployment); err == nil && config.Spec.Test {
deploymentScaled = deployment.Spec.Replicas != 0
deployment.Spec.Replicas = 0
}
case deployapi.DeploymentStatusComplete:
// If this is a test deployment, ensure it is scaled down to zero replicas.
if config, err := c.decodeConfig(deployment); err == nil && config.Spec.Test {
deploymentScaled = deployment.Spec.Replicas != 0
deployment.Spec.Replicas = 0
}
// List any deployer pods in the namespace that are labeled for this deployment.
deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
if err != nil {
return fmt.Errorf("couldn't fetch deployer pods for %s after successful completion: %v", deployutil.LabelForDeployment(deployment), err)
}
if len(deployerPods) > 0 {
glog.V(4).Infof("Deleting %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
}
cleanedAll := true
for _, deployerPod := range deployerPods {
if err := c.podClient.deletePod(deployerPod.Namespace, deployerPod.Name); err != nil {
if !kerrors.IsNotFound(err) {
// If the pod deletion failed, log the error and continue; we will still
// try to delete any remaining deployer pods and return an error afterwards.
utilruntime.HandleError(fmt.Errorf("couldn't delete completed deployer pod %s/%s for deployment %s: %v", deployment.Namespace, deployerPod.Name, deployutil.LabelForDeployment(deployment), err))
cleanedAll = false
}
// Already deleted
} else {
glog.V(4).Infof("Deleted completed deployer pod %s/%s for deployment %s", deployment.Namespace, deployerPod.Name, deployutil.LabelForDeployment(deployment))
}
}
if !cleanedAll {
return fmt.Errorf("couldn't clean up all deployer pods for %s", deployutil.LabelForDeployment(deployment))
}
}
if deployutil.CanTransitionPhase(currentStatus, nextStatus) || deploymentScaled {
deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil {
c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedUpdate", "Cannot update deployment %s status to %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
} else {
c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedUpdate", "Cannot update deployment %s status to %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
}
return fmt.Errorf("couldn't update deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
}
glog.V(4).Infof("Updated deployment %s status from %s to %s (scale: %d)", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus, deployment.Spec.Replicas)
}
return nil
}
// makeDeployerPod creates a pod which implements deployment behavior. The pod is correlated to
// the deployment with an annotation.
func (c *DeploymentController) makeDeployerPod(deployment *kapi.ReplicationController) (*kapi.Pod, error) {
deploymentConfig, err := c.decodeConfig(deployment)
if err != nil {
return nil, err
}
container, err := c.makeContainer(&deploymentConfig.Spec.Strategy)
if err != nil {
return nil, err
}
// Add deployment environment variables to the container.
envVars := []kapi.EnvVar{}
for _, env := range container.Env {
envVars = append(envVars, env)
}
envVars = append(envVars, kapi.EnvVar{Name: "OPENSHIFT_DEPLOYMENT_NAME", Value: deployment.Name})
envVars = append(envVars, kapi.EnvVar{Name: "OPENSHIFT_DEPLOYMENT_NAMESPACE", Value: deployment.Namespace})
// Assigning to a variable since its address is required
maxDeploymentDurationSeconds := deployapi.MaxDeploymentDurationSeconds
pod := &kapi.Pod{
ObjectMeta: kapi.ObjectMeta{
Name: deployutil.DeployerPodNameForDeployment(deployment.Name),
Annotations: map[string]string{
deployapi.DeploymentAnnotation: deployment.Name,
},
Labels: map[string]string{
deployapi.DeployerPodForDeploymentLabel: deployment.Name,
},
},
Spec: kapi.PodSpec{
Containers: []kapi.Container{
{
Name: "deployment",
Command: container.Command,
Args: container.Args,
Image: container.Image,
Env: envVars,
Resources: deploymentConfig.Spec.Strategy.Resources,
},
},
ActiveDeadlineSeconds: &maxDeploymentDurationSeconds,
// Set the node selector on the deployer pod so that it is created on the
// same set of nodes as the pods created by this deployment.
NodeSelector: deployment.Spec.Template.Spec.NodeSelector,
RestartPolicy: kapi.RestartPolicyNever,
ServiceAccountName: c.serviceAccount,
},
}
// MergeInto will not overwrite values unless the flag OverwriteExistingDstKey is set.
util.MergeInto(pod.Labels, deploymentConfig.Spec.Strategy.Labels, 0)
util.MergeInto(pod.Annotations, deploymentConfig.Spec.Strategy.Annotations, 0)
pod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent
return pod, nil
}
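
// The pod built above is correlated back to its deployment purely through
// metadata, which is what Handle relies on when it re-enters. For a
// hypothetical deployment named "frontend-1", roughly:
//
//	pod.Name                                            // deployutil.DeployerPodNameForDeployment("frontend-1")
//	pod.Annotations[deployapi.DeploymentAnnotation]     // "frontend-1"; read back via deployutil.DeploymentNameFor
//	pod.Labels[deployapi.DeployerPodForDeploymentLabel] // "frontend-1"; presumably the selector behind getDeployerPodsFor
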
func (c *DeploymentController) cancelDeployerPods(deployment *kapi.ReplicationController) error {
deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
if err != nil {
return fmt.Errorf("couldn't fetch deployer pods for %s while trying to cancel deployment: %v", deployutil.LabelForDeployment(deployment), err)
}
glog.V(4).Infof("Cancelling %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
// ActiveDeadlineSeconds must be a positive value, so one second is the
// shortest deadline we can set to terminate the pod as soon as possible.
zeroDelay := int64(1)
anyCancelled := false
for _, deployerPod := range deployerPods {
// Set the ActiveDeadlineSeconds on the pod so it's terminated very soon.
if deployerPod.Spec.ActiveDeadlineSeconds == nil || *deployerPod.Spec.ActiveDeadlineSeconds != zeroDelay {
deployerPod.Spec.ActiveDeadlineSeconds = &zeroDelay
if _, err := c.podClient.updatePod(deployerPod.Namespace, &deployerPod); err != nil {
if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil {
c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedCancellation", "Error cancelling deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
} else {
c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCancellation", "Error cancelling deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
}
return fmt.Errorf("couldn't cancel deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
}
anyCancelled = true
glog.V(4).Infof("Cancelled deployer pod %s for deployment %s", deployerPod.Name, deployutil.LabelForDeployment(deployment))
}
}
if anyCancelled {
if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil && len(deployerPods) > 0 {
c.recorder.Eventf(config, kapi.EventTypeNormal, "Cancelled", "Cancelled deployer pods for deployment %s", deployutil.LabelForDeployment(deployment))
} else if len(deployerPods) > 0 {
c.recorder.Eventf(deployment, kapi.EventTypeNormal, "Cancelled", "Cancelled deployer pods")
}
}
return nil
}
// deploymentClient abstracts access to deployments.
type deploymentClient interface {
getDeployment(namespace, name string) (*kapi.ReplicationController, error)
updateDeployment(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error)
}
// podClient abstracts access to pods.
type podClient interface {
getPod(namespace, name string) (*kapi.Pod, error)
createPod(namespace string, pod *kapi.Pod) (*kapi.Pod, error)
deletePod(namespace, name string) error
updatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error)
getDeployerPodsFor(namespace, name string) ([]kapi.Pod, error)
}
// deploymentClientImpl is a pluggable deploymentClient.
type deploymentClientImpl struct {
getDeploymentFunc func(namespace, name string) (*kapi.ReplicationController, error)
updateDeploymentFunc func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error)
}
func (i *deploymentClientImpl) getDeployment(namespace, name string) (*kapi.ReplicationController, error) {
return i.getDeploymentFunc(namespace, name)
}
func (i *deploymentClientImpl) updateDeployment(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
return i.updateDeploymentFunc(namespace, deployment)
}
// podClientImpl is a pluggable podClient.
type podClientImpl struct {
getPodFunc func(namespace, name string) (*kapi.Pod, error)
createPodFunc func(namespace string, pod *kapi.Pod) (*kapi.Pod, error)
deletePodFunc func(namespace, name string) error
updatePodFunc func(namespace string, pod *kapi.Pod) (*kapi.Pod, error)
getDeployerPodsForFunc func(namespace, name string) ([]kapi.Pod, error)
}
func (i *podClientImpl) getPod(namespace, name string) (*kapi.Pod, error) {
return i.getPodFunc(namespace, name)
}
func (i *podClientImpl) createPod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
return i.createPodFunc(namespace, pod)
}
func (i *podClientImpl) deletePod(namespace, name string) error {
return i.deletePodFunc(namespace, name)
}
func (i *podClientImpl) updatePod(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
return i.updatePodFunc(namespace, pod)
}
func (i *podClientImpl) getDeployerPodsFor(namespace, name string) ([]kapi.Pod, error) {
return i.getDeployerPodsForFunc(namespace, name)
}