forked from openshift/origin
/
deploy.go
344 lines (300 loc) · 11.6 KB
/
deploy.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
package cmd
import (
"errors"
"fmt"
"io"
"strconv"
"strings"
"github.com/spf13/cobra"
kapi "k8s.io/kubernetes/pkg/api"
kerrors "k8s.io/kubernetes/pkg/api/errors"
kclient "k8s.io/kubernetes/pkg/client"
"k8s.io/kubernetes/pkg/fields"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"github.com/openshift/origin/pkg/client"
"github.com/openshift/origin/pkg/cmd/cli/describe"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
deployapi "github.com/openshift/origin/pkg/deploy/api"
deployutil "github.com/openshift/origin/pkg/deploy/util"
)
// DeployOptions holds the clients, target deployment config, and action
// flags for the `deploy` command. It is populated by Complete, checked by
// Validate, and consumed by RunDeploy.
type DeployOptions struct {
	out             io.Writer         // destination for all command output
	osClient        client.Interface  // OpenShift API client
	kubeClient      kclient.Interface // Kubernetes API client
	builder         *resource.Builder // resolves the DEPLOYMENTCONFIG argument to an object
	namespace       string            // namespace the command operates in
	baseCommandName string            // parent command name (used when formatting help text)

	// deploymentConfigName is the positional argument, normalized to the
	// "dc/<name>" form by Complete.
	deploymentConfigName string

	// Action flags; mutually exclusive (enforced by Validate). When none is
	// set, RunDeploy describes the latest deployment instead.
	deployLatest   bool
	retryDeploy    bool
	cancelDeploy   bool
	enableTriggers bool
}
const (
	// deployLong is the extended help text shown by `help deploy`.
	deployLong = `
View, start, cancel, or retry a deployment
This command allows you to control a deployment config. Each individual deployment is exposed
as a new replication controller, and the deployment process manages scaling down old deployments
and scaling up new ones. You can rollback to any previous deployment, or even scale multiple
deployments up at the same time.
There are several deployment strategies defined:
* Rolling (default) - scales up the new deployment in stages, gradually reducing the number
of old deployments. If one of the new deployed pods never becomes "ready", the new deployment
will be rolled back (scaled down to zero). Use when your application can tolerate two versions
of code running at the same time (many web applications, scalable databases)
* Recreate - scales the old deployment down to zero, then scales the new deployment up to full.
Use when your application cannot tolerate two versions of code running at the same time
* Custom - run your own deployment process inside a Docker container using your own scripts.
If a deployment fails, you may opt to retry it (if the error was transient). Some deployments may
never successfully complete - in which case you can use the '--latest' flag to force a redeployment.
When rolling back to a previous deployment, a new deployment will be created with an identical copy
of your config at the latest position.
If no options are given, shows information about the latest deployment.`

	// deployExample shows typical invocations; %[1]s is substituted with the
	// base command name via fmt.Sprintf in NewCmdDeploy.
	deployExample = ` // Display the latest deployment for the 'database' deployment config
$ %[1]s deploy database
// Start a new deployment based on the 'database'
$ %[1]s deploy database --latest
// Retry the latest failed deployment based on 'frontend'
// The deployer pod and any hook pods are deleted for the latest failed deployment
$ %[1]s deploy frontend --retry
// Cancel the in-progress deployment based on 'frontend'
$ %[1]s deploy frontend --cancel`
)
// NewCmdDeploy creates a new `deploy` command.
//
// fullName is the base command name used in help/example text, f supplies the
// API clients and resource mapping, and out receives all command output.
func NewCmdDeploy(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	options := &DeployOptions{
		baseCommandName: fullName,
	}

	cmd := &cobra.Command{
		Use:     "deploy DEPLOYMENTCONFIG",
		Short:   "View, start, cancel, or retry a deployment",
		Long:    deployLong,
		Example: fmt.Sprintf(deployExample, fullName),
		Run: func(cmd *cobra.Command, args []string) {
			// CheckErr is a no-op on nil, so errors can be funneled through
			// it directly instead of wrapping each call in an if-statement.
			cmdutil.CheckErr(options.Complete(f, args, out))
			// Validation failures are user errors; surface them with usage help.
			if err := options.Validate(args); err != nil {
				cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error()))
			}
			cmdutil.CheckErr(options.RunDeploy())
		},
	}

	cmd.Flags().BoolVar(&options.deployLatest, "latest", false, "Start a new deployment now.")
	cmd.Flags().BoolVar(&options.retryDeploy, "retry", false, "Retry the latest failed deployment.")
	cmd.Flags().BoolVar(&options.cancelDeploy, "cancel", false, "Cancel the in-progress deployment.")
	cmd.Flags().BoolVar(&options.enableTriggers, "enable-triggers", false, "Enables all image triggers for the deployment config.")

	return cmd
}
// Complete resolves clients, namespace, and the resource builder from the
// factory, and normalizes the positional argument into o.deploymentConfigName.
// Argument-count validation is deferred to Validate.
func (o *DeployOptions) Complete(f *clientcmd.Factory, args []string, out io.Writer) error {
	var err error
	o.osClient, o.kubeClient, err = f.Clients()
	if err != nil {
		return err
	}
	o.namespace, _, err = f.DefaultNamespace()
	if err != nil {
		return err
	}

	mapper, typer := f.Object()
	o.builder = resource.NewBuilder(mapper, typer, f.ClientMapperForCommand())
	o.out = out

	if len(args) > 0 {
		name := args[0]
		// Default to the deploymentconfig resource when no "type/" prefix
		// was supplied (e.g. "database" -> "dc/database").
		if !strings.Contains(name, "/") {
			name = "dc/" + name
		}
		o.deploymentConfigName = name
	}
	return nil
}
// Validate checks that exactly one DeploymentConfig name was given and that
// at most one of the mutually exclusive action flags is set. Errors are
// surfaced to the user as usage errors by the command's Run function.
func (o *DeployOptions) Validate(args []string) error {
	if len(args) == 0 || len(args[0]) == 0 {
		// Error strings follow Go convention: lowercase, no trailing period.
		return errors.New("a DeploymentConfig name is required")
	}
	if len(args) > 1 {
		return errors.New("only one DeploymentConfig name is supported as argument")
	}

	// The four action flags are mutually exclusive.
	numOptions := 0
	for _, set := range []bool{o.deployLatest, o.retryDeploy, o.cancelDeploy, o.enableTriggers} {
		if set {
			numOptions++
		}
	}
	if numOptions > 1 {
		return errors.New("only one of --latest, --retry, --cancel, or --enable-triggers is allowed")
	}
	return nil
}
// RunDeploy resolves the named deployment config and dispatches to the
// action selected by the flags. With no action flag set, it prints a
// description of the latest deployment.
func (o *DeployOptions) RunDeploy() error {
	obj, err := o.builder.
		NamespaceParam(o.namespace).
		ResourceTypeOrNameArgs(false, o.deploymentConfigName).
		SingleResourceType().
		Do().
		Object()
	if err != nil {
		return err
	}
	config, ok := obj.(*deployapi.DeploymentConfig)
	if !ok {
		return fmt.Errorf("%s is not a valid deploymentconfig", o.deploymentConfigName)
	}

	// Exactly one of these can be set (enforced by Validate).
	switch {
	case o.deployLatest:
		return o.deploy(config, o.out)
	case o.retryDeploy:
		return o.retry(config, o.out)
	case o.cancelDeploy:
		return o.cancel(config, o.out)
	case o.enableTriggers:
		return o.reenableTriggers(config, o.out)
	}

	// No action requested: show the latest deployment(s).
	describer := describe.NewLatestDeploymentsDescriber(o.osClient, o.kubeClient, -1)
	desc, err := describer.Describe(config.Namespace, config.Name)
	if err != nil {
		return err
	}
	fmt.Fprint(o.out, desc)
	return nil
}
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (o *DeployOptions) deploy(config *deployapi.DeploymentConfig, out io.Writer) error {
	latestName := deployutil.LatestDeploymentNameForConfig(config)
	latest, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(latestName)
	switch {
	case err == nil:
		// A deployment exists for the latest version; reject attempts to
		// start a concurrent one unless it has already finished.
		status := deployutil.DeploymentStatusFor(latest)
		if status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed {
			return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.LatestVersion, status)
		}
	case !kerrors.IsNotFound(err):
		return err
		// NotFound is expected: no deployment exists yet for this version.
	}

	// Bumping LatestVersion causes the deployment controller to create a
	// new deployment once the config is persisted.
	config.LatestVersion++
	if _, err := o.osClient.DeploymentConfigs(config.Namespace).Update(config); err != nil {
		return err
	}
	fmt.Fprintf(out, "Started deployment #%d\n", config.LatestVersion)
	return nil
}
// retry resets the status of the latest deployment to New, which will cause
// the deployment to be retried. An error is returned if the deployment is not
// currently in a failed state.
func (o *DeployOptions) retry(config *deployapi.DeploymentConfig, out io.Writer) error {
	if config.LatestVersion == 0 {
		return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name)
	}
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return fmt.Errorf("Unable to find the latest deployment (#%d).\nYou can start a new deployment using the --latest option.", config.LatestVersion)
		}
		return err
	}

	// Only failed deployments may be retried.
	if status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed {
		message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.LatestVersion, status)
		if status == deployapi.DeploymentStatusComplete {
			message += "You can start a new deployment using the --latest option."
		} else {
			message += "Optionally, you can cancel this deployment using the --cancel option."
		}
		// Use errors.New, not fmt.Errorf(message): passing a non-constant
		// string as a format string is a go vet violation and any '%' in the
		// status text would corrupt the output.
		return errors.New(message)
	}

	// Delete the deployer pod as well as the deployment hook pods, if any,
	// so the retried deployment starts with a clean slate.
	pods, err := o.kubeClient.Pods(config.Namespace).List(deployutil.DeployerPodSelector(deploymentName), fields.Everything())
	if err != nil {
		return fmt.Errorf("Failed to list deployer/hook pods for deployment #%d: %v", config.LatestVersion, err)
	}
	for _, pod := range pods.Items {
		if err := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0)); err != nil {
			return fmt.Errorf("Failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.LatestVersion, err)
		}
	}

	// Resetting the status to New triggers the deployment controller to
	// re-run the deployment process.
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	// Clear out the cancellation flag as well as any previous status-reason annotation.
	delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)
	delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
	if _, err := o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment); err != nil {
		return err
	}
	fmt.Fprintf(out, "retried #%d\n", config.LatestVersion)
	return nil
}
// cancel cancels any deployment process in progress for config by setting the
// cancellation annotations on every New/Pending/Running deployment. Failures
// for individual deployments are collected and reported together.
func (o *DeployOptions) cancel(config *deployapi.DeploymentConfig, out io.Writer) error {
	deployments, err := o.kubeClient.ReplicationControllers(config.Namespace).List(deployutil.ConfigSelector(config.Name))
	if err != nil {
		return err
	}
	if len(deployments.Items) == 0 {
		fmt.Fprintln(out, "no deployments found to cancel")
		return nil
	}
	failedCancellations := []string{}
	anyCancelled := false
	for i := range deployments.Items {
		// Take a pointer into the slice rather than copying each element.
		deployment := &deployments.Items[i]
		status := deployutil.DeploymentStatusFor(deployment)
		switch status {
		case deployapi.DeploymentStatusNew,
			deployapi.DeploymentStatusPending,
			deployapi.DeploymentStatusRunning:
			if deployutil.IsDeploymentCancelled(deployment) {
				// Already marked for cancellation; nothing to do.
				continue
			}
			deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
			deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser
			_, err := o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)
			if err == nil {
				// BUG FIX: report the version of the deployment that was
				// actually cancelled, not config.LatestVersion — multiple
				// deployments can be cancelled in one pass.
				fmt.Fprintf(out, "cancelled deployment #%d\n", deployutil.DeploymentVersionFor(deployment))
				anyCancelled = true
			} else {
				fmt.Fprintf(out, "couldn't cancel deployment #%d (status: %s): %v\n", deployutil.DeploymentVersionFor(deployment), status, err)
				failedCancellations = append(failedCancellations, strconv.Itoa(deployutil.DeploymentVersionFor(deployment)))
			}
		}
	}
	if len(failedCancellations) > 0 {
		return fmt.Errorf("couldn't cancel deployment %s", strings.Join(failedCancellations, ", "))
	}
	if !anyCancelled {
		fmt.Fprintln(out, "no active deployments to cancel")
	}
	return nil
}
// reenableTriggers enables all image triggers and then persists config.
// Prints the names of the images whose triggers were enabled, or a notice
// when the config has no image-change triggers.
func (o *DeployOptions) reenableTriggers(config *deployapi.DeploymentConfig, out io.Writer) error {
	enabled := []string{}
	for _, trigger := range config.Triggers {
		if trigger.Type == deployapi.DeploymentTriggerOnImageChange {
			// NOTE(review): `trigger` is a copy of the slice element, so this
			// only mutates the config if ImageChangeParams is a pointer —
			// presumably it is, or the Update below would persist nothing;
			// confirm against the deployapi type definition.
			trigger.ImageChangeParams.Automatic = true
			enabled = append(enabled, trigger.ImageChangeParams.From.Name)
		}
	}
	if len(enabled) == 0 {
		fmt.Fprintln(out, "no image triggers found to enable")
		return nil
	}
	// Persist the mutated config so the enabled triggers take effect.
	_, err := o.osClient.DeploymentConfigs(config.Namespace).Update(config)
	if err != nil {
		return err
	}
	fmt.Fprintf(out, "enabled image triggers: %s\n", strings.Join(enabled, ","))
	return nil
}