Merge pull request kubernetes#25157 from davidopp/automated-cherry-pick-of-#23689-upstream-release-1.2

Drain pods created from ReplicaSets in 'kubectl drain'
roberthbailey committed May 5, 2016
2 parents a9385d8 + 83cbbb0 commit 3de3be7
Showing 4 changed files with 69 additions and 15 deletions.
9 changes: 5 additions & 4 deletions docs/man/man1/kubectl-drain.1
@@ -23,7 +23,8 @@ without \-\-ignore\-daemonsets, and regardless it will not delete any
DaemonSet\-managed pods, because those pods would be immediately replaced by the
DaemonSet controller, which ignores unschedulable markings. If there are any
pods that are neither mirror pods nor managed\-\-by ReplicationController,
-DaemonSet or Job\-\-, then drain will not delete any pods unless you use \-\-force.
+ReplicaSet, DaemonSet or Job\-\-, then drain will not delete any pods unless you
+use \-\-force.

.PP
When you are ready to put the node back into service, use kubectl uncordon, which
@@ -33,7 +34,7 @@ will make the node schedulable again.
.SH OPTIONS
.PP
\fB\-\-force\fP=false
-Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
+Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.

.PP
\fB\-\-grace\-period\fP=\-1
@@ -143,10 +144,10 @@ will make the node schedulable again.
.RS

.nf
-# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
$ kubectl drain foo \-\-force

-# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo \-\-grace\-period=900


9 changes: 5 additions & 4 deletions docs/user-guide/kubectl/kubectl_drain.md
@@ -19,7 +19,8 @@ without --ignore-daemonsets, and regardless it will not delete any
DaemonSet-managed pods, because those pods would be immediately replaced by the
DaemonSet controller, which ignores unschedulable markings. If there are any
pods that are neither mirror pods nor managed--by ReplicationController,
-DaemonSet or Job--, then drain will not delete any pods unless you use --force.
+ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
+use --force.

When you are ready to put the node back into service, use kubectl uncordon, which
will make the node schedulable again.
@@ -32,18 +33,18 @@ kubectl drain NODE
### Examples

```
-# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
$ kubectl drain foo --force
-# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo --grace-period=900
```

### Options

```
---force[=false]: Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
+--force[=false]: Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.
--grace-period=-1: Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
--ignore-daemonsets[=false]: Ignore DaemonSet-managed pods.
```
20 changes: 15 additions & 5 deletions pkg/kubectl/cmd/drain.go
@@ -105,15 +105,16 @@ without --ignore-daemonsets, and regardless it will not delete any
DaemonSet-managed pods, because those pods would be immediately replaced by the
DaemonSet controller, which ignores unschedulable markings. If there are any
pods that are neither mirror pods nor managed--by ReplicationController,
-DaemonSet or Job--, then drain will not delete any pods unless you use --force.
+ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
+use --force.
When you are ready to put the node back into service, use kubectl uncordon, which
will make the node schedulable again.
`
-drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
+drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
$ kubectl drain foo --force
-# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo --grace-period=900
`
)
@@ -131,7 +132,7 @@ func NewCmdDrain(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmdutil.CheckErr(options.RunDrain())
},
}
-cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.")
+cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.")
cmd.Flags().BoolVar(&options.IgnoreDaemonsets, "ignore-daemonsets", false, "Ignore DaemonSet-managed pods.")
cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
return cmd
@@ -250,6 +251,15 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
if err == nil && job != nil {
replicated = true
}
+} else if sr.Reference.Kind == "ReplicaSet" {
+rs, err := o.client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+
+// Assume the only reason for an error is because the RS is
+// gone/missing, not for any other cause. TODO(mml): something more
+// sophisticated than this
+if err == nil && rs != nil {
+replicated = true
+}
}
}

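Note: the branch added above keys off `sr.Reference.Kind`, which drain reads from the pod's `kubernetes.io/created-by` annotation (a JSON-serialized reference to the creating controller). The following is a rough, standalone sketch of that kind check — the struct here is a simplified stand-in, not the upstream `api.SerializedReference` decoding path:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// serializedRef is a simplified stand-in for the JSON stored under the
// kubernetes.io/created-by annotation.
type serializedRef struct {
	Reference struct {
		Kind      string `json:"kind"`
		Namespace string `json:"namespace"`
		Name      string `json:"name"`
	} `json:"reference"`
}

func main() {
	// Example annotation value for a pod created by a ReplicaSet.
	anno := `{"reference":{"kind":"ReplicaSet","namespace":"default","name":"rs"}}`

	var sr serializedRef
	if err := json.Unmarshal([]byte(anno), &sr); err != nil {
		fmt.Println("no parseable creator reference: drain treats the pod as unmanaged")
		return
	}

	// drain only considers a pod "replicated" for kinds it can verify;
	// this commit adds ReplicaSet to that list.
	switch sr.Reference.Kind {
	case "ReplicationController", "ReplicaSet", "DaemonSet", "Job":
		fmt.Printf("pod is managed by %s %q; drain may delete it without --force\n",
			sr.Reference.Kind, sr.Reference.Name)
	default:
		fmt.Println("unmanaged pod: drain requires --force")
	}
}
```

If the annotation is missing, or names a kind outside the four listed, the pod counts as unmanaged and deleting it requires --force.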
@@ -292,7 +302,7 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
func unmanagedMsg(unreplicatedNames []string, daemonSetNames []string, include_guidance bool) string {
msgs := []string{}
if len(unreplicatedNames) > 0 {
-msg := fmt.Sprintf("pods not managed by ReplicationController, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
+msg := fmt.Sprintf("pods not managed by ReplicationController, ReplicaSet, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
if include_guidance {
msg += " (use --force to override)"
}
46 changes: 44 additions & 2 deletions pkg/kubectl/cmd/drain_test.go
@@ -221,7 +221,7 @@ func TestDrain(t *testing.T) {
rc_anno := make(map[string]string)
rc_anno[controller.CreatedByAnnotation] = refJson(t, &rc)

-replicated_pod := api.Pod{
+rc_pod := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "bar",
Namespace: "default",
@@ -284,6 +284,35 @@
},
}

+rs := extensions.ReplicaSet{
+ObjectMeta: api.ObjectMeta{
+Name: "rs",
+Namespace: "default",
+CreationTimestamp: unversioned.Time{Time: time.Now()},
+Labels: labels,
+SelfLink: testapi.Default.SelfLink("replicasets", "rs"),
+},
+Spec: extensions.ReplicaSetSpec{
+Selector: &unversioned.LabelSelector{MatchLabels: labels},
+},
+}
+
+rs_anno := make(map[string]string)
+rs_anno[controller.CreatedByAnnotation] = refJson(t, &rs)
+
+rs_pod := api.Pod{
+ObjectMeta: api.ObjectMeta{
+Name: "bar",
+Namespace: "default",
+CreationTimestamp: unversioned.Time{Time: time.Now()},
+Labels: labels,
+Annotations: rs_anno,
+},
+Spec: api.PodSpec{
+NodeName: "node",
+},
+}
+
naked_pod := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "bar",
@@ -302,6 +331,7 @@
expected *api.Node
pods []api.Pod
rcs []api.ReplicationController
+replicaSets []extensions.ReplicaSet
args []string
expectFatal bool
expectDelete bool
@@ -310,7 +340,7 @@
description: "RC-managed pod",
node: node,
expected: cordoned_node,
-pods: []api.Pod{replicated_pod},
+pods: []api.Pod{rc_pod},
rcs: []api.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
@@ -346,6 +376,16 @@
expectFatal: false,
expectDelete: true,
},
+{
+description: "RS-managed pod",
+node: node,
+expected: cordoned_node,
+pods: []api.Pod{rs_pod},
+replicaSets: []extensions.ReplicaSet{rs},
+args: []string{"node"},
+expectFatal: false,
+expectDelete: true,
+},
{
description: "naked pod",
node: node,
@@ -396,6 +436,8 @@
return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &ds)}, nil
case m.isFor("GET", "/namespaces/default/jobs/job"):
return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &job)}, nil
case m.isFor("GET", "/namespaces/default/replicasets/rs"):
return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &test.replicaSets[0])}, nil
case m.isFor("GET", "/pods"):
values, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
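For context on the test wiring above: TestDrain serves these GETs from a fake client keyed on the request path, returning the fixture objects encoded with the API codec. A minimal, standalone illustration of that pattern using net/http/httptest — not the project's actual fixture; the path and the trimmed JSON body are assumptions for the sketch — could look like:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Minimal fake API endpoint that answers the ReplicaSet lookup drain
	// performs when checking whether a pod's creator still exists.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" && r.URL.Path == "/namespaces/default/replicasets/rs" {
			w.Header().Set("Content-Type", "application/json")
			// Trimmed-down body; the real test serializes a full
			// extensions.ReplicaSet with testapi.Extensions.Codec().
			_ = json.NewEncoder(w).Encode(map[string]interface{}{
				"kind":     "ReplicaSet",
				"metadata": map[string]string{"name": "rs", "namespace": "default"},
			})
			return
		}
		http.NotFound(w, r)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/namespaces/default/replicasets/rs")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("fake ReplicaSet GET returned status:", resp.StatusCode)
}
```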
