Merge pull request #51186 from dixudx/fix_delete_uninitialized_resources
Automatic merge from submit-queue (batch tested with PRs 51186, 50350, 51751, 51645, 51837)

Fix a bug in kubectl when deleting uninitialized resources

**What this PR does / why we need it**: `kubectl delete` does not handle resources that still have pending initializers: the deployment reaper and the replica set / stateful set scalers wait on controller status that is never reported for an uninitialized object. With this change, the reaper deletes an uninitialized deployment directly (with `OrphanDependents` set to `false`) and the scalers return immediately instead of polling; cmd tests and initializer fixtures are added for deployments and replica sets. A simplified sketch of the guard is shown after the release note below.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #51185

**Special notes for your reviewer**:
/assign @caesarxuchao @ahmetb 

**Release note**:

```release-note
Fix a bug in kubectl when deleting uninitialized resources
```
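
For illustration, here is a minimal, self-contained Go sketch of the guard this change introduces in `pkg/kubectl/delete.go`. The `Deployment`, `Initializers`, and `DeleteOptions` types and the `deleteDeployment` helper are simplified stand-ins, not the real `client-go`/`metav1` API: when an object still has pending initializers, the usual wait-and-scale-down path is skipped and the object is deleted directly, asking the server not to orphan dependents.

```go
package main

import "fmt"

// Simplified stand-ins for the relevant API fields; in Kubernetes proper these
// live on metav1.ObjectMeta and metav1.DeleteOptions.
type Initializers struct {
	Pending []string
}

type Deployment struct {
	Name         string
	Initializers *Initializers
}

type DeleteOptions struct {
	OrphanDependents *bool
}

// deleteDeployment mirrors the guard added to DeploymentReaper.Stop: if the
// deployment still has pending initializers, the usual "wait for the
// controller, scale replica sets down, then delete" path would never make
// progress, so delete the object directly and let the server cascade to any
// dependents (OrphanDependents=false).
func deleteDeployment(d *Deployment, del func(name string, opts *DeleteOptions) error) error {
	if d.Initializers != nil {
		falseVar := false
		return del(d.Name, &DeleteOptions{OrphanDependents: &falseVar})
	}
	// Normal path (elided here): wait for observed generation, scale down, then delete.
	return del(d.Name, nil)
}

func main() {
	uninitialized := &Deployment{
		Name:         "web",
		Initializers: &Initializers{Pending: []string{"podimage.initializer.com"}},
	}
	err := deleteDeployment(uninitialized, func(name string, opts *DeleteOptions) error {
		orphan := opts == nil || opts.OrphanDependents == nil || *opts.OrphanDependents
		fmt.Printf("deleting deployment %q (orphan dependents: %v)\n", name, orphan)
		return nil
	})
	fmt.Println("err:", err)
}
```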
Kubernetes Submit Queue committed Sep 6, 2017
2 parents 4b63c1f + 5e120cf commit ee4e4a5
Showing 5 changed files with 84 additions and 2 deletions.
27 changes: 25 additions & 2 deletions hack/make-rules/test-cmd-util.sh
@@ -2816,7 +2816,7 @@ run_deployment_tests() {
! kubectl rollout status deployment/nginx --revision=3
cat hack/testdata/deployment-revision1.yaml | $SED "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"

@@ -2854,7 +2854,6 @@ run_deployment_tests() {
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"

# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"

@@ -2883,6 +2882,18 @@ run_deployment_tests() {
kubectl delete configmap test-set-env-config "${kube_flags[@]}"
kubectl delete secret test-set-env-secret "${kube_flags[@]}"

### Delete a deployment with initializer
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'deployment web' "{{$id_field}}" 'web'
# Delete a deployment
kubectl delete deployment web "${kube_flags[@]}"
# Check Deployment web doesn't exist
output_message=$(! kubectl get deployment web 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"web" not found'

set +o nounset
set +o errexit
}
@@ -2995,6 +3006,18 @@ run_rs_tests() {
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

### Delete a rs with initializer
# Pre-condition: no rs exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a rs
kubectl create --request-timeout=1 -f hack/testdata/replicaset-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'rs nginx' "{{$id_field}}" 'nginx'
# Delete a rs
kubectl delete rs nginx "${kube_flags[@]}"
# check rs nginx doesn't exist
output_message=$(! kubectl get rs nginx 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"nginx" not found'

if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
25 changes: 25 additions & 0 deletions hack/testdata/deployment-with-initializer.yaml
@@ -0,0 +1,25 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: web
  labels:
    run: web
  initializers:
    pending:
    - name: podimage.initializer.com
spec:
  replicas: 5
  selector:
    matchLabels:
      run: web
  template:
    metadata:
      labels:
        run: web
    spec:
      containers:
      - image: nginx:1.10
        name: web
        ports:
        - containerPort: 80
          protocol: TCP
23 changes: 23 additions & 0 deletions hack/testdata/replicaset-with-initializer.yaml
@@ -0,0 +1,23 @@
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: nginx
  initializers:
    pending:
    - name: podimage.initializer.com
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.10
        ports:
        - containerPort: 80
5 changes: 5 additions & 0 deletions pkg/kubectl/delete.go
@@ -404,6 +404,11 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati
if err != nil {
return err
}
if deployment.Initializers != nil {
var falseVar = false
nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
return deployments.Delete(name, &nonOrphanOption)
}

// Use observedGeneration to determine if the deployment controller noticed the pause.
if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
6 changes: 6 additions & 0 deletions pkg/kubectl/scale.go
Expand Up @@ -305,6 +305,9 @@ func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, prec
if err != nil {
return err
}
if rs.Initializers != nil {
return nil
}
err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ReplicaSetHasDesiredReplicas(scaler.c, rs))

if err == wait.ErrWaitTimeout {
@@ -373,6 +376,9 @@ func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, pre
if err != nil {
return err
}
if job.Initializers != nil {
return nil
}
err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.StatefulSetHasDesiredReplicas(scaler.c, job))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
