Cascading delete deployment #18077

Merged: 3 commits, merged on Feb 13, 2016
50 changes: 22 additions & 28 deletions hack/test-cmd.sh
@@ -637,13 +637,13 @@ runTests() {
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f examples/guestbook/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
@@ -653,33 +653,32 @@ runTests() {
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"

## kubectl run should create deployments or jobs
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run pi --generator=job/v1beta1 --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Pre-Condition: no Deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx --image=nginx --generator=deployment/v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
kubectl delete rc -l pod-template-hash "${kube_flags[@]}"

##############
# Namespaces #
@@ -721,7 +720,7 @@ runTests() {
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Clean up
kubectl delete namespace other

@@ -966,17 +965,15 @@ __EOF__
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
# TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
# ### Scale a deployment
# kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
# # Command
# kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# # Post-condition: 1 replica for nginx-deployment
# kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# # Clean-up
# kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
# # TODO: Remove once deployment reaping is implemented
# kubectl delete rs --all "${kube_flags[@]}"

### Scale a deployment
kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"

### Expose a deployment as a service
kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
@@ -988,8 +985,6 @@ __EOF__
kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
# TODO: Remove once deployment reaping is implemented
kubectl delete rs --all "${kube_flags[@]}"

### Expose replication controller as service
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
@@ -1094,7 +1089,7 @@ __EOF__
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"

### Auto scale deployment
Review comment (Member):
Remove L1078 kubectl delete rc -l deployment.kubernetes.io/podTemplateHash "${kube_flags[@]}"

# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
@@ -1106,9 +1101,8 @@ __EOF__
# Clean up
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete rs -l pod-template-hash "${kube_flags[@]}"

### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
Expand Down Expand Up @@ -1330,7 +1324,7 @@ __EOF__
# cleaning
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
Expand All @@ -1349,7 +1343,7 @@ __EOF__
fi
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"
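
An aside on the --overwrite comments above for kubectl label and kubectl annotate: the sketch below illustrates the failure mode they guard against. It is not part of this PR; the manifest path and label key are hypothetical.

# Hypothetical illustration of why retries need --overwrite.
# First attempt labels some of the resources in the file, then fails part-way
# (e.g. a transient API error), leaving labeled=true on only a subset.
kubectl label -f testdata/multi.yaml labeled=true

# A plain retry now errors on the already-labeled resources, because
# kubectl label refuses to modify an existing label key without --overwrite.
kubectl label -f testdata/multi.yaml labeled=true

# With --overwrite the retry is idempotent and converges on all resources.
kubectl label -f testdata/multi.yaml labeled=true --overwrite
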
111 changes: 54 additions & 57 deletions pkg/kubectl/scale.go
@@ -48,9 +48,8 @@ func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) {
		return &ReplicaSetScaler{c.Extensions()}, nil
	case extensions.Kind("Job"):
		return &JobScaler{c.Extensions()}, nil
	// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
	// case extensions.Kind("Deployment"):
	// 	return &DeploymentScaler{c.Extensions()}, nil
	case extensions.Kind("Deployment"):
		return &DeploymentScaler{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
@@ -328,57 +327,55 @@ func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions
	return nil
}

// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
// type DeploymentScaler struct {
// c client.ExtensionsInterface
// }

// // ScaleSimple is responsible for updating a deployment's desired replicas count.
// func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error {
// deployment, err := scaler.c.Deployments(namespace).Get(name)
// if err != nil {
// return ScaleError{ScaleGetFailure, "Unknown", err}
// }
// if preconditions != nil {
// if err := preconditions.ValidateDeployment(deployment); err != nil {
// return err
// }
// }
// scale, err := extensions.ScaleFromDeployment(deployment)
// if err != nil {
// return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
// }
// scale.Spec.Replicas = int(newSize)
// if _, err := scaler.c.Scales(namespace).Update("Deployment", scale); err != nil {
// if errors.IsInvalid(err) {
// return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err}
// }
// return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
// }
// return nil
// }

// // Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil),
// // optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
// func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
// if preconditions == nil {
// preconditions = &ScalePrecondition{-1, ""}
// }
// if retry == nil {
// // Make it try only once, immediately
// retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
// }
// cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
// if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
// return err
// }
// if waitForReplicas != nil {
// deployment, err := scaler.c.Deployments(namespace).Get(name)
// if err != nil {
// return err
// }
// return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
// client.DeploymentHasDesiredReplicas(scaler.c, deployment))
// }
// return nil
// }
type DeploymentScaler struct {
	c client.ExtensionsInterface
}

// ScaleSimple is responsible for updating a deployment's desired replicas count.
func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error {
	deployment, err := scaler.c.Deployments(namespace).Get(name)
	if err != nil {
		return ScaleError{ScaleGetFailure, "Unknown", err}
	}
	if preconditions != nil {
		if err := preconditions.ValidateDeployment(deployment); err != nil {
			return err
		}
	}

	// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
	// For now I'm falling back to regular Deployment update operation.
	deployment.Spec.Replicas = int(newSize)
	if _, err := scaler.c.Deployments(namespace).Update(deployment); err != nil {
		if errors.IsInvalid(err) {
			return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err}
		}
		return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err}
	}
	return nil
}

// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		deployment, err := scaler.c.Deployments(namespace).Get(name)
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
			client.DeploymentHasDesiredReplicas(scaler.c, deployment))
	}
	return nil
}
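
The Scale doc comment above describes the precondition, retry, and wait parameters. As a rough usage sketch, not part of this PR, a caller with an already-constructed client.Interface might drive the new Deployment scaler as below; the helper name, package name, namespace, replica count, and timeouts are illustrative, and the import paths assume the kubernetes tree of this era.

package scalesketch // illustrative sketch only; not part of the PR

import (
	"time"

	"k8s.io/kubernetes/pkg/apis/extensions"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/kubectl"
)

// scaleDeployment is a hypothetical helper showing how the scaler added in this
// PR might be driven. It assumes an already-constructed client.Interface.
func scaleDeployment(c client.Interface, namespace, name string, replicas uint) error {
	// ScalerFor now returns a DeploymentScaler for the Deployment kind.
	scaler, err := kubectl.ScalerFor(extensions.Kind("Deployment"), c)
	if err != nil {
		return err
	}
	retry := &kubectl.RetryParams{Interval: time.Second, Timeout: time.Minute}
	waitForReplicas := &kubectl.RetryParams{Interval: time.Second, Timeout: 5 * time.Minute}
	// nil preconditions means scale unconditionally (the scaler substitutes {-1, ""}).
	return scaler.Scale(namespace, name, replicas, nil, retry, waitForReplicas)
}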