-
Notifications
You must be signed in to change notification settings - Fork 38.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
kubectl: Make scaling smarter #18169
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -146,6 +146,10 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri | |
errs := []error{} | ||
for _, info := range infos { | ||
if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil { | ||
if scaleErr, ok := err.(kubectl.ScaleError); ok && scaleErr.FailureType == kubectl.AlreadyScaled { | ||
cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "already scaled") | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. @kubernetes/rh-ux @kubernetes/goog-ux |
||
continue | ||
} | ||
errs = append(errs, err) | ||
continue | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -17,6 +17,7 @@ limitations under the License. | |
package kubectl | ||
|
||
import ( | ||
goerrors "errors" | ||
"fmt" | ||
"strconv" | ||
"time" | ||
|
@@ -79,8 +80,13 @@ const ( | |
ScaleGetFailure ScaleErrorType = iota | ||
ScaleUpdateFailure | ||
ScaleUpdateInvalidFailure | ||
// AlreadyScaled is not really an error but we need a way to surface to the client that | ||
// the scaling didn't happen because we already have the desired state the user asked for. | ||
AlreadyScaled | ||
) | ||
|
||
var alreadyScaledErr = goerrors.New("desired replicas already equals the requested replicas") | ||
|
||
// A ScaleError is returned when a scale request passes | ||
// preconditions but fails to actually scale the controller. | ||
type ScaleError struct { | ||
|
@@ -112,12 +118,14 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s | |
case nil: | ||
return true, nil | ||
case ScaleError: | ||
// if it's invalid we shouldn't keep waiting | ||
if e.FailureType == ScaleUpdateInvalidFailure { | ||
switch e.FailureType { | ||
case ScaleUpdateInvalidFailure: | ||
// if it's invalid we shouldn't keep waiting | ||
return false, err | ||
} | ||
if e.FailureType == ScaleUpdateFailure { | ||
case ScaleUpdateFailure: | ||
return false, nil | ||
case AlreadyScaled: | ||
return false, err | ||
} | ||
} | ||
return false, err | ||
|
@@ -149,8 +157,10 @@ func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, p | |
return err | ||
} | ||
} | ||
if controller.Spec.Replicas == int(newSize) { | ||
return ScaleError{AlreadyScaled, controller.ResourceVersion, alreadyScaledErr} | ||
} | ||
controller.Spec.Replicas = int(newSize) | ||
// TODO: do retry on 409 errors here? | ||
if _, err := scaler.c.ReplicationControllers(namespace).Update(controller); err != nil { | ||
if errors.IsInvalid(err) { | ||
return ScaleError{ScaleUpdateInvalidFailure, controller.ResourceVersion, err} | ||
|
@@ -216,6 +226,9 @@ func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *Scal | |
} | ||
} | ||
parallelism := int(newSize) | ||
if job.Spec.Parallelism != nil && *job.Spec.Parallelism == parallelism { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Again no need for nil check. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I cringe in the face of accessing a pointer w/o checking for nil first. I am going to leave this around since it doesn't harm having safer code:) |
||
return ScaleError{AlreadyScaled, job.ResourceVersion, alreadyScaledErr} | ||
} | ||
job.Spec.Parallelism = ¶llelism | ||
if _, err := scaler.c.Jobs(namespace).Update(job); err != nil { | ||
if errors.IsInvalid(err) { | ||
|
@@ -279,6 +292,9 @@ func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, precondition | |
} | ||
} | ||
scale := extensions.ScaleFromDeployment(deployment) | ||
if scale.Spec.Replicas == int(newSize) { | ||
return ScaleError{AlreadyScaled, deployment.ResourceVersion, alreadyScaledErr} | ||
} | ||
scale.Spec.Replicas = int(newSize) | ||
if _, err := scaler.c.Scales(namespace).Update("Deployment", scale); err != nil { | ||
if errors.IsInvalid(err) { | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Parallelism will never be nil, since it's defaulted if not present. No need to change that.