HPA updates its status even if getting metrics fails #21743

Merged: 2 commits, Feb 24, 2016
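In short, the change makes reconcileAutoscaler write CurrentReplicas into the HPA status before returning when metrics computation fails, instead of bailing out and leaving the status stale. A minimal standalone sketch of that pattern, using simplified hypothetical types in place of the real Kubernetes API objects:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical, simplified stand-ins for the Kubernetes API types;
// the real controller works with extensions.HorizontalPodAutoscaler.
type Status struct {
	CurrentReplicas int
	DesiredReplicas int
}

type HPA struct {
	Status Status
}

// computeReplicas stands in for a metrics-driven computation such as
// computeReplicasForCPUUtilization, which can fail.
func computeReplicas(metricsOK bool) (int, error) {
	if !metricsOK {
		return 0, errors.New("failed to get CPU consumption and request")
	}
	return 5, nil
}

// reconcile mirrors the fixed control flow: on a metrics error the status
// is still updated with the observed replica count before returning.
func reconcile(hpa *HPA, currentReplicas int, metricsOK bool) error {
	desired, err := computeReplicas(metricsOK)
	if err != nil {
		hpa.Status.CurrentReplicas = currentReplicas // the fix: keep CurrentReplicas fresh
		return fmt.Errorf("failed to compute desired number of replicas: %v", err)
	}
	hpa.Status = Status{CurrentReplicas: currentReplicas, DesiredReplicas: desired}
	return nil
}

func main() {
	hpa := &HPA{}
	if err := reconcile(hpa, 3, false); err != nil {
		fmt.Println(err)
	}
	fmt.Printf("%+v\n", hpa.Status) // {CurrentReplicas:3 DesiredReplicas:0}
}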
48 changes: 32 additions & 16 deletions pkg/controller/podautoscaler/horizontal.go
@@ -196,6 +196,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
 	if hpa.Spec.CPUUtilization != nil {
 		cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale)
 		if err != nil {
+			a.updateCurrentReplicasInStatus(hpa, currentReplicas)
 			a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
 			return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
 		}
@@ -204,6 +205,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
 	if cmAnnotation, cmAnnotationFound := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]; cmAnnotationFound {
 		cmDesiredReplicas, cmStatus, cmTimestamp, err = a.computeReplicasForCustomMetrics(hpa, scale, cmAnnotation)
 		if err != nil {
+			a.updateCurrentReplicasInStatus(hpa, currentReplicas)
 			a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeCMReplicas", err.Error())
 			return fmt.Errorf("failed to compute desired number of replicas based on Custom Metrics for %s: %v", reference, err)
 		}
@@ -231,40 +233,54 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
 			desiredReplicas = hpa.Spec.MaxReplicas
 		}
 	}
-	rescale := false
 
+	rescale := shouldScale(hpa, currentReplicas, desiredReplicas, timestamp)
+	if rescale {
+		scale.Spec.Replicas = desiredReplicas
+		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
+		if err != nil {
+			a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
+			return fmt.Errorf("failed to rescale %s: %v", reference, err)
+		}
+		a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
+		glog.Infof("Successfull rescale of %s, old size: %d, new size: %d",
+			hpa.Name, currentReplicas, desiredReplicas)
+	} else {
+		desiredReplicas = currentReplicas
+	}
+
+	return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
+}
+
+func shouldScale(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool {
 	if desiredReplicas != currentReplicas {
 		// Going down only if the usageRatio dropped significantly below the target
 		// and there was no rescaling in the last downscaleForbiddenWindow.
 		if desiredReplicas < currentReplicas &&
 			(hpa.Status.LastScaleTime == nil ||
 				hpa.Status.LastScaleTime.Add(downscaleForbiddenWindow).Before(timestamp)) {
-			rescale = true
+			return true
 		}
 
 		// Going up only if the usage ratio increased significantly above the target
 		// and there was no rescaling in the last upscaleForbiddenWindow.
 		if desiredReplicas > currentReplicas &&
 			(hpa.Status.LastScaleTime == nil ||
 				hpa.Status.LastScaleTime.Add(upscaleForbiddenWindow).Before(timestamp)) {
-			rescale = true
+			return true
 		}
 	}
+	return false
+}
 
-	if rescale {
-		scale.Spec.Replicas = desiredReplicas
-		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
-		if err != nil {
-			a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
-			return fmt.Errorf("failed to rescale %s: %v", reference, err)
-		}
-		a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
-		glog.Infof("Successfull rescale of %s, old size: %d, new size: %d",
-			hpa.Name, currentReplicas, desiredReplicas)
-	} else {
-		desiredReplicas = currentReplicas
+func (a *HorizontalController) updateCurrentReplicasInStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas int) {
+	err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
+	if err != nil {
+		glog.Errorf("%v", err)
 	}
+}
 
+func (a *HorizontalController) updateStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error {
 	hpa.Status = extensions.HorizontalPodAutoscalerStatus{
 		CurrentReplicas: currentReplicas,
 		DesiredReplicas: desiredReplicas,
@@ -280,7 +296,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
 		hpa.Status.LastScaleTime = &now
 	}
 
-	_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
+	_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
 	if err != nil {
 		a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
 		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
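The shouldScale helper extracted above gates rescaling on anti-flapping windows: a scale-down or scale-up is suppressed until the corresponding forbidden window has elapsed since the last scale event. A standalone sketch of that check, with illustrative window durations (the actual constants are defined elsewhere in the podautoscaler package and may differ):

package main

import (
	"fmt"
	"time"
)

// Illustrative values only; see the controller source for the real constants.
const (
	downscaleForbiddenWindow = 5 * time.Minute
	upscaleForbiddenWindow   = 3 * time.Minute
)

// shouldScale mirrors the helper in the diff: scale only if the desired
// count differs and the relevant forbidden window has elapsed since the
// last scale event (lastScale == nil means the HPA has never scaled).
func shouldScale(currentReplicas, desiredReplicas int, lastScale *time.Time, timestamp time.Time) bool {
	if desiredReplicas == currentReplicas {
		return false
	}
	if desiredReplicas < currentReplicas &&
		(lastScale == nil || lastScale.Add(downscaleForbiddenWindow).Before(timestamp)) {
		return true
	}
	if desiredReplicas > currentReplicas &&
		(lastScale == nil || lastScale.Add(upscaleForbiddenWindow).Before(timestamp)) {
		return true
	}
	return false
}

func main() {
	now := time.Now()
	recent := now.Add(-time.Minute)
	fmt.Println(shouldScale(4, 2, &recent, now)) // false: downscale window has not elapsed
	fmt.Println(shouldScale(4, 2, nil, now))     // true: no previous scale event
}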
8 changes: 8 additions & 0 deletions pkg/controller/podautoscaler/horizontal_test.go
@@ -66,6 +66,7 @@ type testCase struct {
 	reportedCPURequests []resource.Quantity
 	cmTarget            *extensions.CustomMetricTargetList
 	scaleUpdated        bool
+	statusUpdated       bool
 	eventCreated        bool
 	verifyEvents        bool
 }
@@ -77,6 +78,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 	podNamePrefix := "test-pod"
 
 	tc.scaleUpdated = false
+	tc.statusUpdated = false
 	tc.eventCreated = false
 
 	fakeClient := &fake.Clientset{}
@@ -98,6 +100,10 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 					MinReplicas: &tc.minReplicas,
 					MaxReplicas: tc.maxReplicas,
 				},
Contributor: Please add a test that checks that current replicas is updated on error.

Member (author): Done

+				Status: extensions.HorizontalPodAutoscalerStatus{
+					CurrentReplicas: tc.initialReplicas,
+					DesiredReplicas: tc.initialReplicas,
+				},
 			},
 		},
 	}
@@ -191,6 +197,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		assert.Equal(t, namespace, obj.Namespace)
 		assert.Equal(t, hpaName, obj.Name)
 		assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas)
+		tc.statusUpdated = true
 		return true, obj, nil
 	})
 
@@ -209,6 +216,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 
 func (tc *testCase) verifyResults(t *testing.T) {
 	assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.scaleUpdated)
+	assert.True(t, tc.statusUpdated)
 	if tc.verifyEvents {
 		assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.eventCreated)
 	}
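The reviewer's request above was addressed in the second commit. A hypothetical sketch of what such a test can look like with this harness (the runTest entry point and the failing-metrics fake-client configuration are assumptions, not shown in this diff):

// Hypothetical: exercises the error path. With the fake metrics client
// configured to fail, the controller should still update the HPA status,
// so the new assert.True(t, tc.statusUpdated) in verifyResults must pass.
func TestStatusUpdatedOnFailedGetMetrics(t *testing.T) {
	tc := testCase{
		minReplicas:     1,
		maxReplicas:     5,
		initialReplicas: 3,
		// No rescale is expected: desired replicas cannot be computed
		// when metrics retrieval fails.
		desiredReplicas: 3,
	}
	// Assumed harness entry point; configures the fake client, runs the
	// controller once, and calls tc.verifyResults(t).
	tc.runTest(t)
}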