Merge pull request #39694 from DirectXMan12/bug/hpa-panic
Automatic merge from submit-queue (batch tested with PRs 39694, 39383, 39651, 39691, 39497)

HPA Controller: Check for 0-sum request value

In certain conditions in which the set of metrics returned by Heapster
is completely disjoint from the set of pods returned by the API server,
we can have a request sum of zero, which can cause a panic (due to
division by zero).  This checks for that condition.

Fixes #39680

**Release note**:

```release-note
Fixes an HPA-related panic due to division-by-zero.
```
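
To make the failure mode concrete, here is a minimal standalone Go sketch (illustrative names only, not the controller code): when requests are summed only for pods that also appear in the metrics map, two disjoint sets leave the total at zero, and the subsequent integer division panics unless guarded.

```go
package main

import "fmt"

// sumMatchedRequests mirrors the failure mode described above: only requests
// for pods that also appear in the metrics map are summed, so two fully
// disjoint maps leave the total at zero.
func sumMatchedRequests(metrics, requests map[string]int64) int64 {
	total := int64(0)
	for podName := range metrics {
		if request, ok := requests[podName]; ok {
			total += request
		}
	}
	return total
}

func main() {
	metrics := map[string]int64{"an-older-pod-name": 100} // e.g. stale data from Heapster
	requests := map[string]int64{"current-pod-0": 1000}   // pods known to the API server

	requestsTotal := sumMatchedRequests(metrics, requests) // 0: the sets are disjoint

	if requestsTotal == 0 {
		// Without this guard, the division below would panic with
		// "runtime error: integer divide by zero".
		fmt.Println("no metrics returned matched known pods")
		return
	}
	fmt.Println((100 * 100) / requestsTotal)
}
```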
Kubernetes Submit Queue committed Jan 11, 2017
2 parents ea18d5c + c830d94 commit bb7d07a
Showing 2 changed files with 33 additions and 1 deletion.
pkg/controller/podautoscaler/metrics/utilization.go (10 additions, 0 deletions)
```diff
@@ -16,6 +16,10 @@ limitations under the License.

 package metrics

+import (
+	"fmt"
+)
+
 // GetResourceUtilizationRatio takes in a set of metrics, a set of matching requests,
 // and a target utilization percentage, and calculates the ratio of
 // desired to actual utilization (returning that and the actual utilization)
@@ -34,6 +38,12 @@ func GetResourceUtilizationRatio(metrics PodResourceInfo, requests map[string]in
 		requestsTotal += request
 	}

+	// if the set of requests is completely disjoint from the set of metrics,
+	// then we could have an issue where the requests total is zero
+	if requestsTotal == 0 {
+		return 0, 0, fmt.Errorf("no metrics returned matched known pods")
+	}
+
 	currentUtilization := int32((metricsTotal * 100) / requestsTotal)

 	return float64(currentUtilization) / float64(targetUtilization), currentUtilization, nil
```
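
For callers, the change means the error return must now be checked before the ratio is used. A hedged usage sketch, assuming PodResourceInfo maps pod names to raw metric values as used elsewhere in this package (its definition is outside the hunk above), with illustrative pod names and values:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

func main() {
	// Illustrative values; PodResourceInfo is assumed to map pod name -> metric value.
	podMetrics := metrics.PodResourceInfo{"frontend-0": 500}
	podRequests := map[string]int64{"frontend-0": 1000}

	ratio, utilization, err := metrics.GetResourceUtilizationRatio(podMetrics, podRequests, 80)
	if err != nil {
		// After this PR, fully disjoint inputs land here with
		// "no metrics returned matched known pods" instead of panicking.
		fmt.Println("utilization calculation failed:", err)
		return
	}
	fmt.Printf("current utilization %d%%, scale ratio %.2f\n", utilization, ratio)
}
```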
pkg/controller/podautoscaler/replica_calculator_test.go (23 additions, 1 deletion)
```diff
@@ -47,6 +47,8 @@ type resourceInfo struct {
 	name     v1.ResourceName
 	requests []resource.Quantity
 	levels   []int64
+	// only applies to pod names returned from "heapster"
+	podNames []string

 	targetUtilization   int32
 	expectedUtilization int32
@@ -134,9 +136,13 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 	if tc.resource != nil {
 		metrics := metricsapi.PodMetricsList{}
 		for i, resValue := range tc.resource.levels {
+			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
+			if len(tc.resource.podNames) > i {
+				podName = tc.resource.podNames[i]
+			}
 			podMetric := metricsapi.PodMetrics{
 				ObjectMeta: v1.ObjectMeta{
-					Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
+					Name:      podName,
 					Namespace: testNamespace,
 				},
 				Timestamp: unversioned.Time{Time: tc.timestamp},
@@ -250,6 +256,22 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
 	}
 }

+func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
+	tc := replicaCalcTestCase{
+		currentReplicas: 1,
+		expectedError:   fmt.Errorf("no metrics returned matched known pods"),
+		resource: &resourceInfo{
+			name:     v1.ResourceCPU,
+			requests: []resource.Quantity{resource.MustParse("1.0")},
+			levels:   []int64{100},
+			podNames: []string{"an-older-pod-name"},
+
+			targetUtilization: 100,
+		},
+	}
+	tc.runTest(t)
+}
+
 func TestReplicaCalcScaleUp(t *testing.T) {
 	tc := replicaCalcTestCase{
 		currentReplicas: 3,
```
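
The new test case exercises the disjoint condition directly: the podNames override makes the fake metrics source report its single sample under "an-older-pod-name", while the pod known to the fake API server keeps its podNamePrefix-derived name. No metric therefore matches a known pod, and the calculator is expected to return the new "no metrics returned matched known pods" error instead of panicking.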
