Add e2e HPA Behavior tests: scale up&down limited by number of Pods per minute, scale up&down limited by percentage of Pods per minute
piotrnosek committed Aug 4, 2022
1 parent 1c455c1 commit aa9ed52
Showing 2 changed files with 237 additions and 37 deletions.
206 changes: 185 additions & 21 deletions test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go
@@ -30,6 +30,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

hpaName := "consumer"

podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110

fullWindowOfNewUsage := 30 * time.Second
windowWithOldUsagePasses := 30 * time.Second
newPodMetricsDelay := 15 * time.Second
@@ -48,16 +54,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
ginkgo.Describe("with short downscale stabilization window", func() {
ginkgo.It("should scale down soon after the stabilization period", func() {
ginkgo.By("setting up resource consumer and HPA")
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
upScaleStabilization := 0 * time.Minute
downScaleStabilization := 1 * time.Minute

rc := e2eautoscaling.NewDynamicResourceConsumer(
"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
@@ -92,16 +95,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
ginkgo.Describe("with long upscale stabilization window", func() {
ginkgo.It("should scale up only after the stabilization period", func() {
ginkgo.By("setting up resource consumer and HPA")
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica
upScaleStabilization := 3 * time.Minute
downScaleStabilization := 0 * time.Minute

rc := e2eautoscaling.NewDynamicResourceConsumer(
"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
@@ -133,24 +133,21 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
})
})

ginkgo.Describe("with upscale autoscaling disabled", func() {
ginkgo.Describe("with autoscaling disabled", func() {
ginkgo.It("shouldn't scale up", func() {
ginkgo.By("setting up resource consumer and HPA")
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica

rc := e2eautoscaling.NewDynamicResourceConsumer(
"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()

hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleUpDisabled(),
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

@@ -171,26 +168,21 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
replicas := rc.GetReplicas()
framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas)
})
})

ginkgo.Describe("with downscale autoscaling disabled", func() {
ginkgo.It("shouldn't scale down", func() {
ginkgo.By("setting up resource consumer and HPA")
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
initPods := 3
initCPUUsageTotal := initPods * usageForSingleReplica

rc := e2eautoscaling.NewDynamicResourceConsumer(
"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()

hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDownDisabled(),
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

@@ -212,6 +204,178 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
replicas := rc.GetReplicas()
framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas)
})

})
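For reference, a minimal sketch of the behavior spec these two tests end up configuring via HPABehaviorWithScaleDisabled (added in autoscaling_utils.go below): a single rule whose select policy is Disabled, with the other direction left at its defaults. The standalone main wrapper and the k8s.io/api/autoscaling/v2 import are illustration-only assumptions.

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
	disabled := autoscalingv2.DisabledPolicySelect
	behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{
			SelectPolicy: &disabled, // the HPA will never add replicas
		},
		// ScaleDown is left nil, so default scale-down behavior still applies.
	}
	fmt.Println(*behavior.ScaleUp.SelectPolicy) // prints: Disabled
}
```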

ginkgo.Describe("with scale limited by number of Pods rate", func() {
ginkgo.It("should scale up no more than given number of Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 2

rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()

hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(5 * usageForSingleReplica)

waitStart := time.Now()
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart)

waitStart = time.Now()
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor5 := time.Now().Sub(waitStart)

ginkgo.By("verifying time waited for a scale up to 3 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)

ginkgo.By("verifying time waited for a scale up to 5 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor5 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor5, limitWindowLength)
framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
})

ginkgo.It("should scale down no more than given number of Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 6
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 2

rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()

hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica)

waitStart := time.Now()
rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor4 := time.Now().Sub(waitStart)

waitStart = time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart)

ginkgo.By("verifying time waited for a scale down to 4 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)

ginkgo.By("verifying time waited for a scale down to 2 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
})
})
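A minimal sketch of the scale-up rule that HPABehaviorWithScaleLimitedByNumberOfPods(ScaleUpDirection, 2, 60) builds per the helper below: one Pods policy allowing at most 2 additional Pods per 60-second window, with the Max select policy and a zeroed stabilization window. The main wrapper is an assumption for illustration.

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
	stabilizationWindow := int32(0) // react immediately, no stabilization
	selectPolicy := autoscalingv2.MaxChangePolicySelect
	scaleUpRule := &autoscalingv2.HPAScalingRules{
		StabilizationWindowSeconds: &stabilizationWindow,
		SelectPolicy:               &selectPolicy,
		Policies: []autoscalingv2.HPAScalingPolicy{
			// at most 2 additional Pods per 60-second window
			{Type: autoscalingv2.PodsScalingPolicy, Value: 2, PeriodSeconds: 60},
		},
	}
	behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{ScaleUp: scaleUpRule}

	// With 1 initial replica and load for 5, the HPA may jump 1 -> 3 right
	// away (no prior scale events), but must wait out the window before 5.
	fmt.Println(behavior.ScaleUp.Policies[0].Value) // prints: 2
}
```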

ginkgo.Describe("with scale limited by percentage", func() {
ginkgo.It("should scale up no more than given percentage of current Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 4
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 50

rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()

hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(10 * usageForSingleReplica)

waitStart := time.Now()
rc.WaitForReplicas(6, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor6 := time.Now().Sub(waitStart)

waitStart = time.Now()
rc.WaitForReplicas(9, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor9 := time.Now().Sub(waitStart)

ginkgo.By("verifying time waited for a scale up to 6 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor6 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor6, deadline)

ginkgo.By("verifying time waited for a scale up to 9 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor9 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor9, limitWindowLength)
framework.ExpectEqual(timeWaitedFor9 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor9, deadline)
})

ginkgo.It("should scale down no more than given percentage of current Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 8
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 50

rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()

hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica)

waitStart := time.Now()
rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor4 := time.Now().Sub(waitStart)

waitStart = time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart)

ginkgo.By("verifying time waited for a scale down to 4 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)

ginkgo.By("verifying time waited for a scale down to 2 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
})
})
})
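The expected replica counts in the percentage tests follow from the 50%-per-minute limit, assuming the controller rounds up when capping scale-up and rounds down when capping scale-down; a small sketch of that arithmetic:

```go
package main

import (
	"fmt"
	"math"
)

// limitUp/limitDown model a Percent policy of the given value applied to the
// replica count at the start of the limit window.
func limitUp(current int, percent float64) int {
	return int(math.Ceil(float64(current) * (1 + percent/100)))
}

func limitDown(current int, percent float64) int {
	return int(math.Floor(float64(current) * (1 - percent/100)))
}

func main() {
	fmt.Println(limitUp(4, 50), limitUp(6, 50))     // 6 9 -> scale-up test expects 4 -> 6 -> 9
	fmt.Println(limitDown(8, 50), limitDown(4, 50)) // 4 2 -> scale-down test expects 8 -> 4 -> 2
}
```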
68 changes: 52 additions & 16 deletions test/e2e/framework/autoscaling/autoscaling_utils.go
@@ -78,6 +78,15 @@ var (
KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
)

// ScalingDirection identifies the scale direction for HPA Behavior.
type ScalingDirection int

const (
DirectionUnknown ScalingDirection = iota
ScaleUpDirection
ScaleDownDirection
)

/*
ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
typical use case:
@@ -725,38 +734,65 @@ func HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule *autoscalingv
}
}

func HPAStabilizationWindowScalingRule(stabilizationDuration int32) *autoscalingv2.HPAScalingRules {
func HPABehaviorWithScalingRuleInDirection(scalingDirection ScalingDirection, rule *autoscalingv2.HPAScalingRules) *autoscalingv2.HorizontalPodAutoscalerBehavior {
var scaleUpRule, scaleDownRule *autoscalingv2.HPAScalingRules
if scalingDirection == ScaleUpDirection {
scaleUpRule = rule
}
if scalingDirection == ScaleDownDirection {
scaleDownRule = rule
}
return HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule)
}

// HPAScalingRuleWithStabilizationWindow returns a scaling rule that only sets the stabilization window.
func HPAScalingRuleWithStabilizationWindow(stabilizationDuration int32) *autoscalingv2.HPAScalingRules {
return &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: &stabilizationDuration,
}
}

func HPAPolicyDisabledScalingRule() *autoscalingv2.HPAScalingRules {
func HPAScalingRuleWithPolicyDisabled() *autoscalingv2.HPAScalingRules {
disabledPolicy := autoscalingv2.DisabledPolicySelect
return &autoscalingv2.HPAScalingRules{
SelectPolicy: &disabledPolicy,
}
}

// HPAScalingRuleWithScalingPolicy returns a scaling rule with a single policy of the given type and value, the Max select policy, and stabilization disabled.
func HPAScalingRuleWithScalingPolicy(policyType autoscalingv2.HPAScalingPolicyType, value, periodSeconds int32) *autoscalingv2.HPAScalingRules {
stabilizationWindowDisabledDuration := int32(0)
selectPolicy := autoscalingv2.MaxChangePolicySelect
return &autoscalingv2.HPAScalingRules{
Policies: []autoscalingv2.HPAScalingPolicy{
{
Type: policyType,
Value: value,
PeriodSeconds: periodSeconds,
},
},
SelectPolicy: &selectPolicy,
StabilizationWindowSeconds: &stabilizationWindowDisabledDuration,
}
}

func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior {
return HPABehaviorWithScaleUpAndDownRules(
/*scaleUpRule=*/ HPAStabilizationWindowScalingRule(int32(upscaleStabilization.Seconds())),
/*scaleDownRule=*/ HPAStabilizationWindowScalingRule(int32(downscaleStabilization.Seconds())),
)
scaleUpRule := HPAScalingRuleWithStabilizationWindow(int32(upscaleStabilization.Seconds()))
scaleDownRule := HPAScalingRuleWithStabilizationWindow(int32(downscaleStabilization.Seconds()))
return HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule)
}

// HPABehaviorWithScaleDisabled returns a behavior with scaling disabled in the given direction.
func HPABehaviorWithScaleDisabled(scalingDirection ScalingDirection) *autoscalingv2.HorizontalPodAutoscalerBehavior {
scalingRule := HPAScalingRuleWithPolicyDisabled()
return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule)
}

func HPABehaviorWithScaleUpDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
return HPABehaviorWithScaleUpAndDownRules(
/*scaleUpRule=*/ HPAPolicyDisabledScalingRule(),
/*scaleDownRule=*/ nil,
)
// HPABehaviorWithScaleLimitedByNumberOfPods returns a behavior that limits scaling in the given direction to numberOfPods Pods per periodSeconds.
func HPABehaviorWithScaleLimitedByNumberOfPods(scalingDirection ScalingDirection, numberOfPods, periodSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior {
scalingRule := HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, numberOfPods, periodSeconds)
return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule)
}

func HPABehaviorWithScaleDownDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
return HPABehaviorWithScaleUpAndDownRules(
/*scaleUpRule=*/ nil,
/*scaleDownRule=*/ HPAPolicyDisabledScalingRule(),
)
// HPABehaviorWithScaleLimitedByPercentage returns a behavior that limits scaling in the given direction to the given percentage of current Pods per periodSeconds.
func HPABehaviorWithScaleLimitedByPercentage(scalingDirection ScalingDirection, percentage, periodSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior {
scalingRule := HPAScalingRuleWithScalingPolicy(autoscalingv2.PercentScalingPolicy, percentage, periodSeconds)
return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule)
}

func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) {

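A usage sketch of how call sites map onto the renamed and direction-parameterized helpers in this commit; the e2eautoscaling import path is an assumption for illustration.

```go
package main

import (
	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
)

func main() {
	// was: e2eautoscaling.HPABehaviorWithScaleUpDisabled() / HPABehaviorWithScaleDownDisabled()
	_ = e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection)
	_ = e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection)

	// new rate-limited behaviors exercised by the added tests
	_ = e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, 2, 60)
	_ = e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, 50, 60)
}
```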