refactor autoscaling utils in e2e #84510

Merged · 2 commits · Nov 8, 2019
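This PR moves the autoscaling test utilities (ResourceConsumer, the Kind* GVK variables, and the HPA create/delete helpers) out of the catch-all k8s.io/kubernetes/test/e2e/common package into a dedicated k8s.io/kubernetes/test/e2e/framework/autoscaling package, updates every caller to the conventional e2eautoscaling import alias, and adds doc comments to the exported identifiers. The moved file is otherwise unchanged. A minimal sketch of the caller-side change, assuming a test that already has a framework.Framework (the numeric arguments — replicas, initial CPU/memory/custom-metric totals, CPU and memory limits — are illustrative values, not taken from the diff):

    package example

    import (
        "k8s.io/kubernetes/test/e2e/framework"
        e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
    )

    // Callers that previously called common.NewDynamicResourceConsumer(...) now
    // go through the aliased framework subpackage; the signature is unchanged.
    func newConsumer(f *framework.Framework) *e2eautoscaling.ResourceConsumer {
        return e2eautoscaling.NewDynamicResourceConsumer("resource-consumer",
            f.Namespace.Name, e2eautoscaling.KindDeployment,
            1, 0, 0, 0, 500, 200, // illustrative values only
            f.ClientSet, f.ScalesGetter)
    }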
2 changes: 1 addition & 1 deletion test/e2e/autoscaling/BUILD
@@ -38,8 +38,8 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/autoscaling:go_default_library",
         "//test/e2e/framework/network:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
8 changes: 4 additions & 4 deletions test/e2e/autoscaling/autoscaling_timer.go
@@ -22,8 +22,8 @@ import (
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
 	"github.com/onsi/ginkgo"
@@ -94,15 +94,15 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 		nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
 		memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
 		replicas := 1
-		resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
+		resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
 		defer resourceConsumer.CleanUp()
 		resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
 
 		// Enable Horizontal Pod Autoscaler with 50% target utilization and
 		// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
 		targetCPUUtilizationPercent := int32(50)
-		hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
-		defer common.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
+		hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
+		defer e2eautoscaling.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
 		cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
 		resourceConsumer.ConsumeCPU(int(cpuLoad))
 
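The cpuLoad arithmetic above is what makes this test deterministic: it drives total CPU usage to exactly the level at which the HPA's utilization rule lands on 8 replicas. A standalone check of the arithmetic, with cpuRequestMillis = 500 assumed purely for illustration (the actual constant is defined outside the visible hunk):

    package main

    import "fmt"

    func main() {
        // Mirrors: cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100
        cpuRequestMillis := int64(500) // assumed value, not from the diff
        targetCPUUtilizationPercent := int32(50)
        cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100

        // 8 pods at 50% of a 500m request consume 2000m in total; the HPA rule
        // desired = ceil(usage / (target * request)) then gives ceil(2000/250) = 8.
        fmt.Printf("cpuLoad = %dm -> desired replicas = %d\n",
            cpuLoad, cpuLoad/(cpuRequestMillis*50/100)) // cpuLoad = 2000m -> desired replicas = 8
    }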
32 changes: 16 additions & 16 deletions test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -20,16 +20,16 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
 
 	"github.com/onsi/ginkgo"
 )
 
 // These tests don't seem to be running properly in parallel: issue: #20338.
 //
 var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
-	var rc *common.ResourceConsumer
+	var rc *e2eautoscaling.ResourceConsumer
 	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
 
 	titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
@@ -38,31 +38,31 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 	SIGDescribe("[Serial] [Slow] Deployment", func() {
 		// CPU tests via deployments
 		ginkgo.It(titleUp, func() {
-			scaleUp("test-deployment", common.KindDeployment, false, rc, f)
+			scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
 		})
 		ginkgo.It(titleDown, func() {
-			scaleDown("test-deployment", common.KindDeployment, false, rc, f)
+			scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
 		})
 	})
 
 	SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
 		// CPU tests via ReplicaSets
 		ginkgo.It(titleUp, func() {
-			scaleUp("rs", common.KindReplicaSet, false, rc, f)
+			scaleUp("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
 		})
 		ginkgo.It(titleDown, func() {
-			scaleDown("rs", common.KindReplicaSet, false, rc, f)
+			scaleDown("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
 		})
 	})
 
 	// These tests take ~20 minutes each.
 	SIGDescribe("[Serial] [Slow] ReplicationController", func() {
 		// CPU tests via replication controllers
 		ginkgo.It(titleUp+" and verify decision stability", func() {
-			scaleUp("rc", common.KindRC, true, rc, f)
+			scaleUp("rc", e2eautoscaling.KindRC, true, rc, f)
 		})
 		ginkgo.It(titleDown+" and verify decision stability", func() {
-			scaleDown("rc", common.KindRC, true, rc, f)
+			scaleDown("rc", e2eautoscaling.KindRC, true, rc, f)
 		})
 	})
 
@@ -77,7 +77,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 				maxPods: 2,
 				firstScale: 2,
 			}
-			scaleTest.run("rc-light", common.KindRC, rc, f)
+			scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
 		})
 		ginkgo.It("Should scale from 2 pods to 1 pod [Slow]", func() {
 			scaleTest := &HPAScaleTest{
@@ -89,7 +89,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 				maxPods: 2,
 				firstScale: 1,
 			}
-			scaleTest.run("rc-light", common.KindRC, rc, f)
+			scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
 		})
 	})
 })
@@ -114,12 +114,12 @@ type HPAScaleTest struct {
 // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
 // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
-func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
+func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
+	rc = e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
 	defer rc.CleanUp()
-	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
-	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
+	hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
+	defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
 
 	rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
 	if scaleTest.firstScaleStasis > 0 {
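The body of this if is collapsed in the diff. Based on the helpers visible elsewhere in this PR and the three-state comment above run(), the stasis check presumably looks something like the sketch below; the cpuBurst and secondScale field names are guesses for the elided follow-up step, not copied from the source:

    // Hedged sketch only — reconstructed, not verbatim from the PR.
    if scaleTest.firstScaleStasis > 0 {
        // Hold near firstScale for the stasis window to verify decision
        // stability (the +1 tolerance allows a brief overshoot).
        rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1,
            scaleTest.firstScaleStasis, hpa.Name)
    }
    if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 { // field names assumed
        rc.ConsumeCPU(scaleTest.cpuBurst)
        rc.WaitForReplicas(scaleTest.secondScale, timeToWait)
    }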
@@ -131,7 +131,7 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
 	}
 }
 
-func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
+func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 10 * time.Minute
@@ -151,7 +151,7 @@ func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc
 	scaleTest.run(name, kind, rc, f)
 }
 
-func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
+func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 10 * time.Minute
7 changes: 0 additions & 7 deletions test/e2e/common/BUILD
@@ -9,7 +9,6 @@ go_library(
     name = "go_default_library",
     srcs = [
         "apparmor.go",
-        "autoscaling_utils.go",
         "configmap.go",
         "configmap_volume.go",
         "container.go",
@@ -46,15 +45,13 @@ go_library(
     importpath = "k8s.io/kubernetes/test/e2e/common",
     deps = [
         "//pkg/api/v1/pod:go_default_library",
-        "//pkg/apis/core:go_default_library",
         "//pkg/client/conditions:go_default_library",
         "//pkg/kubelet:go_default_library",
         "//pkg/kubelet/events:go_default_library",
         "//pkg/kubelet/images:go_default_library",
         "//pkg/kubelet/runtimeclass/testing:go_default_library",
         "//pkg/kubelet/sysctl:go_default_library",
         "//pkg/security/apparmor:go_default_library",
-        "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
         "//staging/src/k8s.io/api/coordination/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
@@ -64,7 +61,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
@@ -74,16 +70,13 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//staging/src/k8s.io/client-go/scale:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
         "//test/e2e/framework/network:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
-        "//test/e2e/framework/replicaset:go_default_library",
-        "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
1 change: 1 addition & 0 deletions test/e2e/framework/BUILD
@@ -107,6 +107,7 @@ filegroup(
     srcs = [
         ":package-srcs",
         "//test/e2e/framework/auth:all-srcs",
+        "//test/e2e/framework/autoscaling:all-srcs",
         "//test/e2e/framework/config:all-srcs",
         "//test/e2e/framework/deployment:all-srcs",
         "//test/e2e/framework/deviceplugin:all-srcs",
39 changes: 39 additions & 0 deletions test/e2e/framework/autoscaling/BUILD
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["autoscaling_utils.go"],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/autoscaling",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/apis/core:go_default_library",
+        "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/scale:go_default_library",
+        "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/replicaset:go_default_library",
+        "//test/e2e/framework/service:go_default_library",
+        "//test/utils:go_default_library",
+        "//test/utils/image:go_default_library",
+        "//vendor/github.com/onsi/ginkgo:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
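Both filegroup targets are the standard boilerplate kept in every package's BUILD file and regenerated by the repository's BUILD-update tooling (hence tags = ["automanaged"]): package-srcs globs the package's own files, and all-srcs re-exports them upward so that the one-line change to test/e2e/framework/BUILD above can roll the new package into the tree-wide source list. With this in place, the library is addressable as //test/e2e/framework/autoscaling:go_default_library, which is exactly the label the updated BUILD files elsewhere in this PR depend on.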
test/e2e/{common → framework/autoscaling}/autoscaling_utils.go (renamed)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package common
+package autoscaling
 
 import (
 	"context"
@@ -67,8 +67,11 @@ var (
 )
 
 var (
-	KindRC         = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
+	// KindRC is the GVK for ReplicationController
+	KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
+	// KindDeployment is the GVK for Deployment
 	KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
+	// KindReplicaSet is the GVK for ReplicaSet
 	KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
 )
 
@@ -101,21 +104,25 @@ type ResourceConsumer struct {
 	requestSizeCustomMetric int
 }
 
+// GetResourceConsumerImage is a wrapper to get the fully qualified URI of the ResourceConsumer image
 func GetResourceConsumerImage() string {
 	return resourceConsumerImage
 }
 
+// NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer
 func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
 	return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
 		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
 }
 
+// NewStaticResourceConsumer is a wrapper to create a new static ResourceConsumer
 // TODO this still defaults to replication controller
 func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
 	return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
 		initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
 }
 
+// NewMetricExporter is a wrapper to create a new ResourceConsumer for metrics exporter
 func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
 	return newResourceConsumer(name, nsName, KindDeployment, 1, 0, 0, metricValue, dynamicConsumptionTimeInSeconds,
 		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, scaleClient, podAnnotations, serviceAnnotations)
@@ -179,7 +186,7 @@ func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
 	rc.mem <- megabytes
 }
 
-// ConsumeMem consumes given number of custom metric
+// ConsumeCustomMetric consumes given number of custom metric
 func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
 	framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
 	rc.customMetric <- amount
@@ -328,6 +335,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 	framework.ExpectNoError(err)
 }
 
+// GetReplicas get the replicas
 func (rc *ResourceConsumer) GetReplicas() int {
 	switch rc.kind {
 	case KindRC:
@@ -357,10 +365,12 @@ func (rc *ResourceConsumer) GetReplicas() int {
 	return 0
 }
 
+// GetHpa get the corresponding horizontalPodAutoscaler object
 func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
 	return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
 }
 
+// WaitForReplicas wait for the desired replicas
 func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
 	interval := 20 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
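The poll callback itself is collapsed in the diff. From the visible signature, GetReplicas, and the error check that follows, the pattern is presumably along these lines — a hedged sketch of the method body (the log message is a guess), not the verbatim source:

    // Hedged reconstruction of WaitForReplicas' collapsed poll body.
    err := wait.PollImmediate(interval, duration, func() (bool, error) {
        replicas := rc.GetReplicas() // query the workload's current replica count
        framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
        return replicas == desiredReplicas, nil // keep polling until counts match
    })
    framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)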
@@ -371,10 +381,12 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
 	framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
 }
 
+// EnsureDesiredReplicas ensure the replicas to desired number
 func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration, hpaName string) {
 	rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration, hpaName)
 }
 
+// EnsureDesiredReplicasInRange ensure the replicas is in a desired range
 func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
 	interval := 10 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
@@ -411,14 +423,15 @@ func (rc *ResourceConsumer) Pause() {
 	rc.stopWaitGroup.Wait()
 }
 
-// Pause starts background goroutines responsible for consuming resources.
+// Resume starts background goroutines responsible for consuming resources.
 func (rc *ResourceConsumer) Resume() {
 	ginkgo.By(fmt.Sprintf("HPA resuming RC %s", rc.name))
 	go rc.makeConsumeCPURequests()
 	go rc.makeConsumeMemRequests()
 	go rc.makeConsumeCustomMetric()
 }
 
+// CleanUp clean up the background goroutines responsible for consuming resources.
 func (rc *ResourceConsumer) CleanUp() {
 	ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name))
 	close(rc.stopCPU)
@@ -526,6 +539,8 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 		c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
 }
 
+// CreateCPUHorizontalPodAutoscaler create a horizontalPodAutoscaler with CPU target
+// for consuming resources.
 func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv1.HorizontalPodAutoscaler {
 	hpa := &autoscalingv1.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
@@ -548,6 +563,7 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 	return hpa
 }
 
+// DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources.
 func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
 	rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
 }
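The Spec that CreateCPUHorizontalPodAutoscaler builds is collapsed between the two hunks above. Given the function's parameters and the autoscaling/v1 types, the body presumably resembles the following hedged reconstruction (a plausible sketch, not the verbatim source):

    // Hedged reconstruction of the collapsed body of CreateCPUHorizontalPodAutoscaler.
    hpa := &autoscalingv1.HorizontalPodAutoscaler{
        ObjectMeta: metav1.ObjectMeta{
            Name:      rc.name,
            Namespace: rc.nsName,
        },
        Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
            ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
                APIVersion: rc.kind.GroupVersion().String(),
                Kind:       rc.kind.Kind,
                Name:       rc.name,
            },
            MinReplicas:                    &minReplicas, // *int32 in autoscaling/v1
            MaxReplicas:                    maxRepl,
            TargetCPUUtilizationPercentage: &cpu,
        },
    }
    hpa, err := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
    framework.ExpectNoError(err)
    return hpa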
2 changes: 1 addition & 1 deletion test/e2e/instrumentation/monitoring/BUILD
@@ -33,8 +33,8 @@ go_library(
         "//staging/src/k8s.io/client-go/restmapper:go_default_library",
         "//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
         "//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
-        "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/autoscaling:go_default_library",
         "//test/e2e/framework/config:go_default_library",
         "//test/e2e/framework/gpu:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",