Fix golint failures of test/e2e/autoscaling #77913

Merged: 1 commit, May 22, 2019
1 change: 0 additions & 1 deletion hack/.golint_failures
@@ -590,7 +590,6 @@ staging/src/k8s.io/sample-apiserver/pkg/apis/wardle
staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
- test/e2e/autoscaling
test/e2e/chaosmonkey
test/e2e/common
test/e2e/lifecycle/bootstrap
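With test/e2e/autoscaling removed from hack/.golint_failures, the package is no longer exempt from the repository's golint check, so the files below are updated to pass it: the dot imports of ginkgo and gomega become qualified imports, and identifiers spelled Cpu adopt the CPU initialism. A minimal sketch of the resulting style (illustrative only, not code from this PR; the package name, identifiers, and values are made up):

// Illustrative sketch of the lint-clean style adopted below; the package,
// identifiers, and values here are examples, not code from the PR.
package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("qualified ginkgo/gomega usage", func() {
	ginkgo.It("names CPU-related variables with the Go initialism", func() {
		// golint prefers nodeCPUMillis over nodeCpuMillis.
		nodeCPUMillis := int64(2000)
		gomega.Expect(nodeCPUMillis).To(gomega.BeNumerically(">", 0))
	})
})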
26 changes: 13 additions & 13 deletions test/e2e/autoscaling/autoscaling_timer.go
@@ -25,28 +25,28 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
f := framework.NewDefaultFramework("autoscaling")

SIGDescribe("Autoscaling a service", func() {
- BeforeEach(func() {
+ ginkgo.BeforeEach(func() {
// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
framework.Skipf("test expects Cluster Autoscaler to be enabled")
}
})

Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
ginkgo.Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
const nodesNum = 3 // Expect there to be 3 nodes before and after the test.
var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test.
var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes.

- BeforeEach(func() {
+ ginkgo.BeforeEach(func() {
// Make sure there is only 1 node group, otherwise this test becomes useless.
nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
if len(nodeGroups) != 1 {
@@ -64,10 +64,10 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
schedulableCount := len(nodes.Items)
- Expect(schedulableCount).To(Equal(nodeGroupSize), "not all nodes are schedulable")
+ gomega.Expect(schedulableCount).To(gomega.Equal(nodeGroupSize), "not all nodes are schedulable")
})

- AfterEach(func() {
+ ginkgo.AfterEach(func() {
// Attempt cleanup only if a node group was targeted for scale up.
// Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters.
if len(nodeGroupName) > 0 {
@@ -77,16 +77,16 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
}
})

Measure("takes less than 15 minutes", func(b Benchmarker) {
ginkgo.Measure("takes less than 15 minutes", func(b ginkgo.Benchmarker) {
// Measured over multiple samples, scaling takes 10 +/- 2 minutes, so 15 minutes should be fully sufficient.
const timeToWait = 15 * time.Minute

// Calculate the CPU request of the service.
// This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
// Make it so that 'nodesNum' pods fit perfectly per node.
nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
- nodeCpuMillis := (&nodeCpus).MilliValue()
- cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
+ nodeCPUMillis := (&nodeCpus).MilliValue()
+ cpuRequestMillis := int64(nodeCPUMillis / nodesNum)

// Start the service we want to scale and wait for it to be up and running.
nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
@@ -99,10 +99,10 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"

// Enable Horizontal Pod Autoscaler with 50% target utilization and
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
- targetCpuUtilizationPercent := int32(50)
- hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCpuUtilizationPercent, 1, 10)
+ targetCPUUtilizationPercent := int32(50)
+ hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
defer common.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
- cpuLoad := 8 * cpuRequestMillis * int64(targetCpuUtilizationPercent) / 100 // 8 pods utilized to the target level
+ cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad))

// Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each.
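As a side note on the arithmetic in the renamed lines above, here is a worked example of the cpuRequestMillis / cpuLoad calculation; the 2000-millicore node size is an assumption for illustration, not a value from the test:

// Worked example of the cpuRequestMillis / cpuLoad calculation above.
// The node size is an assumption for illustration only.
package main

import "fmt"

func main() {
	const nodesNum = 3                           // nodes expected before the test
	nodeCPUMillis := int64(2000)                 // assumed allocatable millicores per node
	cpuRequestMillis := nodeCPUMillis / nodesNum // 666m, so nodesNum pods fit per node
	targetCPUUtilizationPercent := int64(50)     // HPA target used by the test

	// Load equivalent to 8 pods at the 50% target: 8 * 666 * 50 / 100 = 2664 millicores.
	// Per the test's own comment, 8 such pods are expected to need a fourth node.
	cpuLoad := 8 * cpuRequestMillis * targetCPUUtilizationPercent / 100
	fmt.Printf("per-pod CPU request: %dm, total CPU load: %dm\n", cpuRequestMillis, cpuLoad)
}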
44 changes: 22 additions & 22 deletions test/e2e/autoscaling/cluster_autoscaler_scalability.go
@@ -33,8 +33,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/klog"
)

@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
var originalSizes map[string]int
var sum int

- BeforeEach(func() {
+ ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke", "kubemark")

// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
@@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
@@ -91,13 +91,13 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun

nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
- Expect(nodeCount).NotTo(BeZero())
+ gomega.Expect(nodeCount).NotTo(gomega.BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
memCapacityMb = int((&mem).Value() / 1024 / 1024)

- Expect(nodeCount).Should(Equal(sum))
+ gomega.Expect(nodeCount).Should(gomega.Equal(sum))

if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(3)
@@ -109,8 +109,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
})

- AfterEach(func() {
- By(fmt.Sprintf("Restoring initial size of the cluster"))
+ ginkgo.AfterEach(func() {
+ ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})

It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
ginkgo.It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10

@@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer testCleanup()
})

It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10
additionalNodes1 := int(math.Ceil(0.7 * maxNodes))
@@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
klog.Infof("Scaled up twice")
})

It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.7)
replicas := int(math.Ceil(maxNodes * 0.7))
totalNodes := maxNodes
@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}, scaleDownTimeout))
})

It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
perPodReservation := int(float64(memCapacityMb) * 0.01)
// underutilizedNodes are 10% full
underutilizedPerNodeReplicas := 10
@@ -291,7 +291,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}, timeout))
})

It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
fullReservation := int(float64(memCapacityMb) * 0.9)
hostPortPodReservation := int(float64(memCapacityMb) * 0.3)
totalNodes := maxNodes
@@ -307,28 +307,28 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
fullNodesCount := divider
underutilizedNodesCount := totalNodes - fullNodesCount

By("Reserving full nodes")
ginkgo.By("Reserving full nodes")
// run RC1 w/o host port
cleanup := ReserveMemory(f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2)
defer cleanup()

By("Reserving host ports on remaining nodes")
ginkgo.By("Reserving host ports on remaining nodes")
// run RC2 w/ host port
cleanup2 := createHostPortPodsWithMemory(f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
defer cleanup2()

waitForAllCaPodsReadyInNamespace(f, c)
// wait and check scale down doesn't occur
By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
ginkgo.By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
time.Sleep(scaleDownTimeout)

By("Checking if the number of nodes is as expected")
ginkgo.By("Checking if the number of nodes is as expected")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
- Expect(len(nodes.Items)).Should(Equal(totalNodes))
+ gomega.Expect(len(nodes.Items)).Should(gomega.Equal(totalNodes))
})

Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
ginkgo.Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
// Start a number of pods saturating existing nodes.
perNodeReservation := int(float64(memCapacityMb) * 0.80)
replicasPerNode := 10
@@ -348,7 +348,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)

// Ensure that no new nodes have been added so far.
- Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
+ gomega.Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(gomega.Equal(nodeCount))

// Start a number of schedulable pods to ensure CA reacts.
additionalNodes := maxNodes - nodeCount
Expand All @@ -375,7 +375,7 @@ func anyKey(input map[string]int) string {
func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error {
// resize cluster to start size
// run rc based on config
By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
start := time.Now()
framework.ExpectNoError(framework.RunRC(*config.extraPods))
// check results
@@ -461,7 +461,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e
}

func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error {
By(fmt.Sprintf("Running RC which reserves host port and memory"))
ginkgo.By(fmt.Sprintf("Running RC which reserves host port and memory"))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,