From 5fa04d2627a68dccfc856e9c5ca86ed8332cb534 Mon Sep 17 00:00:00 2001
From: Mahmoud Atwa
Date: Wed, 8 Nov 2023 13:49:05 +0000
Subject: [PATCH] Add extra-params for test flags/params & test user-defined
 schedulers to be in line with #6235

---
 .../autoscaling/cluster_size_autoscaling.go | 36 ++++++++++++++++---
 test/e2e/framework/test_context.go          |  6 ++++
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index f6a28ecbc9762..4428c937a7a37 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -92,6 +92,8 @@ const (
 	highPriorityClassName = "high-priority"
 
 	gpuLabel = "cloud.google.com/gke-accelerator"
+
+	nonExistingIgnoredSchedulerNameKey = "non-existing-ignored-scheduler"
 )
 
 var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() {
@@ -1002,10 +1004,15 @@ var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() {
 	})
 
 	ginkgo.It("should scale up when unprocessed pod is created and is going to be unschedulable[Feature:ClusterScaleUpIgnoringScheduler]", func(ctx context.Context) {
+		schedulerName, found := framework.TestContext.ExtraParams[nonExistingIgnoredSchedulerNameKey]
+		if !found {
+			framework.Logf("Skipping test, didn't find an ignored non-existent scheduler name to use")
+			return
+		}
 		// 70% of allocatable memory of a single node * replica count, forcing a scale up in case of normal pods
-		replicaCount := 2*nodeCount
-		reservedMemory := int(float64(replicaCount)*float64(0.7)*float64(memAllocatableMb))
-		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, "non-existent-scheduler")
+		replicaCount := 2 * nodeCount
+		reservedMemory := int(float64(replicaCount) * float64(0.7) * float64(memAllocatableMb))
+		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName)
 		defer cleanupFunc()
 		// Verify that cluster size is increased
 		ginkgo.By("Waiting for cluster scale-up")
@@ -1016,10 +1023,15 @@ var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() {
 		framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, sizeFunc, scaleUpTimeout, 0))
 	})
 	ginkgo.It("shouldn't scale up when unprocessed pod is created and is going to be schedulable[Feature:ClusterScaleUpIgnoringScheduler]", func(ctx context.Context) {
+		schedulerName, found := framework.TestContext.ExtraParams[nonExistingIgnoredSchedulerNameKey]
+		if !found {
+			framework.Logf("Skipping test, didn't find an ignored non-existent scheduler name to use")
+			return
+		}
 		// 50% of allocatable memory of a single node, so that no scale up would trigger in normal cases
 		replicaCount := 1
-		reservedMemory := int(float64(0.5)*float64(memAllocatableMb))
-		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, "non-existent-scheduler")
+		reservedMemory := int(float64(0.5) * float64(memAllocatableMb))
+		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName)
 		defer cleanupFunc()
 		// Verify that cluster size is the same
 		ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
@@ -1029,6 +1041,20 @@ var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() {
 		}
 		framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, sizeFunc, time.Second, 0))
 	})
+	ginkgo.It("shouldn't scale up when unprocessed pod is created and scheduler is not specified to be ignored[Feature:ClusterScaleUpIgnoringScheduler]", func(ctx context.Context) {
+		// 70% of allocatable memory of a single node * replica count, forcing a scale up in case of normal pods
+		replicaCount := 2 * nodeCount
+		reservedMemory := int(float64(replicaCount) * float64(0.7) * float64(memAllocatableMb))
+		schedulerName := "non-existent-scheduler-" + f.UniqueName
+		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName)
+		defer cleanupFunc()
+		// Verify that cluster size is the same
+		ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
+		sizeFunc := func(size int) bool {
+			return size == nodeCount
+		}
+		framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, sizeFunc, time.Second, 0))
+	})
 })
 
 func installNvidiaDriversDaemonSet(ctx context.Context, f *framework.Framework) {
diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go
index 3f1622dc8772e..3b19547defb47 100644
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -227,6 +227,10 @@ type TestContextType struct {
 
 	// Enable volume drivers which are disabled by default. See test/e2e/storage/in_tree_volumes.go for details.
 	EnabledVolumeDrivers []string
+
+	// Similar to NodeTestContextType.ExtraEnvs,
+	// ExtraParams is a map of extra parameter names to values.
+	ExtraParams map[string]string
 }
 
 // NodeKillerConfig describes configuration of NodeKiller -- a utility to
@@ -391,6 +395,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
 	flags.IntVar(&TestContext.SnapshotControllerHTTPPort, "snapshot-controller-http-port", 0, "The port to use for snapshot controller HTTP communication.")
 
 	flags.Var(&stringArrayValue{&TestContext.EnabledVolumeDrivers}, "enabled-volume-drivers", "Comma-separated list of in-tree volume drivers to enable for testing. This is only needed for in-tree drivers disabled by default. An example is gcepd; see test/e2e/storage/in_tree_volumes.go for full details.")
+
+	flags.Var(cliflag.NewMapStringString(&TestContext.ExtraParams), "extra-params", "Extra parameters that might be needed for some e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2")
 }
 
 func CreateGinkgoConfig() (types.SuiteConfig, types.ReporterConfig) {
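
Usage note (editorial, not part of the patch): the two tests that read
framework.TestContext.ExtraParams only exercise the ignored-scheduler scale-up behaviour when a
scheduler name is supplied for the "non-existing-ignored-scheduler" key through the new
--extra-params flag, following the key=value format described in the flag's help text, for example:

    --extra-params=non-existing-ignored-scheduler=foo-scheduler

Here "foo-scheduler" is only an illustrative value; it is expected to be the name of a scheduler
that the cluster autoscaler under test treats as ignored (per #6235, referenced in the subject).
If the key is absent, those tests log a message and return early. The third test generates a
per-run non-existent scheduler name from f.UniqueName and needs no extra parameter.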