Add extra-params for test flags/params & test user-defined schedulers to be in line with kubernetes#6235
atwamahmoud committed Jan 10, 2024
1 parent 6ad177c commit 5fa04d2
Showing 2 changed files with 37 additions and 5 deletions.
36 changes: 31 additions & 5 deletions test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -92,6 +92,8 @@ const (
highPriorityClassName = "high-priority"

gpuLabel = "cloud.google.com/gke-accelerator"

nonExistingIgnoredSchedulerNameKey = "non-existing-ignored-scheduler"
)

var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() {
@@ -1002,10 +1004,15 @@ var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() {
})

ginkgo.It("should scale up when unprocessed pod is created and is going to be unschedulable[Feature:ClusterScaleUpIgnoringScheduler]", func(ctx context.Context) {
schedulerName, found := framework.TestContext.ExtraParams[nonExistingIgnoredSchedulerNameKey]
if !found {
framework.Logf("Skipping test, Didn't find an ignored non-existent scheduler name to use")
return
}
// 70% of allocatable memory of a single node * replica count, forcing a scale up in case of normal pods
-	replicaCount := 2*nodeCount
-	reservedMemory := int(float64(replicaCount)*float64(0.7)*float64(memAllocatableMb))
-	cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, "non-existent-scheduler")
+	replicaCount := 2 * nodeCount
+	reservedMemory := int(float64(replicaCount) * float64(0.7) * float64(memAllocatableMb))
+	cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName)
defer cleanupFunc()
// Verify that cluster size is increased
ginkgo.By("Waiting for cluster scale-up")
@@ -1016,10 +1023,15 @@
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, sizeFunc, scaleUpTimeout, 0))
})
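These tests hinge on the pod's spec.schedulerName: a pod that names a scheduler nobody runs is never processed, so it stays Pending without the default scheduler ever marking it unschedulable, and only an autoscaler configured to ignore that scheduler (kubernetes#6235) reacts to it. A minimal sketch of the kind of pod a helper like ReserveMemoryWithSchedulerName presumably creates (the helper's real implementation is not shown in this diff; the function name and image below are illustrative):

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// reservationPod is a hypothetical sketch of a memory-reserving pod bound to a
// custom scheduler. Because spec.schedulerName matches no running scheduler,
// the pod stays Pending and never receives a schedulability verdict.
func reservationPod(name, schedulerName string, memoryMb int64) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			SchedulerName: schedulerName, // e.g. the value passed via --extra-params
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "registry.k8s.io/pause:3.9",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceMemory: *resource.NewQuantity(memoryMb*1024*1024, resource.BinarySI),
					},
				},
			}},
		},
	}
}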
ginkgo.It("shouldn't scale up when unprocessed pod is created and is going to be schedulable[Feature:ClusterScaleUpIgnoringScheduler]", func(ctx context.Context) {
schedulerName, found := framework.TestContext.ExtraParams[nonExistingIgnoredSchedulerNameKey]
if !found {
framework.Logf("Skipping test, Didn't find an ignored non-existent scheduler name to use")
return
}
// 50% of allocatable memory of a single node, so that no scale up would trigger in normal cases
replicaCount := 1
-	reservedMemory := int(float64(0.5)*float64(memAllocatableMb))
-	cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, "non-existent-scheduler")
+	reservedMemory := int(float64(0.5) * float64(memAllocatableMb))
+	cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName)
defer cleanupFunc()
// Verify that cluster size is the same
ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
@@ -1029,6 +1041,20 @@
}
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, sizeFunc, time.Second, 0))
})
ginkgo.It("shouldn't scale up when unprocessed pod is created and scheduler is not specified to be ignored[Feature:ClusterScaleUpIgnoringScheduler]", func(ctx context.Context) {
// 70% of allocatable memory of a single node * replica count, forcing a scale up in case of normal pods
replicaCount := 2 * nodeCount
reservedMemory := int(float64(replicaCount) * float64(0.7) * float64(memAllocatableMb))
schedulerName := "non-existent-scheduler-" + f.UniqueName
cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName)
defer cleanupFunc()
// Verify that cluster size is the same
ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
sizeFunc := func(size int) bool {
return size == nodeCount
}
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(ctx, f.ClientSet, sizeFunc, time.Second, 0))
})
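Taken together, the three tests differ only in whether the pods' scheduler name was registered as one the autoscaler ignores: the name supplied under the non-existing-ignored-scheduler key triggers a scale-up, while an arbitrary unknown scheduler does not. A hypothetical invocation wiring up the e2e side (the autoscaler must separately be configured to bypass that scheduler, per kubernetes#6235; the scheduler name here is made up):

e2e.test --ginkgo.focus='ClusterScaleUpIgnoringScheduler' --extra-params=non-existing-ignored-scheduler=ignored-scheduler-e2e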
})

func installNvidiaDriversDaemonSet(ctx context.Context, f *framework.Framework) {
6 changes: 6 additions & 0 deletions test/e2e/framework/test_context.go
@@ -227,6 +227,10 @@ type TestContextType struct {

// Enable volume drivers which are disabled by default. See test/e2e/storage/in_tree_volumes.go for details.
EnabledVolumeDrivers []string

// Similar to NodeTestContextType.ExtraEnvs,
// ExtraParams is a map of extra parameter names to values.
ExtraParams map[string]string
}

// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@@ -391,6 +395,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.IntVar(&TestContext.SnapshotControllerHTTPPort, "snapshot-controller-http-port", 0, "The port to use for snapshot controller HTTP communication.")

flags.Var(&stringArrayValue{&TestContext.EnabledVolumeDrivers}, "enabled-volume-drivers", "Comma-separated list of in-tree volume drivers to enable for testing. This is only needed for in-tree drivers disabled by default. An example is gcepd; see test/e2e/storage/in_tree_volumes.go for full details.")

flags.Var(cliflag.NewMapStringString(&TestContext.ExtraParams), "extra-params", "Extra parameters that might be needed for some e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2")
}
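For reference, cliflag.NewMapStringString comes from k8s.io/component-base/cli/flag and parses comma-separated key=value pairs into the target map. A minimal standalone sketch of the parsing behavior the new flag relies on (flag set name and sample values are made up):

package main

import (
	"flag"
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	extraParams := map[string]string{}
	fs := flag.NewFlagSet("e2e", flag.ExitOnError)
	fs.Var(cliflag.NewMapStringString(&extraParams), "extra-params", "key=value pairs")

	// --extra-params=non-existing-ignored-scheduler=foo,other=bar
	// yields map[non-existing-ignored-scheduler:foo other:bar]
	_ = fs.Parse([]string{"--extra-params=non-existing-ignored-scheduler=foo,other=bar"})
	fmt.Println(extraParams["non-existing-ignored-scheduler"]) // prints "foo"
}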

func CreateGinkgoConfig() (types.SuiteConfig, types.ReporterConfig) {
