Merge pull request #895 from epam/podscont-selector-assignments
[tests] NodeSelector assignment when using quota only for pod count
k8s-ci-robot committed Jun 26, 2023
2 parents ddb2006 + 5081111 commit b463573
Showing 2 changed files with 101 additions and 7 deletions.
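
The new tests exercise a ClusterQueue whose quota covers only the pods resource (pod count), with no CPU or memory quota; workloads admitted through it should still receive the admitted flavor's node selectors. For orientation, here is a rough sketch of the object that testing.MakeClusterQueue(...).ResourceGroup(...) constructs in the tests below, written out against the kueue v1beta1 API types. This is illustrative only (the variable name and literal values mirror the tests but are not part of this commit); it assumes the usual imports: corev1 "k8s.io/api/core/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/api/resource", and kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1".

	// Sketch: roughly what the test builder produces for a pods-count-only queue.
	podsOnlyCQ := &kueue.ClusterQueue{
		ObjectMeta: metav1.ObjectMeta{Name: "pods-clusterqueue"},
		Spec: kueue.ClusterQueueSpec{
			ResourceGroups: []kueue.ResourceGroup{{
				// Quota is defined for pod count only; no cpu/memory resources are covered.
				CoveredResources: []corev1.ResourceName{corev1.ResourcePods},
				Flavors: []kueue.FlavorQuotas{{
					// The "on-demand" ResourceFlavor carries the node labels that
					// should be injected into the pod templates of admitted workloads.
					Name: "on-demand",
					Resources: []kueue.ResourceQuota{{
						Name:         corev1.ResourcePods,
						NominalQuota: resource.MustParse("5"),
					}},
				}},
			}},
		},
	}
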
23 changes: 23 additions & 0 deletions test/integration/controller/job/job_controller_test.go
@@ -782,6 +782,7 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", ginkgo.Orde
spotUntaintedFlavor *kueue.ResourceFlavor
prodClusterQ *kueue.ClusterQueue
devClusterQ *kueue.ClusterQueue
podsCountClusterQ *kueue.ClusterQueue
prodLocalQ *kueue.LocalQueue
devLocalQ *kueue.LocalQueue
)
@@ -838,12 +839,19 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", ginkgo.Orde
}).
Obj()
gomega.Expect(k8sClient.Create(ctx, devClusterQ)).Should(gomega.Succeed())
podsCountClusterQ = testing.MakeClusterQueue("pods-clusterqueue").
ResourceGroup(
*testing.MakeFlavorQuotas("on-demand").Resource(corev1.ResourcePods, "5").Obj(),
).
Obj()
gomega.Expect(k8sClient.Create(ctx, podsCountClusterQ)).Should(gomega.Succeed())
})

ginkgo.AfterEach(func() {
gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, prodClusterQ, true)
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, devClusterQ, true)
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, podsCountClusterQ, true)
util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
gomega.Expect(util.DeleteResourceFlavor(ctx, k8sClient, spotTaintedFlavor)).To(gomega.Succeed())
gomega.Expect(util.DeleteResourceFlavor(ctx, k8sClient, spotUntaintedFlavor)).To(gomega.Succeed())
@@ -1201,6 +1209,21 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", ginkgo.Orde
gomega.Expect(features.SetEnable(features.PartialAdmission, origPartialAdmission)).To(gomega.Succeed())
})
})

ginkgo.It("Should set the flavor's node selectors if the job is admitted by pods count only", func() {
localQ := testing.MakeLocalQueue("dev-queue", ns.Name).ClusterQueue(podsCountClusterQ.Name).Obj()
gomega.Expect(k8sClient.Create(ctx, localQ)).Should(gomega.Succeed())
ginkgo.By("Creating a job with no requests, will set the resource flavors selectors when admitted ", func() {
job := testingjob.MakeJob("job", ns.Name).
Queue(localQ.Name).
Parallelism(2).
Obj()
gomega.Expect(k8sClient.Create(ctx, job)).Should(gomega.Succeed())
expectJobUnsuspendedWithNodeSelectors(client.ObjectKeyFromObject(job), map[string]string{
instanceKey: "on-demand",
})
})
})
})

func expectJobUnsuspendedWithNodeSelectors(key types.NamespacedName, ns map[string]string) {
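
The body of expectJobUnsuspendedWithNodeSelectors is collapsed in this view. Purely as an illustration of the kind of check it performs (this is a hypothetical sketch, not the actual helper), such a check could poll the Job until it is unsuspended and its pod template carries the expected node selectors:

	// Hypothetical sketch only. Assumes the suite-level ctx and k8sClient, the
	// test/util Timeout and Interval constants used elsewhere in these tests,
	// and imports batchv1 "k8s.io/api/batch/v1", "k8s.io/apimachinery/pkg/types",
	// and "github.com/onsi/gomega".
	func expectJobUnsuspendedWithNodeSelectorsSketch(key types.NamespacedName, nodeSelector map[string]string) {
		job := &batchv1.Job{}
		gomega.EventuallyWithOffset(1, func(g gomega.Gomega) {
			g.Expect(k8sClient.Get(ctx, key, job)).To(gomega.Succeed())
			// The job must be unsuspended ...
			g.Expect(job.Spec.Suspend).NotTo(gomega.BeNil())
			g.Expect(*job.Spec.Suspend).To(gomega.BeFalse())
			// ... and its pod template must carry the flavor's node selectors.
			g.Expect(job.Spec.Template.Spec.NodeSelector).To(gomega.Equal(nodeSelector))
		}, util.Timeout, util.Interval).Should(gomega.Succeed())
	}
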
85 changes: 78 additions & 7 deletions test/integration/scheduler/scheduler_test.go
@@ -79,12 +79,14 @@ var _ = ginkgo.Describe("Scheduler", func() {

ginkgo.When("Scheduling workloads on clusterQueues", func() {
var (
prodClusterQ *kueue.ClusterQueue
devClusterQ *kueue.ClusterQueue
podsCountClusterQ *kueue.ClusterQueue
prodQueue *kueue.LocalQueue
devQueue *kueue.LocalQueue
podsCountQueue *kueue.LocalQueue
prodClusterQ *kueue.ClusterQueue
devClusterQ *kueue.ClusterQueue
podsCountClusterQ *kueue.ClusterQueue
podsCountOnlyClusterQ *kueue.ClusterQueue
prodQueue *kueue.LocalQueue
devQueue *kueue.LocalQueue
podsCountQueue *kueue.LocalQueue
podsCountOnlyQueue *kueue.LocalQueue
)

ginkgo.BeforeEach(func() {
@@ -118,6 +120,15 @@ var _ = ginkgo.Describe("Scheduler", func() {
Obj()
gomega.Expect(k8sClient.Create(ctx, podsCountClusterQ)).Should(gomega.Succeed())

podsCountOnlyClusterQ = testing.MakeClusterQueue("pods-count-only-cq").
ResourceGroup(
*testing.MakeFlavorQuotas("on-demand").
Resource(corev1.ResourcePods, "5").
Obj(),
).
Obj()
gomega.Expect(k8sClient.Create(ctx, podsCountOnlyClusterQ)).Should(gomega.Succeed())

prodQueue = testing.MakeLocalQueue("prod-queue", ns.Name).ClusterQueue(prodClusterQ.Name).Obj()
gomega.Expect(k8sClient.Create(ctx, prodQueue)).Should(gomega.Succeed())

@@ -126,13 +137,17 @@ var _ = ginkgo.Describe("Scheduler", func() {

podsCountQueue = testing.MakeLocalQueue("pods-count-queue", ns.Name).ClusterQueue(podsCountClusterQ.Name).Obj()
gomega.Expect(k8sClient.Create(ctx, podsCountQueue)).Should(gomega.Succeed())

podsCountOnlyQueue = testing.MakeLocalQueue("pods-count-only-queue", ns.Name).ClusterQueue(podsCountOnlyClusterQ.Name).Obj()
gomega.Expect(k8sClient.Create(ctx, podsCountOnlyQueue)).Should(gomega.Succeed())
})

ginkgo.AfterEach(func() {
gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, prodClusterQ, true)
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, devClusterQ, true)
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, podsCountClusterQ, true)
util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, podsCountOnlyClusterQ, true)
util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, spotTaintedFlavor, true)
util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, spotUntaintedFlavor, true)
@@ -172,7 +187,7 @@ var _ = ginkgo.Describe("Scheduler", func() {
util.ExpectAdmittedWorkloadsTotalMetric(prodClusterQ, 2)
})

ginkgo.It("Should admit workloads as NumPodsResourceName allows it", func() {
ginkgo.It("Should admit workloads as number of pods allows it", func() {
wl1 := testing.MakeWorkload("wl1", ns.Name).
Queue(podsCountQueue.Name).
PodSets(*testing.MakePodSet("main", 3).
@@ -232,6 +247,62 @@ var _ = ginkgo.Describe("Scheduler", func() {
})
})

ginkgo.It("Should admit workloads as the number of pods (only) allows it", func() {
wl1 := testing.MakeWorkload("wl1", ns.Name).
Queue(podsCountOnlyQueue.Name).
PodSets(*testing.MakePodSet("main", 3).
Obj()).
Obj()

ginkgo.By("checking the first workload gets created and admitted", func() {
gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed())
wl1Admission := testing.MakeAdmission(podsCountOnlyClusterQ.Name).
Assignment(corev1.ResourcePods, "on-demand", "3").
AssignmentPodCount(3).
Obj()
util.ExpectWorkloadToBeAdmittedAs(ctx, k8sClient, wl1, wl1Admission)
util.ExpectPendingWorkloadsMetric(podsCountOnlyClusterQ, 0, 0)
util.ExpectAdmittedActiveWorkloadsMetric(podsCountOnlyClusterQ, 1)
util.ExpectAdmittedWorkloadsTotalMetric(podsCountOnlyClusterQ, 1)
})

wl2 := testing.MakeWorkload("wl2", ns.Name).
Queue(podsCountOnlyQueue.Name).
PodSets(*testing.MakePodSet("main", 3).
Obj()).
Obj()

wl3 := testing.MakeWorkload("wl3", ns.Name).
Queue(podsCountOnlyQueue.Name).
PodSets(*testing.MakePodSet("main", 2).
Obj()).
Obj()

ginkgo.By("creating the next two workloads", func() {
gomega.Expect(k8sClient.Create(ctx, wl2)).Should(gomega.Succeed())
gomega.Expect(k8sClient.Create(ctx, wl3)).Should(gomega.Succeed())
})

ginkgo.By("checking the second workload is pending and the third admitted", func() {
util.ExpectWorkloadsToBeAdmitted(ctx, k8sClient, podsCountOnlyClusterQ.Name, wl1, wl3)
util.ExpectWorkloadsToBePending(ctx, k8sClient, wl2)
util.ExpectPendingWorkloadsMetric(podsCountOnlyClusterQ, 0, 1)
util.ExpectAdmittedActiveWorkloadsMetric(podsCountOnlyClusterQ, 2)
util.ExpectAdmittedWorkloadsTotalMetric(podsCountOnlyClusterQ, 2)
})

ginkgo.By("finishing the first workload", func() {
util.FinishWorkloads(ctx, k8sClient, wl1)
})

ginkgo.By("checking the second workload is also admitted", func() {
util.ExpectWorkloadsToBeAdmitted(ctx, k8sClient, podsCountOnlyClusterQ.Name, wl2, wl3)
util.ExpectPendingWorkloadsMetric(podsCountOnlyClusterQ, 0, 0)
util.ExpectAdmittedActiveWorkloadsMetric(podsCountOnlyClusterQ, 2)
util.ExpectAdmittedWorkloadsTotalMetric(podsCountOnlyClusterQ, 3)
})
})

ginkgo.It("Should admit workloads according to their priorities", func() {
queue := testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(prodClusterQ.Name).Obj()

