New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Use worker nodes for WaitForStableCluster() #92450
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -53,7 +53,7 @@ const ( | |
var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0") | ||
|
||
// variable set in BeforeEach, never modified afterwards | ||
var masterNodes sets.String | ||
var workerNodes sets.String | ||
|
||
type pausePodConfig struct { | ||
Name string | ||
|
@@ -95,17 +95,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { | |
|
||
framework.AllNodesReady(cs, time.Minute) | ||
|
||
// NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods. | ||
masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs) | ||
if err != nil { | ||
framework.Logf("Unexpected error occurred: %v", err) | ||
} | ||
nodeList, err = e2enode.GetReadySchedulableNodes(cs) | ||
if err != nil { | ||
framework.Logf("Unexpected error occurred: %v", err) | ||
} | ||
|
||
framework.ExpectNoErrorWithOffset(0, err) | ||
for _, n := range nodeList.Items { | ||
workerNodes.Insert(n.Name) | ||
} | ||
|
||
err = framework.CheckTestingNSDeletedExcept(cs, ns) | ||
framework.ExpectNoError(err) | ||
|
@@ -135,7 +132,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { | |
nodeMaxAllocatable = allocatable.Value() | ||
} | ||
} | ||
WaitForStableCluster(cs, masterNodes) | ||
WaitForStableCluster(cs, workerNodes) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. How about updating the parameter There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Nice point, I forgot to update it (^^;) |
||
|
||
pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) | ||
framework.ExpectNoError(err) | ||
|
@@ -215,7 +212,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { | |
var beardsecond v1.ResourceName = "example.com/beardsecond" | ||
|
||
ginkgo.BeforeEach(func() { | ||
WaitForStableCluster(cs, masterNodes) | ||
WaitForStableCluster(cs, workerNodes) | ||
ginkgo.By("Add RuntimeClass and fake resource") | ||
|
||
// find a node which can run a pod: | ||
|
@@ -323,7 +320,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { | |
Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity. | ||
*/ | ||
framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() { | ||
WaitForStableCluster(cs, masterNodes) | ||
WaitForStableCluster(cs, workerNodes) | ||
nodeMaxAllocatable := int64(0) | ||
nodeToAllocatableMap := make(map[string]int64) | ||
for _, node := range nodeList.Items { | ||
|
@@ -436,7 +433,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { | |
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.") | ||
podName := "restricted-pod" | ||
|
||
WaitForStableCluster(cs, masterNodes) | ||
WaitForStableCluster(cs, workerNodes) | ||
|
||
conf := pausePodConfig{ | ||
Name: podName, | ||
|
@@ -491,7 +488,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { | |
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.") | ||
podName := "restricted-pod" | ||
|
||
WaitForStableCluster(cs, masterNodes) | ||
WaitForStableCluster(cs, workerNodes) | ||
|
||
conf := pausePodConfig{ | ||
Name: podName, | ||
|
@@ -933,7 +930,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podN | |
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { | ||
allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) | ||
framework.ExpectNoError(err) | ||
scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods) | ||
scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods) | ||
|
||
framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)) | ||
framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)) | ||
|
@@ -1045,10 +1042,10 @@ func translateIPv4ToIPv6(ip string) string { | |
return ip | ||
} | ||
|
||
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods. | ||
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) { | ||
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods on worker nodes. | ||
func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) { | ||
for _, pod := range pods.Items { | ||
if !masterNodes.Has(pod.Spec.NodeName) { | ||
if workerNodes.Has(pod.Spec.NodeName) { | ||
if pod.Spec.NodeName != "" { | ||
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) | ||
framework.ExpectEqual(scheduledCondition != nil, true) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can we remove e2enode.GetMasterAndWorkerNodes(), or mark it as deprecated?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go still calls e2enode.GetMasterAndWorkerNodes(), and removing it will take more time.
It is a good idea to mark it as deprecated.
I will update this PR soon.