Use worker nodes for WaitForStableCluster() #92450

Merged
1 commit merged on Jun 25, 2020
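In short: the scheduling e2e suites stop passing a set of master node names to WaitForStableCluster() and instead collect the ready schedulable worker nodes and pass their names. Below is a minimal sketch of the resulting call pattern; the wrapper function name is hypothetical, while WaitForStableCluster and e2enode.GetReadySchedulableNodes are the functions this diff touches.

package scheduling

import (
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitForStableClusterOnWorkers is a hypothetical wrapper illustrating the
// pattern this PR adopts: collect the names of the ready schedulable worker
// nodes and hand that set to WaitForStableCluster.
func waitForStableClusterOnWorkers(cs clientset.Interface) (int, error) {
	nodeList, err := e2enode.GetReadySchedulableNodes(cs)
	if err != nil {
		return 0, err
	}
	workerNodes := sets.NewString()
	for _, n := range nodeList.Items {
		workerNodes.Insert(n.Name)
	}
	// Blocks until every existing pod on those nodes is scheduled and
	// returns how many scheduled pods there are.
	return WaitForStableCluster(cs, workerNodes), nil
}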
5 changes: 3 additions & 2 deletions test/e2e/framework/node/resource.go
@@ -363,8 +363,9 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
return nodes, nil
}

// GetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
// DeprecatedGetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
// NOTE: This function has been deprecated because of calling DeprecatedMightBeMasterNode().
func DeprecatedGetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
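For context, the body of this helper is collapsed above; roughly, it splits the listed nodes using a name-based master heuristic, which is exactly why it is being deprecated. The sketch below is an illustration only, not the real implementation: the heuristic and the schedulability check are passed in as parameters rather than naming the framework's internal helpers.

package node

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
)

// splitMastersAndWorkers is an illustrative stand-in for the deprecated
// helper: list every node, put names matching a master heuristic into a
// set, and keep the remaining schedulable nodes in a NodeList.
func splitMastersAndWorkers(c clientset.Interface, mightBeMasterNode func(string) bool,
	isSchedulable func(*v1.Node) bool) (sets.String, *v1.NodeList, error) {
	masters := sets.NewString()
	workers := &v1.NodeList{}
	all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, nil, err
	}
	for i := range all.Items {
		n := all.Items[i]
		if mightBeMasterNode(n.Name) {
			masters.Insert(n.Name)
		} else if isSchedulable(&n) {
			workers.Items = append(workers.Items, n)
		}
	}
	return masters, workers, nil
}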
10 changes: 5 additions & 5 deletions test/e2e/scheduling/framework.go
@@ -41,10 +41,10 @@ func SIGDescribe(text string, body func()) bool {
}

// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
func WaitForStableCluster(c clientset.Interface, workerNodes sets.String) int {
startTime := time.Now()
// Wait for all pods to be scheduled.
allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
for len(allNotScheduledPods) != 0 {
time.Sleep(waitTime)
if startTime.Add(timeout).Before(time.Now()) {
@@ -55,7 +55,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
}
return len(allScheduledPods)
}
@@ -79,7 +79,7 @@ func WaitForPodsToBeDeleted(c clientset.Interface) {
}

// getScheduledAndUnscheduledPods lists scheduled and not scheduled pods in the given namespace, with succeeded and failed pods filtered out.
func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
func getScheduledAndUnscheduledPods(c clientset.Interface, workerNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, fmt.Sprintf("listing all pods in namespace %q while waiting for stable cluster", ns))
// API server returns also Pods that succeeded. We need to filter them out.
@@ -90,7 +90,7 @@ func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.Stri
}
}
pods.Items = filteredPods
return GetPodsScheduled(masterNodes, pods)
return GetPodsScheduled(workerNodes, pods)
}

// getDeletingPods returns whether there are any pods marked for deletion.
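The collapsed part of getScheduledAndUnscheduledPods drops pods that have already finished, per its doc comment, before GetPodsScheduled splits the remainder. A hedged sketch of that filtering step follows; the helper name is made up and the real condition may differ slightly.

package scheduling

import (
	v1 "k8s.io/api/core/v1"
)

// filterOutFinishedPods mirrors the filtering getScheduledAndUnscheduledPods
// performs before calling GetPodsScheduled: pods that already succeeded or
// failed no longer occupy a node, so they are excluded from the stability
// check.
func filterOutFinishedPods(pods []v1.Pod) []v1.Pod {
	filtered := make([]v1.Pod, 0, len(pods))
	for _, p := range pods {
		if p.Status.Phase != v1.PodSucceeded && p.Status.Phase != v1.PodFailed {
			filtered = append(filtered, p)
		}
	}
	return filtered
}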
29 changes: 13 additions & 16 deletions test/e2e/scheduling/predicates.go
@@ -53,7 +53,7 @@ const (
var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")

// variable set in BeforeEach, never modified afterwards
var masterNodes sets.String
var workerNodes sets.String

type pausePodConfig struct {
Name string
@@ -95,17 +95,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {

framework.AllNodesReady(cs, time.Minute)

// NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods.
masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
Review comment (Member):
Can we remove e2enode.GetMasterAndWorkerNodes(), or mark it as deprecated?

Reply (Member Author):
test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go still calls e2enode.GetMasterAndWorkerNodes(), and removing it would take more time.
Marking it as deprecated is a good idea.
I will update this PR soon.

if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}
nodeList, err = e2enode.GetReadySchedulableNodes(cs)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}

framework.ExpectNoErrorWithOffset(0, err)
for _, n := range nodeList.Items {
workerNodes.Insert(n.Name)
}

err = framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
@@ -135,7 +132,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
nodeMaxAllocatable = allocatable.Value()
}
}
WaitForStableCluster(cs, masterNodes)
WaitForStableCluster(cs, workerNodes)
Review comment (Member):

How about renaming the masterNodes parameter of WaitForStableCluster() and getScheduledAndUnscheduledPods() to workerNodes?

Reply (Member Author):

Nice point, I forgot to update it (^^;)


pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
@@ -215,7 +212,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
var beardsecond v1.ResourceName = "example.com/beardsecond"

ginkgo.BeforeEach(func() {
WaitForStableCluster(cs, masterNodes)
WaitForStableCluster(cs, workerNodes)
ginkgo.By("Add RuntimeClass and fake resource")

// find a node which can run a pod:
@@ -323,7 +320,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity.
*/
framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
WaitForStableCluster(cs, masterNodes)
WaitForStableCluster(cs, workerNodes)
nodeMaxAllocatable := int64(0)
nodeToAllocatableMap := make(map[string]int64)
for _, node := range nodeList.Items {
@@ -436,7 +433,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"

WaitForStableCluster(cs, masterNodes)
WaitForStableCluster(cs, workerNodes)

conf := pausePodConfig{
Name: podName,
@@ -491,7 +488,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
podName := "restricted-pod"

WaitForStableCluster(cs, masterNodes)
WaitForStableCluster(cs, workerNodes)

conf := pausePodConfig{
Name: podName,
@@ -933,7 +930,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podN
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods)
scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods)

framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
@@ -1045,10 +1042,10 @@ func translateIPv4ToIPv6(ip string) string {
return ip
}

// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods on worker nodes.
func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if workerNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
framework.ExpectEqual(scheduledCondition != nil, true)
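Worth noting from the GetPodsScheduled hunk above: the old check !masterNodes.Has(...) counted a pod on any non-master node, while the new check workerNodes.Has(...) only counts pods whose node is in the ready-schedulable worker set, so pods on other nodes are now ignored. A small self-contained illustration follows; the node names are made up for the example.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	masterNodes := sets.NewString("master-0")
	workerNodes := sets.NewString("worker-0", "worker-1")

	// A pod landed on a node that is neither a known master nor in the
	// ready-schedulable worker set (e.g. a tainted or not-ready node).
	nodeName := "worker-2"

	oldRule := !masterNodes.Has(nodeName) // pre-PR check: pod is counted
	newRule := workerNodes.Has(nodeName)  // post-PR check: pod is ignored
	fmt.Printf("old rule counts pod: %v, new rule counts pod: %v\n", oldRule, newRule)
	// Output: old rule counts pod: true, new rule counts pod: false
}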
5 changes: 4 additions & 1 deletion test/e2e/scheduling/preemption.go
@@ -97,11 +97,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}

e2enode.WaitForTotalHealthy(cs, time.Minute)
masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
nodeList, err = e2enode.GetReadySchedulableNodes(cs)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}
framework.ExpectNoErrorWithOffset(0, err)
for _, n := range nodeList.Items {
workerNodes.Insert(n.Name)
}

err = framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
2 changes: 1 addition & 1 deletion test/e2e/scheduling/priorities.go
@@ -144,7 +144,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
var err error

e2enode.WaitForTotalHealthy(cs, time.Minute)
_, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
nodeList, err = e2enode.GetReadySchedulableNodes(cs)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}
2 changes: 1 addition & 1 deletion test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
@@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters = make(map[string]string)
_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err)
masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
masternodes, _, err := e2enode.DeprecatedGetMasterAndWorkerNodes(client)
framework.ExpectNoError(err)
gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
masterNode = masternodes.List()[0]