use framework.ExpectNoError() for daemon_set.go and deployment.go #77579

Merged · 1 commit · May 14, 2019
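
The change itself is mechanical: every bare gomega error assertion becomes a call to the framework helper, and assertions that carried a message keep it as the helper's explanation argument. A condensed before/after of the pattern, lifted from the hunks below (a fragment for illustration, not a compilable test):

```go
// Before: asserting on the error directly with gomega.
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// After: the framework helper wraps the same assertion and
// optionally takes an explanation for the failure output.
framework.ExpectNoError(err)
framework.ExpectNoError(err, "error waiting for daemon pod to start")
```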
72 changes: 36 additions & 36 deletions test/e2e/apps/daemon_set.go
@@ -89,7 +89,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("unable to dump pods: %v", err)
}
err = clearDaemonSetNodeLabels(f.ClientSet)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
})

f = framework.NewDefaultFramework("daemonsets")
@@ -106,12 +106,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
c = f.ClientSet

updatedNS, err := updateNamespaceAnnotations(c, ns)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ns = updatedNS.Name

err = clearDaemonSetNodeLabels(c)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
})

/*
@@ -124,19 +124,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label)
pod := podList.Items[0]
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to revive")
})
@@ -153,7 +153,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, complexLabel)
ds.Spec.Template.Spec.NodeSelector = nodeSelector
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
@@ -169,14 +169,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
nodeSelector[daemonsetColorLabel] = "green"
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
framework.ExpectNoError(err, "error removing labels on node")
- gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
- NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+ err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+ framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")

ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
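
Note the shape change in the hunk above: where the old code chained the assertion around the wait.PollImmediate call itself, the rewrite assigns the error first and then asserts, moving the failure message from NotTo into the helper. A sketch of the resulting idiom (the printf-style explanation is an assumption based on gomega's optional-description behavior, not something this diff exercises):

```go
// Hypothetical usage: the explain arguments may be a format string plus values.
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pods on node %s", newNode.Name)
```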
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
})

// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
},
}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
@@ -232,13 +232,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
framework.ExpectNoError(err, "error removing labels on node")
- gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
- NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+ err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+ framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
})

/*
@@ -250,13 +250,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label)
@@ -282,15 +282,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds)
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -300,19 +300,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods images aren't updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
@@ -331,15 +331,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -349,26 +349,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
// Get the number of nodes, and set the timeout appropriately.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
nodeCount := len(nodes.Items)
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second

ginkgo.By("Check that daemon pods images are updated.")
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds)
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
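
The rolling-update test above scales its poll timeout with cluster size, since the upgrade proceeds roughly node by node. A self-contained sketch of that arithmetic (the base timeout value here is an assumption for illustration; the real dsRetryTimeout constant is defined elsewhere in this file):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	dsRetryTimeout := 5 * time.Minute // assumed value, not the framework's
	nodeCount := 50

	// Same formula as the diff: base timeout plus 30s of budget per node.
	retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
	fmt.Println(retryTimeout) // 30m0s
}
```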
@@ -389,7 +389,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

e2elog.Logf("Check that daemon pods launch on every node of the cluster")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -401,11 +401,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

// Make sure we're in the middle of a rollout
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

pods := listDaemonPods(c, ns, label)
var existingPods, newPods []*v1.Pod
@@ -433,11 +433,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image
})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

e2elog.Logf("Make sure DaemonSet rollback is complete")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)

// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
pods = listDaemonPods(c, ns, label)
@@ -487,7 +487,7 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
return podList
}
@@ -748,7 +748,7 @@ func listDaemonHistories(c clientset.Interface, ns string, label map[string]stri
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
return historyList
}
@@ -761,7 +761,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
// Every history should have the hash label
gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
match, err := daemon.Match(ds, history)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
if match {
curHistory = history
foundCurHistories++
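
For reference, the helper being adopted is a thin wrapper over the same gomega assertion. A minimal sketch of such a helper, assuming gomega's offset API (the real definition lives in test/e2e/framework and may differ):

```go
package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the test if err is non-nil; the variadic explain
// arguments become the failure description. Offset 1 makes gomega report
// the caller's line rather than this helper's.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}
```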