fix: use framework.ExpectNoError instead of gomega #78478

Merged
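Every hunk in this PR applies the same mechanical substitution: `gomega.Expect(err).ToNot(gomega.HaveOccurred(), ...)` becomes `framework.ExpectNoError(err, ...)`. The framework helper wraps the same gomega assertion but reports failures at the caller's line via a stack offset. A minimal sketch of what such a helper looks like (not the verbatim framework source):

```go
package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the test if err is non-nil, formatting the optional
// explanation args and attributing the failure to the caller's line.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset lets helpers that wrap ExpectNoError push the
// reported failure site further up the call stack.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
```

The net effect is shorter call sites and failure output that points at the test line rather than inside the assertion plumbing.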
10 changes: 5 additions & 5 deletions test/e2e/apimachinery/chunking.go
@@ -81,7 +81,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
for {
opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))

@@ -110,7 +110,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
ginkgo.By("retrieving those results all at once")
opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources))
})

@@ -124,7 +124,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
opts := metav1.ListOptions{}
opts.Limit = oneTenth
list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
firstToken := list.Continue
firstRV := list.ResourceVersion
if list.GetContinue() == "" {
@@ -163,7 +163,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
ginkgo.By("retrieving the second page again with the token received with the error message")
opts.Continue = inconsistentToken
list, err = client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
gomega.Expect(list.ResourceVersion).ToNot(gomega.Equal(firstRV))
gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit))
found := int(oneTenth)
@@ -183,7 +183,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
lastRV := list.ResourceVersion
for {
list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
if list.GetContinue() == "" {
gomega.Expect(list.GetRemainingItemCount()).To(gomega.BeNil())
} else {
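For context on what these chunking assertions exercise: a client drains a large collection by passing a Limit and threading the returned continue token back into the next request. A hedged sketch of the full loop (`client`, `ns`, and the error messages follow the test above; `process` is a hypothetical per-page handler):

```go
// Drain a collection in fixed-size chunks; every response except the
// final page carries a non-empty continue token.
opts := metav1.ListOptions{Limit: 25}
for {
	list, err := client.List(opts)
	framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
	process(list.Items) // hypothetical per-page handler
	if list.GetContinue() == "" {
		break // last page reached
	}
	opts.Continue = list.GetContinue()
}
```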
12 changes: 6 additions & 6 deletions test/e2e/storage/drivers/in_tree.go
@@ -859,13 +859,13 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
}
// h.prepPod will be reused in cleanupDriver.
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(prepPod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath init pod")
+framework.ExpectNoError(err, "while creating hostPath init pod")

err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath init pod to succeed")
+framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")

err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath init pod")
+framework.ExpectNoError(err, "while deleting hostPath init pod")
return &hostPathSymlinkVolume{
sourcePath: sourcePath,
targetPath: targetPath,
@@ -881,13 +881,13 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {
v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd}

pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath teardown pod")
+framework.ExpectNoError(err, "while creating hostPath teardown pod")

err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath teardown pod to succeed")
+framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")

err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath teardown pod")
+framework.ExpectNoError(err, "while deleting hostPath teardown pod")
}

// emptydir
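The create/wait/delete sequence above appears twice with only the pod and message prefix differing; after this change each step is one call plus ExpectNoError. A hedged consolidation, under the assumption that such a helper would be worth extracting (`runUtilityPod` is a hypothetical name, not a framework function):

```go
// runUtilityPod runs a short-lived pod to completion and cleans it up,
// failing the test with a contextual message at each step.
func runUtilityPod(f *framework.Framework, pod *v1.Pod, what string) {
	created, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
	framework.ExpectNoError(err, "while creating %s", what)

	err = framework.WaitForPodSuccessInNamespace(f.ClientSet, created.Name, created.Namespace)
	framework.ExpectNoError(err, "while waiting for %s to succeed", what)

	err = framework.DeletePodWithWait(f, f.ClientSet, created)
	framework.ExpectNoError(err, "while deleting %s", what)
}
```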
10 changes: 5 additions & 5 deletions test/e2e/storage/regional_pd.go
@@ -221,13 +221,13 @@ func testZonalFailover(c clientset.Interface, ns string) {
pod := getPod(c, ns, regionalPDLabels)
nodeName := pod.Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
podZone := node.Labels[v1.LabelZoneFailureDomain]

ginkgo.By("tainting nodes in the zone the pod is scheduled in")
selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelZoneFailureDomain: podZone}))
nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()})
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone)

defer func() {
@@ -305,13 +305,13 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
reversePatches[node.Name] = reversePatchBytes

_, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
}

return func() {
for nodeName, reversePatch := range reversePatches {
_, err := c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, reversePatch)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
}
}
}
@@ -534,7 +534,7 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {

func getTwoRandomZones(c clientset.Interface) []string {
zones, err := framework.GetClusterZones(c)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2),
"The test should only be run in multizone clusters.")

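addTaint above patches each node in the zone and returns a closure that applies pre-computed reverse patches. A hedged sketch of how such a forward/reverse patch pair is typically built (assuming `encoding/json` and `k8s.io/apimachinery/pkg/util/strategicpatch` are imported; this guesses at the collapsed parts of the function, and `testTaint` is a hypothetical v1.Taint):

```go
// Build a forward patch that adds the taint and a reverse patch that
// restores the node's original taints.
updated := node.DeepCopy()
updated.Spec.Taints = append(updated.Spec.Taints, testTaint)

oldData, err := json.Marshal(node)
framework.ExpectNoError(err)
newData, err := json.Marshal(updated)
framework.ExpectNoError(err)

patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
framework.ExpectNoError(err)
reversePatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
framework.ExpectNoError(err)

_, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
framework.ExpectNoError(err)
reversePatches[node.Name] = reversePatchBytes
```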
35 changes: 17 additions & 18 deletions test/e2e/storage/testsuites/subpath.go
@@ -36,7 +36,6 @@ import (
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var (
@@ -151,7 +150,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
if l.pod != nil {
ginkgo.By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting pod")
+framework.ExpectNoError(err, "while deleting pod")
l.pod = nil
}

@@ -427,20 +426,20 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
removeUnusedContainers(l.pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")
defer func() {
ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()

// Wait for pod to be running
err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
+framework.ExpectNoError(err, "while waiting for pod to be running")

// Exec into container that mounted the volume, delete subpath directory
rmCmd := fmt.Sprintf("rm -r %s", l.subPathDir)
_, err = podContainerExec(l.pod, 1, rmCmd)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while removing subpath directory")
+framework.ExpectNoError(err, "while removing subpath directory")

// Delete pod (from defer) and wait for it to be successfully deleted
})
@@ -713,7 +712,7 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
@@ -792,17 +791,17 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
+framework.ExpectNoError(err, "while waiting for pod to be running")

ginkgo.By("Failing liveness probe")
out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
e2elog.Logf("Pod exec output: %v", out)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while failing liveness probe")
+framework.ExpectNoError(err, "while failing liveness probe")

// Check that container has restarted
ginkgo.By("Waiting for container to restart")
@@ -824,7 +823,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
}
return false, nil
})
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to restart")
+framework.ExpectNoError(err, "while waiting for container to restart")

// Fix liveness probe
ginkgo.By("Rewriting the file")
@@ -836,7 +835,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
}
out, err = podContainerExec(pod, 1, writeCmd)
e2elog.Logf("Pod exec output: %v", out)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while rewriting the probe file")
+framework.ExpectNoError(err, "while rewriting the probe file")

// Wait for container restarts to stabilize
ginkgo.By("Waiting for container to stop restarting")
@@ -865,7 +864,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
}
return false, nil
})
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to stabilize")
+framework.ExpectNoError(err, "while waiting for container to stabilize")
}

func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
@@ -885,27 +884,27 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")

err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
+framework.ExpectNoError(err, "while waiting for pod to be running")

pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while getting pod")
+framework.ExpectNoError(err, "while getting pod")

utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
}

func formatVolume(f *framework.Framework, pod *v1.Pod) {
ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating volume init pod")
+framework.ExpectNoError(err, "while creating volume init pod")

err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for volume init pod to succeed")
+framework.ExpectNoError(err, "while waiting for volume init pod to succeed")

err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting volume init pod")
+framework.ExpectNoError(err, "while deleting volume init pod")
}

func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string, error) {
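Several hunks in this file move the error returned by a `wait.PollImmediate` loop onto ExpectNoError: the poll closure returns `(done, err)`, and only the final error (nil, an API error, or a timeout) reaches the assertion. A hedged reconstruction of the restart-wait loop above (interval, timeout, and `podName` are assumptions, since the original body is collapsed):

```go
// Poll until some container in the pod reports a restart; API errors
// abort the poll, and a timeout surfaces as wait.ErrWaitTimeout.
err := wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(podName, metav1.GetOptions{})
	if err != nil {
		return false, err // stop polling on API errors
	}
	for _, status := range pod.Status.ContainerStatuses {
		if status.RestartCount > 0 {
			return true, nil // observed a restart
		}
	}
	return false, nil // keep waiting
})
framework.ExpectNoError(err, "while waiting for container to restart")
```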
2 changes: 1 addition & 1 deletion test/e2e/storage/volume_provisioning.go
@@ -1230,7 +1230,7 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten

func getRandomClusterZone(c clientset.Interface) string {
zones, err := framework.GetClusterZones(c)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(len(zones)).ToNot(gomega.Equal(0))

zonesList := zones.UnsortedList()
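Note the division of labor the PR leaves in place: ExpectNoError takes over only the err-is-nil checks, while value assertions (lengths, zone counts, continue tokens) stay on gomega, as getRandomClusterZone above shows. In sketch form:

```go
// Error checks move to the framework helper; value assertions remain gomega.
zones, err := framework.GetClusterZones(c)
framework.ExpectNoError(err)                     // error-nil check
gomega.Expect(len(zones)).ToNot(gomega.Equal(0)) // value assertion, unchanged
```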