refactor: use framework.ExpectNoError instead in e2e tests #77159

Merged
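This PR replaces the long-hand gomega assertion with the framework's ExpectNoError helper across the e2e tests, and drops the gomega imports that become unused as a result. The pattern applied at every call site:

    // Before: spell out the gomega assertion at each call site.
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    // After: one helper call expresses the same check.
    framework.ExpectNoError(err)

A minimal sketch of what the helper presumably looks like inside test/e2e/framework (an assumption for illustration, not copied from this PR's diff):

    func ExpectNoError(err error, explain ...interface{}) {
        // Offset of 1 so a failure is reported at the caller's line, not here.
        gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
    }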
360 changes: 180 additions & 180 deletions test/e2e/apimachinery/resource_quota.go

Large diffs are not rendered by default.

1 change: 0 additions & 1 deletion test/e2e/instrumentation/logging/elasticsearch/BUILD
@@ -23,7 +23,6 @@ go_library(
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)

9 changes: 4 additions & 5 deletions test/e2e/instrumentation/logging/elasticsearch/kibana.go
@@ -26,7 +26,6 @@ import (
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
)

@@ -67,17 +66,17 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
}
return true, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

// Wait for the Kibana pod(s) to enter the running state.
ginkgo.By("Checking to make sure the Kibana pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
}

ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
@@ -101,5 +100,5 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
}
return true, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
}
20 changes: 10 additions & 10 deletions test/e2e/kubectl/kubectl.go
@@ -140,7 +140,7 @@ func runKubectlRetryOrDie(args ...string) string {
// Expect no errors to be present after retries are finished
// Copied from framework #ExecOrDie
framework.Logf("stdout: %q", output)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
return output
}

@@ -936,7 +936,7 @@ metadata:
*/
framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() {
kv, err := framework.KubectlVersion()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
framework.SkipUnlessServerVersionGTE(kv, c.Discovery())
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename)))
serviceJSON := readTestFileOrDie(redisServiceFilename)
@@ -1002,7 +1002,7 @@ metadata:
// Node
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
node := nodes.Items[0]
output = framework.RunKubectlOrDie("describe", "node", node.Name)
requiredStrings = [][]string{
@@ -1091,10 +1091,10 @@ metadata:
}
return true, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

if len(service.Spec.Ports) != 1 {
framework.Failf("1 port is expected")
@@ -1238,7 +1238,7 @@ metadata:
forEachPod(func(pod v1.Pod) {
ginkgo.By("checking for a matching strings")
_, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

ginkgo.By("limiting log lines")
out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1")
@@ -1486,7 +1486,7 @@ metadata:
}
return true, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
})

/*
@@ -1703,7 +1703,7 @@ metadata:
gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

err := jobutil.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

ginkgo.By("verifying the job " + jobName + " was deleted")
_, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
@@ -2064,7 +2064,7 @@ func validateGuestbookApp(c clientset.Interface, ns string) {
framework.Logf("Waiting for all frontend pods to be Running.")
label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
framework.Logf("Waiting for frontend to serve content.")
if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
@@ -2149,7 +2149,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
rcs, err = c.CoreV1().ReplicationControllers(ns).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
if len(rcs.Items) > 0 {
break
}
4 changes: 2 additions & 2 deletions test/e2e/servicecatalog/podpreset.go
@@ -77,7 +77,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
if errors.IsNotFound(err) {
framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
}
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

ginkgo.By("creating the pod")
name := "pod-preset-pod"
@@ -195,7 +195,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
if errors.IsNotFound(err) {
framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
}
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

ginkgo.By("creating the pod")
name := "pod-preset-pod"
1 change: 0 additions & 1 deletion test/e2e/ui/BUILD
@@ -16,7 +16,6 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)

7 changes: 3 additions & 4 deletions test/e2e/ui/dashboard.go
@@ -29,7 +29,6 @@ import (
testutils "k8s.io/kubernetes/test/utils"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("Kubernetes Dashboard", func() {
@@ -52,12 +51,12 @@
ginkgo.It("should check that the kubernetes-dashboard instance is alive", func() {
ginkgo.By("Checking whether the kubernetes-dashboard service exists.")
err := framework.WaitForService(f.ClientSet, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

ginkgo.By("Checking to make sure the kubernetes-dashboard pods are running")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName}))
err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, uiNamespace, selector)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

ginkgo.By("Checking to make sure we get a response from the kubernetes-dashboard.")
err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) {
@@ -90,6 +89,6 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
// Don't return err here as it aborts polling.
return status == http.StatusOK, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
})
})
8 changes: 4 additions & 4 deletions test/e2e/upgrades/cassandra.go
@@ -95,7 +95,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
}
return true, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
framework.Logf("Service endpoint is up")

ginkgo.By("Adding 2 dummy users")
@@ -105,7 +105,7 @@

ginkgo.By("Verifying that the users exist")
users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(len(users)).To(gomega.Equal(2))
}

@@ -151,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
// getServiceIP is a helper method to extract the Ingress IP from the service.
func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
@@ -212,6 +212,6 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
// Teardown does one final check of the data's availability.
func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
}
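Note that only error checks are converted in these upgrade tests; value assertions such as gomega.Expect(len(users)).To(gomega.Equal(2)) stay on gomega, since framework.ExpectNoError only wraps the err-must-be-nil pattern. If the framework later grew a matching helper, a call site could presumably read (hypothetical helper name, not part of this PR):

    // Hypothetical equality helper, for illustration only:
    framework.ExpectEqual(len(users), 2)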
8 changes: 4 additions & 4 deletions test/e2e/upgrades/etcd.go
@@ -90,7 +90,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
}
return true, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
framework.Logf("Service endpoint is up")

ginkgo.By("Adding 2 dummy users")
@@ -100,7 +100,7 @@

ginkgo.By("Verifying that the users exist")
users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(len(users)).To(gomega.Equal(2))
}

@@ -143,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {

func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
@@ -200,6 +200,6 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
// Teardown does one final check of the data's availability.
func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
}
8 changes: 4 additions & 4 deletions test/e2e/upgrades/mysql.go
@@ -67,7 +67,7 @@ func mysqlKubectlCreate(ns, file string) {

func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
@@ -105,7 +105,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
}
return true, nil
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
framework.Logf("Service endpoint is up")

ginkgo.By("Adding 2 names to the database")
@@ -114,7 +114,7 @@

ginkgo.By("Verifying that the 2 names have been inserted")
count, err := t.countNames()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(count).To(gomega.Equal(2))
}

@@ -166,7 +166,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
// Teardown performs one final check of the data's availability.
func (t *MySQLUpgradeTest) Teardown(f *framework.Framework) {
count, err := t.countNames()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(count >= t.successfulWrites).To(gomega.BeTrue())
}

10 changes: 5 additions & 5 deletions test/e2e/upgrades/nvidia-gpu.go
@@ -56,7 +56,7 @@ func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
// MasterUpgrade should be totally hitless.
job, err := jobutil.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
}
}
@@ -88,11 +88,11 @@ func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
}
ns := f.Namespace.Name
_, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
framework.Logf("Created job %v", testJob)
ginkgo.By("Waiting for gpu job pod start")
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Done with gpu job pod start")
}

@@ -101,9 +101,9 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
// Wait for client pod to complete.
ns := f.Namespace.Name
err := jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
createdPod := pods.Items[0].Name
framework.Logf("Created pod %v", createdPod)
f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)
8 changes: 4 additions & 4 deletions test/e2e/upgrades/sysctl.go
@@ -55,14 +55,14 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
case MasterUpgrade, ClusterUpgrade:
ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
}

ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
}
if err == nil {
gomega.Expect(pod.Status.Phase).NotTo(gomega.Equal(v1.PodRunning))
@@ -86,7 +86,7 @@ func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod

ginkgo.By("Making sure the valid pod launches")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
@@ -104,7 +104,7 @@ func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framewor

ginkgo.By("Making sure the invalid pod failed")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
2 changes: 1 addition & 1 deletion test/e2e/windows/density.go
@@ -267,7 +267,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
defer wg.Done()

err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

gomega.Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
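One remaining spot in deletePodsSync still wraps an error-returning call in a gomega assertion. A plausible follow-up (not part of this PR) would collapse it the same way:

    // Possible follow-up refactor, same pattern as the rest of this PR:
    framework.ExpectNoError(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name,
        pod.ObjectMeta.Name, labels.Everything(), 30*time.Second, 10*time.Minute))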