test/e2e/common/node: enhance assertions #110127

Merged 1 commit on May 25, 2022

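Every hunk in this diff applies the same refactor: a boolean framework.ExpectEqual / framework.ExpectNotEqual assertion becomes an explicit check that calls framework.Failf, so a failure names the namespace and object involved instead of only reporting that a boolean was wrong. The before/after shape, excerpted from the container_probe.go hunk below:

    // before: the message only says the expectation failed
    framework.ExpectEqual(isReady, true, "pod should be ready")

    // after: the message identifies which pod was not ready
    if !isReady {
        framework.Failf("pod %s/%s should be ready", f.Namespace.Name, p.Name)
    }
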
4 changes: 3 additions & 1 deletion test/e2e/common/node/configmap.go
@@ -223,7 +223,9 @@ var _ = SIGDescribe("ConfigMap", func() {
break
}
}
framework.ExpectEqual(testConfigMapFound, true, "failed to find ConfigMap by label selector")
if !testConfigMapFound {
framework.Failf("failed to find ConfigMap %s/%s by label selector", testNamespaceName, testConfigMap.ObjectMeta.Name)
}

ginkgo.By("deleting the ConfigMap by collection with a label selector")
err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
12 changes: 9 additions & 3 deletions test/e2e/common/node/container_probe.go
@@ -71,7 +71,9 @@ var _ = SIGDescribe("Probing container", func() {
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
framework.ExpectEqual(isReady, true, "pod should be ready")
if !isReady {
framework.Failf("pod %s/%s should be ready", f.Namespace.Name, p.Name)
}

// We assume the pod became ready when the container became ready. This
// is true for a single container pod.
@@ -110,7 +112,9 @@ var _ = SIGDescribe("Probing container", func() {
framework.ExpectNoError(err)

isReady, _ := testutils.PodRunningReady(p)
framework.ExpectNotEqual(isReady, true, "pod should be not ready")
if isReady {
framework.Failf("pod %s/%s should be not ready", f.Namespace.Name, p.Name)
}

restartCount := getRestartCount(p)
framework.ExpectEqual(restartCount, 0, "pod should have a restart count of 0 but got %v", restartCount)
@@ -430,7 +434,9 @@ var _ = SIGDescribe("Probing container", func() {

isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
framework.ExpectEqual(isReady, true, "pod should be ready")
if !isReady {
framework.Failf("pod %s/%s should be ready", f.Namespace.Name, p.Name)
}

readyIn := readyTime.Sub(startedTime)
framework.Logf("Container started at %v, pod became ready at %v, %v after startupProbe succeeded", startedTime, readyTime, readyIn)
8 changes: 6 additions & 2 deletions test/e2e/common/node/init_container.go
@@ -235,7 +235,9 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {

framework.ExpectEqual(len(endPod.Status.InitContainerStatuses), 2)
for _, status := range endPod.Status.InitContainerStatuses {
framework.ExpectEqual(status.Ready, true)
if !status.Ready {
framework.Failf("init container %s should be in Ready status", status.Name)
}
gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
}
@@ -312,7 +314,9 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {

framework.ExpectEqual(len(endPod.Status.InitContainerStatuses), 2)
for _, status := range endPod.Status.InitContainerStatuses {
framework.ExpectEqual(status.Ready, true)
if !status.Ready {
framework.Failf("init container %s should be in Ready status", status.Name)
}
gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
}
18 changes: 14 additions & 4 deletions test/e2e/common/node/lease.go
@@ -32,6 +32,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/pointer"

"github.com/google/go-cmp/cmp"
)

func getPatchBytes(oldLease, newLease *coordinationv1.Lease) ([]byte, error) {
@@ -89,7 +91,9 @@ var _ = SIGDescribe("Lease", func() {

readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
framework.ExpectEqual(apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec), true)
if !apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(lease.Spec, readLease.Spec))
}

createdLease.Spec = coordinationv1.LeaseSpec{
HolderIdentity: pointer.StringPtr("holder2"),
@@ -104,7 +108,9 @@ var _ = SIGDescribe("Lease", func() {

readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
framework.ExpectEqual(apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec), true)
if !apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(createdLease.Spec, readLease.Spec))
}

patchedLease := readLease.DeepCopy()
patchedLease.Spec = coordinationv1.LeaseSpec{
@@ -122,7 +128,9 @@ var _ = SIGDescribe("Lease", func() {

readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
framework.ExpectEqual(apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec), true)
if !apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec) {
framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(patchedLease.Spec, readLease.Spec))
}

name2 := "lease2"
lease2 := &coordinationv1.Lease{
@@ -157,7 +165,9 @@ var _ = SIGDescribe("Lease", func() {
framework.ExpectNoError(err, "deleting Lease failed")

_, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsNotFound(err), true)
if !apierrors.IsNotFound(err) {
framework.Failf("expected IsNotFound error, got %#v", err)
}

leaseClient = f.ClientSet.CoordinationV1().Leases(metav1.NamespaceAll)
// Number of leases may be high in large clusters, as Lease object is
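The lease.go hunks above also switch the failure message from a bare boolean to an embedded go-cmp diff. A minimal, self-contained sketch of what that message ends up looking like, assuming the github.com/google/go-cmp module pulled in by the new import (the holder values are illustrative, not taken from the test):

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
        coordinationv1 "k8s.io/api/coordination/v1"
        "k8s.io/utils/pointer"
    )

    func main() {
        // Two Lease specs that differ only in the holder identity.
        expected := coordinationv1.LeaseSpec{
            HolderIdentity:       pointer.StringPtr("holder"),
            LeaseDurationSeconds: pointer.Int32Ptr(30),
        }
        actual := coordinationv1.LeaseSpec{
            HolderIdentity:       pointer.StringPtr("holder2"),
            LeaseDurationSeconds: pointer.Int32Ptr(30),
        }

        // cmp.Diff returns "" when the values match and a readable line diff
        // when they do not; the new Failf calls embed exactly that string.
        if diff := cmp.Diff(expected, actual); diff != "" {
            fmt.Printf("Leases don't match. Diff (- for expected, + for actual):\n%s", diff)
        }
    }
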
12 changes: 9 additions & 3 deletions test/e2e/common/node/pods.go
@@ -816,15 +816,19 @@ var _ = SIGDescribe("Pods", func() {
ginkgo.By("submitting the pod to kubernetes")
f.PodClient().Create(pod)
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false initially.")
if podClient.PodIsReady(podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name)
}

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err)
// Sleep for 10 seconds.
time.Sleep(syncLoopFrequency)
// Verify the pod is still not ready
framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
if podClient.PodIsReady(podName) {
framework.Failf("Expect pod(%s/%s)'s Ready condition to be false with only one condition in readinessGates equal to True", f.Namespace.Name, pod.Name)
}

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status")
@@ -1067,7 +1071,9 @@ var _ = SIGDescribe("Pods", func() {
postDeletePodJSON, _ = json.Marshal(postDeletePod)
}
framework.ExpectError(err, "pod %v found in namespace %v, but it should be deleted: %s", testPodName, testNamespaceName, string(postDeletePodJSON))
framework.ExpectEqual(apierrors.IsNotFound(err), true, fmt.Sprintf("expected IsNotFound error, got %#v", err))
if !apierrors.IsNotFound(err) {
framework.Failf("expected IsNotFound error, got %#v", err)
}
})
})

36 changes: 27 additions & 9 deletions test/e2e/common/node/runtimeclass.go
@@ -219,7 +219,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
}
}
}
framework.ExpectEqual(found, true, fmt.Sprintf("expected RuntimeClass API group/version, got %#v", discoveryGroups.Groups))
if !found {
framework.Failf("expected RuntimeClass API group/version, got %#v", discoveryGroups.Groups)
}
}

ginkgo.By("getting /apis/node.k8s.io")
@@ -234,7 +236,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
break
}
}
framework.ExpectEqual(found, true, fmt.Sprintf("expected RuntimeClass API version, got %#v", group.Versions))
if !found {
framework.Failf("expected RuntimeClass API version, got %#v", group.Versions)
}
}

ginkgo.By("getting /apis/node.k8s.io/" + rcVersion)
@@ -248,7 +252,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
found = true
}
}
framework.ExpectEqual(found, true, fmt.Sprintf("expected runtimeclasses, got %#v", resources.APIResources))
if !found {
framework.Failf("expected runtimeclasses, got %#v", resources.APIResources)
}
}

// Main resource create/read/update/watch operations
@@ -257,7 +263,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
createdRC, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
framework.ExpectNoError(err)
_, err = rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
framework.ExpectEqual(apierrors.IsAlreadyExists(err), true, fmt.Sprintf("expected 409, got %#v", err))
if !apierrors.IsAlreadyExists(err) {
framework.Failf("expected 409, got %#v", err)
}
_, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{})
framework.ExpectNoError(err)

@@ -296,10 +304,14 @@ var _ = SIGDescribe("RuntimeClass", func() {
for sawAdded, sawPatched, sawUpdated := false, false, false; !sawAdded && !sawPatched && !sawUpdated; {
select {
case evt, ok := <-rcWatch.ResultChan():
framework.ExpectEqual(ok, true, "watch channel should not close")
if !ok {
framework.Fail("watch channel should not close")
}
if evt.Type == watch.Modified {
watchedRC, isRC := evt.Object.(*nodev1.RuntimeClass)
framework.ExpectEqual(isRC, true, fmt.Sprintf("expected RC, got %T", evt.Object))
if !isRC {
framework.Failf("expected RC, got %T", evt.Object)
}
if watchedRC.Annotations["patched"] == "true" {
framework.Logf("saw patched annotations")
sawPatched = true
@@ … @@
}
} else if evt.Type == watch.Added {
_, isRC := evt.Object.(*nodev1.RuntimeClass)
framework.ExpectEqual(isRC, true, fmt.Sprintf("expected RC, got %T", evt.Object))
if !isRC {
framework.Failf("expected RC, got %T", evt.Object)
}
sawAdded = true
}

@@ … @@
err = rcClient.Delete(context.TODO(), createdRC.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
_, err = rcClient.Get(context.TODO(), createdRC.Name, metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsNotFound(err), true, fmt.Sprintf("expected 404, got %#v", err))
if !apierrors.IsNotFound(err) {
framework.Failf("expected 404, got %#v", err)
}
rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
framework.ExpectNoError(err)
framework.ExpectEqual(len(rcs.Items), 2, "filtered list should have 2 items")
@@ -360,7 +376,9 @@ func createRuntimeClass(f *framework.Framework, name, handler string, overhead *
func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectError(err, "should be forbidden")
framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
if !apierrors.IsForbidden(err) {
framework.Failf("expected forbidden error, got %#v", err)
}
}

// expectPodSuccess waits for the given pod to terminate successfully.
8 changes: 6 additions & 2 deletions test/e2e/common/node/secrets.go
@@ -187,7 +189,9 @@ var _ = SIGDescribe("Secrets", func() {
break
}
}
framework.ExpectEqual(foundCreatedSecret, true, "unable to find secret by its value")
if !foundCreatedSecret {
framework.Failf("unable to find secret %s/%s by name", f.Namespace.Name, secretTestName)
}

ginkgo.By("patching the secret")
// patch the secret in the test namespace
@@ -230,7 +232,9 @@ var _ = SIGDescribe("Secrets", func() {
break
}
}
framework.ExpectEqual(foundCreatedSecret, false, "secret was not deleted successfully")
if foundCreatedSecret {
framework.Failf("secret %s/%s was not deleted successfully", f.Namespace.Name, secretTestName)
}
})
})
