Revert "Use ExpectEqual test/e2e_node" #85931

Merged
merged 1 commit on Dec 5, 2019
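The diff below reverts the earlier conversion to framework.ExpectEqual, restoring direct gomega matchers (BeNil, BeTrue, BeZero, Not(Equal(...))) in the node e2e tests. As a rough sketch of the two assertion styles, assuming only the standard gomega API and a hypothetical doWork helper:

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// doWork stands in for whatever operation the real e2e tests perform; it is
// illustrative only.
func doWork() error { return nil }

func TestAssertionStyles(t *testing.T) {
	g := gomega.NewWithT(t)

	err := doWork()

	// Style restored by this revert: a dedicated matcher for each intent.
	g.Expect(err).To(gomega.BeNil(), "doWork should not fail")

	// Style being reverted (as it reads inside the e2e framework):
	//   framework.ExpectEqual(err, nil, "doWork should not fail")
	// ExpectEqual wraps gomega's Equal matcher, and Equal refuses to compare
	// <nil> to <nil>, so that form fails exactly when err is nil.
}

The Equal-refuses-nil behaviour is one plausible reason the ExpectEqual(err, nil, ...) calls are being reverted, though the PR itself does not state a motivation here.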
11 changes: 6 additions & 5 deletions test/e2e_node/apparmor_test.go
@@ -27,7 +27,7 @@ import (
"strconv"
"strings"

v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -63,8 +63,9 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
return
}
state := status.ContainerStatuses[0].State.Terminated
framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)

})
ginkgo.It("should enforce a permissive profile", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
@@ -73,8 +74,8 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
return
}
state := status.ContainerStatuses[0].State.Terminated
framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
})
})
} else {
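The apparmor hunks above are not purely mechanical: for the write-denying profile the restored assertion requires a nonzero exit code (the test workload is expected to fail), while the permissive audit-only profile keeps the zero-exit expectation. A minimal sketch of those expectations, using fabricated v1.ContainerStateTerminated values instead of states read from a real pod:

package example

import (
	"testing"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

func TestAppArmorExitCodeExpectations(t *testing.T) {
	g := gomega.NewWithT(t)

	// Container ran under a profile that denies writes: the workload is
	// expected to fail, so the exit code must be nonzero.
	denied := &v1.ContainerStateTerminated{ExitCode: 1}
	g.Expect(denied).ToNot(gomega.BeNil(), "ContainerState: %+v", denied)
	g.Expect(denied.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", denied)

	// Container ran under a permissive (audit-write) profile: the write is
	// logged but allowed, so the container exits cleanly.
	permissive := &v1.ContainerStateTerminated{ExitCode: 0}
	g.Expect(permissive.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", permissive)
}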
6 changes: 3 additions & 3 deletions test/e2e_node/container_manager_test.go
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
ginkgo.Context("once the node is setup", func() {
ginkgo.It("container runtime's oom-score-adj should be -999", func() {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
framework.ExpectEqual(err, nil, "failed to get list of container runtime pids")
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
for _, pid := range runtimePids {
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(pid, -999)
@@ -88,7 +88,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
})
ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
framework.ExpectEqual(err, nil, "failed to get list of kubelet pids")
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(kubeletPids[0], -999)
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
// created before this test, and may not be infra
// containers. They should be excluded from the test.
existingPausePIDs, err := getPidsForProcess("pause", "")
framework.ExpectEqual(err, nil, "failed to list all pause processes on the node")
gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
existingPausePIDSet := sets.NewInt(existingPausePIDs...)

podClient := f.PodClient()
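The container-manager hunks above keep the pattern of asserting that pid discovery succeeded and then polling oom_score_adj with gomega.Eventually. A self-contained sketch of that pattern; validateOOMScoreAdj here is a simplified stand-in for the test's own helper, and the pid and expected value are chosen so the sketch can run outside the e2e environment:

package example

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// validateOOMScoreAdj reads /proc/<pid>/oom_score_adj and compares it to the
// expected value. The real e2e helper differs in detail; this only
// illustrates the Expect(err).To(BeNil()) plus Eventually(...) pattern.
func validateOOMScoreAdj(pid, expected int) error {
	raw, err := os.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		return err
	}
	got, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		return err
	}
	if got != expected {
		return fmt.Errorf("expected oom_score_adj %d, got %d", expected, got)
	}
	return nil
}

func TestOOMScoreAdjPattern(t *testing.T) {
	g := gomega.NewWithT(t)

	// The real test discovers container-runtime and kubelet pids and expects
	// -999; here we check the current process (normally 0) so the sketch can
	// run anywhere with a /proc filesystem.
	pid := os.Getpid()
	g.Eventually(func() error {
		return validateOOMScoreAdj(pid, 0)
	}, 10*time.Second, time.Second).Should(gomega.BeNil())
}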
7 changes: 4 additions & 3 deletions test/e2e_node/critical_pod_test.go
@@ -32,6 +32,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@@ -85,7 +86,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
})

_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(), "failed to create PriorityClasses with an error: %v", err)

// Create pods, starting with non-critical so that the critical preempts the other pods.
f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -156,9 +157,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
pod.Spec.PriorityClassName = systemCriticalPriorityName
pod.Spec.Priority = &value

framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeTrue(), "pod should be a critical pod")
} else {
framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeFalse(), "pod should not be a critical pod")
}
return pod
}
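The PriorityClass creation above is written to tolerate an object left over from a previous run, asserting err == nil || errors.IsAlreadyExists(err). A small sketch of that check, fabricating the AlreadyExists error a second Create would return (the group, resource, and name strings are placeholders):

package example

import (
	"testing"

	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestTolerateAlreadyExists(t *testing.T) {
	g := gomega.NewWithT(t)

	// First create: no error at all.
	var err error
	g.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())

	// Re-create: the API server reports AlreadyExists, which the test tolerates.
	err = errors.NewAlreadyExists(
		schema.GroupResource{Group: "scheduling.k8s.io", Resource: "priorityclasses"},
		"critical-pod-test-high-priority")
	g.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(),
		"failed to create PriorityClass with an error: %v", err)
}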
10 changes: 5 additions & 5 deletions test/e2e_node/device_plugin_test.go
@@ -22,7 +22,7 @@ import (

"regexp"

v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -97,20 +97,20 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNotEqual(devID1, "")
gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))

podResources, err := getNodeDevices()
var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
framework.Logf("pod resources %v", podResources)
framework.ExpectEqual(err, nil)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectEqual(len(podResources.PodResources), 2)
for _, res := range podResources.GetPodResources() {
if res.Name == pod1.Name {
resourcesForOurPod = res
}
}
framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
framework.ExpectNotEqual(resourcesForOurPod, nil)
gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
@@ -181,7 +181,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Checking that pod got a different fake device")
devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)

framework.ExpectNotEqual(devID1, devID2)
gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))

ginkgo.By("By deleting the pods and waiting for container removal")
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
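The device-plugin hunks above scrape the pod log for the allocated stub device ID and require that a re-created pod is handed a different device. A sketch of the ID extraction and the restored Not(Equal(...)) checks, with fabricated log lines standing in for what parseLog reads from the real pods:

package example

import (
	"regexp"
	"testing"

	"github.com/onsi/gomega"
)

func TestStubDeviceIDChecks(t *testing.T) {
	g := gomega.NewWithT(t)

	// Same pattern the e2e test uses to pull the device ID out of the log.
	deviceIDRE := regexp.MustCompile("stub devices: (Dev-[0-9]+)")

	devID1 := deviceIDRE.FindStringSubmatch("stub devices: Dev-1")[1]
	g.Expect(devID1).To(gomega.Not(gomega.Equal("")))

	// After the pod is deleted and recreated it should get a different device.
	devID2 := deviceIDRE.FindStringSubmatch("stub devices: Dev-2")[1]
	g.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
}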
2 changes: 1 addition & 1 deletion test/e2e_node/e2e_node_suite_test.go
@@ -307,7 +307,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
if nodes == nil {
return nil, fmt.Errorf("the node list is nil")
}
framework.ExpectNotEqual(len(nodes.Items) > 1, true, "the number of nodes is more than 1.")
gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
if len(nodes.Items) == 0 {
return nil, fmt.Errorf("empty node list: %+v", nodes)
}
12 changes: 6 additions & 6 deletions test/e2e_node/eviction_test.go
@@ -23,7 +23,7 @@ import (
"strings"
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
})
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
})
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
})
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
})
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
})
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
})
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -661,7 +661,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
if expectedStarvedResource != noStarvedResource {
// Check the eviction.StarvedResourceKey
starved, found := event.Annotations[eviction.StarvedResourceKey]
framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
pod.Name, expectedStarvedResource)
starvedResource := v1.ResourceName(starved)
framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
@@ -671,7 +671,7 @@
if expectedStarvedResource == v1.ResourceMemory {
// Check the eviction.OffendingContainersKey
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
pod.Name)
offendingContainers := strings.Split(offendersString, ",")
framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
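The eviction hunks above check annotations on the eviction event: the starved resource must be recorded, and for memory starvation the offending-containers annotation must name exactly one container. A sketch of those checks over a fabricated annotation map; the key strings are placeholders for the eviction package's StarvedResourceKey and OffendingContainersKey constants:

package example

import (
	"strings"
	"testing"

	"github.com/onsi/gomega"
)

func TestEvictionEventAnnotationChecks(t *testing.T) {
	g := gomega.NewWithT(t)

	// Placeholder keys; the real test reads the eviction package's constants
	// off the Event object returned by the API server.
	annotations := map[string]string{
		"starved-resource":     "memory",
		"offending-containers": "memory-hog",
	}

	starved, found := annotations["starved-resource"]
	g.Expect(found).To(gomega.BeTrue(), "expected an annotation recording the starved resource")
	g.Expect(starved).To(gomega.Equal("memory"))

	offendersString, found := annotations["offending-containers"]
	g.Expect(found).To(gomega.BeTrue(), "expected an annotation recording the offending containers")
	offendingContainers := strings.Split(offendersString, ",")
	g.Expect(offendingContainers).To(gomega.HaveLen(1))
}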
4 changes: 2 additions & 2 deletions test/e2e_node/node_problem_detector_linux.go
@@ -24,7 +24,7 @@ import (
"path"
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete

nodeTime = time.Now()
bootTime, err = util.GetBootTime()
framework.ExpectEqual(err, nil)
gomega.Expect(err).To(gomega.BeNil())

// Set lookback duration longer than node up time.
// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
3 changes: 2 additions & 1 deletion test/e2e_node/startup_probe_test.go
@@ -30,6 +30,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@@ -178,7 +179,7 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeAlphaFea

isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
framework.ExpectEqual(isReady, true, "pod should be ready")
gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")

// We assume the pod became ready when the container became ready. This
// is true for a single container pod.