Use ExpectEqual test/e2e_node
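
This switches the test/e2e_node assertions from raw gomega matchers to the e2e framework's ExpectEqual/ExpectNotEqual helpers, which wrap the same gomega comparison and take an optional explanation for the failure message. The conversion pattern, excerpted from the hunks below (a sketch of the pattern only, not a complete test):

    // before: direct gomega matchers
    gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
    gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))

    // after: helpers from k8s.io/kubernetes/test/e2e/framework
    framework.ExpectEqual(err, nil, "failed to get list of kubelet pids")
    framework.ExpectNotEqual(devID1, devID2)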

tanjunchen committed Dec 2, 2019
1 parent f6337c7 commit 561ee6ece989fd58be52df186bdc58ac44292eb5
@@ -27,7 +27,7 @@ import (
"strconv"
"strings"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -63,9 +63,8 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
return
}
state := status.ContainerStatuses[0].State.Terminated
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)

framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
framework.ExpectNotEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
})
ginkgo.It("should enforce a permissive profile", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
@@ -74,8 +73,8 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
return
}
state := status.ContainerStatuses[0].State.Terminated
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
})
})
} else {
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
ginkgo.Context("once the node is setup", func() {
ginkgo.It("container runtime's oom-score-adj should be -999", func() {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
framework.ExpectEqual(err, nil, "failed to get list of container runtime pids")
for _, pid := range runtimePids {
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(pid, -999)
@@ -88,7 +88,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
})
ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
framework.ExpectEqual(err, nil, "failed to get list of kubelet pids")
framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(kubeletPids[0], -999)
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
// created before this test, and may not be infra
// containers. They should be excluded from the test.
existingPausePIDs, err := getPidsForProcess("pause", "")
gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
framework.ExpectEqual(err, nil, "failed to list all pause processes on the node")
existingPausePIDSet := sets.NewInt(existingPausePIDs...)

podClient := f.PodClient()
@@ -32,7 +32,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@@ -86,7 +85,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
})

_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(), "failed to create PriorityClasses with an error: %v", err)
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)

// Create pods, starting with non-critical so that the critical preempts the other pods.
f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -157,9 +156,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
pod.Spec.PriorityClassName = systemCriticalPriorityName
pod.Spec.Priority = &value

gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeTrue(), "pod should be a critical pod")
framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
} else {
gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeFalse(), "pod should not be a critical pod")
framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
}
return pod
}
@@ -22,7 +22,7 @@ import (

"regexp"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -97,20 +97,20 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
deviceIDRE := "stub devices: (Dev-[0-9]+)"
devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))
framework.ExpectNotEqual(devID1, "")

podResources, err := getNodeDevices()
var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
framework.Logf("pod resources %v", podResources)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectEqual(err, nil)
framework.ExpectEqual(len(podResources.PodResources), 2)
for _, res := range podResources.GetPodResources() {
if res.Name == pod1.Name {
resourcesForOurPod = res
}
}
framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
framework.ExpectNotEqual(resourcesForOurPod, nil)
framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
@@ -181,7 +181,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Checking that pod got a different fake device")
devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)

gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
framework.ExpectNotEqual(devID1, devID2)

ginkgo.By("By deleting the pods and waiting for container removal")
err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
@@ -307,7 +307,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
if nodes == nil {
return nil, fmt.Errorf("the node list is nil")
}
gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
framework.ExpectNotEqual(len(nodes.Items) > 1, true, "the number of nodes is more than 1.")
if len(nodes.Items) == 0 {
return nil, fmt.Errorf("empty node list: %+v", nodes)
}
@@ -23,7 +23,7 @@ import (
"strings"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
})
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
})
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
})
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
})
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
})
ginkgo.BeforeEach(func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
})
ginkgo.AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -661,7 +661,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
if expectedStarvedResource != noStarvedResource {
// Check the eviction.StarvedResourceKey
starved, found := event.Annotations[eviction.StarvedResourceKey]
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
pod.Name, expectedStarvedResource)
starvedResource := v1.ResourceName(starved)
framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
@@ -671,7 +671,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
if expectedStarvedResource == v1.ResourceMemory {
// Check the eviction.OffendingContainersKey
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
pod.Name)
offendingContainers := strings.Split(offendersString, ",")
framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
@@ -24,7 +24,7 @@ import (
"path"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete

nodeTime = time.Now()
bootTime, err = util.GetBootTime()
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectEqual(err, nil)

// Set lookback duration longer than node up time.
// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
@@ -30,7 +30,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@@ -179,7 +178,7 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeAlphaFea

isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")
framework.ExpectEqual(isReady, true, "pod should be ready")

// We assume the pod became ready when the container became ready. This
// is true for a single container pod.
