Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

e2e_node: clean up non-recommended import #80680

Merged
merged 1 commit on Jul 31, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
36 changes: 18 additions & 18 deletions test/e2e_node/apparmor_test.go
Expand Up @@ -39,51 +39,51 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/klog"
)

var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
if isAppArmorEnabled() {
BeforeEach(func() {
By("Loading AppArmor profiles for testing")
ginkgo.BeforeEach(func() {
ginkgo.By("Loading AppArmor profiles for testing")
framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
})
Context("when running with AppArmor", func() {
ginkgo.Context("when running with AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")

It("should reject an unloaded profile", func() {
ginkgo.It("should reject an unloaded profile", func() {
status := runAppArmorTest(f, false, apparmor.ProfileNamePrefix+"non-existent-profile")
expectSoftRejection(status)
})
It("should enforce a profile blocking writes", func() {
ginkgo.It("should enforce a profile blocking writes", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
if len(status.ContainerStatuses) == 0 {
e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)

})
It("should enforce a permissive profile", func() {
ginkgo.It("should enforce a permissive profile", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
if len(status.ContainerStatuses) == 0 {
e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
Expect(state).ToNot(BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
})
})
} else {
Context("when running without AppArmor", func() {
ginkgo.Context("when running without AppArmor", func() {
f := framework.NewDefaultFramework("apparmor-test")

It("should reject a pod with an AppArmor profile", func() {
ginkgo.It("should reject a pod with an AppArmor profile", func() {
status := runAppArmorTest(f, false, apparmor.ProfileRuntimeDefault)
expectSoftRejection(status)
})
Expand Down Expand Up @@ -199,10 +199,10 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {

// expectSoftRejection asserts that a pod was "soft" rejected by AppArmor
// admission: the pod remains Pending with reason "AppArmor", the status
// message mentions AppArmor, and the first container is blocked from
// starting. The full PodStatus is attached to every failure message to
// aid debugging.
//
// NOTE(review): assumes status.ContainerStatuses is non-empty and that
// State.Waiting is set — callers must guarantee this or the assertion
// line panics with a nil dereference; verify against call sites.
func expectSoftRejection(status v1.PodStatus) {
	args := []interface{}{"PodStatus: %+v", status}
	gomega.Expect(status.Phase).To(gomega.Equal(v1.PodPending), args...)
	gomega.Expect(status.Reason).To(gomega.Equal("AppArmor"), args...)
	gomega.Expect(status.Message).To(gomega.ContainSubstring("AppArmor"), args...)
	gomega.Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(gomega.Equal("Blocked"), args...)
}

func isAppArmorEnabled() bool {
Expand Down
28 changes: 14 additions & 14 deletions test/e2e_node/container_log_rotation_test.go
Expand Up @@ -28,8 +28,8 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
Expand All @@ -42,8 +42,8 @@ const (

var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("container-log-rotation-test")
Context("when a container generates a lot of log", func() {
BeforeEach(func() {
ginkgo.Context("when a container generates a lot of log", func() {
ginkgo.BeforeEach(func() {
if framework.TestContext.ContainerRuntime != kubetypes.RemoteContainerRuntime {
framework.Skipf("Skipping ContainerLogRotation test since the container runtime is not remote")
}
Expand All @@ -55,8 +55,8 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
initialConfig.ContainerLogMaxSize = testContainerLogMaxSize
})

It("should be rotated and limited to a fixed amount of files", func() {
By("create log container")
ginkgo.It("should be rotated and limited to a fixed amount of files", func() {
ginkgo.By("create log container")
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-container-log-rotation",
Expand All @@ -78,30 +78,30 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
},
}
pod = f.PodClient().CreateSync(pod)
By("get container log path")
Expect(len(pod.Status.ContainerStatuses)).To(Equal(1))
ginkgo.By("get container log path")
gomega.Expect(len(pod.Status.ContainerStatuses)).To(gomega.Equal(1))
id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
r, _, err := getCRIClient()
framework.ExpectNoError(err)
status, err := r.ContainerStatus(id)
framework.ExpectNoError(err)
logPath := status.GetLogPath()
By("wait for container log being rotated to max file limit")
Eventually(func() (int, error) {
ginkgo.By("wait for container log being rotated to max file limit")
gomega.Eventually(func() (int, error) {
logs, err := kubelogs.GetAllLogs(logPath)
if err != nil {
return 0, err
}
return len(logs), nil
}, rotationEventuallyTimeout, rotationPollInterval).Should(Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit")
By("make sure container log number won't exceed max file limit")
Consistently(func() (int, error) {
}, rotationEventuallyTimeout, rotationPollInterval).Should(gomega.Equal(testContainerLogMaxFiles), "should eventually rotate to max file limit")
ginkgo.By("make sure container log number won't exceed max file limit")
gomega.Consistently(func() (int, error) {
logs, err := kubelogs.GetAllLogs(logPath)
if err != nil {
return 0, err
}
return len(logs), nil
}, rotationConsistentlyTimeout, rotationPollInterval).Should(BeNumerically("<=", testContainerLogMaxFiles), "should never exceed max file limit")
}, rotationConsistentlyTimeout, rotationPollInterval).Should(gomega.BeNumerically("<=", testContainerLogMaxFiles), "should never exceed max file limit")
})
})
})
62 changes: 31 additions & 31 deletions test/e2e_node/container_manager_test.go
Expand Up @@ -35,8 +35,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)

Expand Down Expand Up @@ -76,32 +76,32 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect

var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
f := framework.NewDefaultFramework("kubelet-container-manager")
Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
Context("once the node is setup", func() {
It("container runtime's oom-score-adj should be -999", func() {
ginkgo.Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
ginkgo.Context("once the node is setup", func() {
ginkgo.It("container runtime's oom-score-adj should be -999", func() {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
Expect(err).To(BeNil(), "failed to get list of container runtime pids")
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
for _, pid := range runtimePids {
Eventually(func() error {
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(pid, -999)
}, 5*time.Minute, 30*time.Second).Should(BeNil())
}, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())
}
})
It("Kubelet's oom-score-adj should be -999", func() {
ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
Expect(err).To(BeNil(), "failed to get list of kubelet pids")
Expect(len(kubeletPids)).To(Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
Eventually(func() error {
gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
gomega.Expect(len(kubeletPids)).To(gomega.Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
gomega.Eventually(func() error {
return validateOOMScoreAdjSetting(kubeletPids[0], -999)
}, 5*time.Minute, 30*time.Second).Should(BeNil())
}, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())
})
Context("", func() {
It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
ginkgo.Context("", func() {
ginkgo.It("pod infra containers oom-score-adj should be -998 and best effort container's should be 1000", func() {
// Take a snapshot of existing pause processes. These were
// created before this test, and may not be infra
// containers. They should be excluded from the test.
existingPausePIDs, err := getPidsForProcess("pause", "")
Expect(err).To(BeNil(), "failed to list all pause processes on the node")
gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
existingPausePIDSet := sets.NewInt(existingPausePIDs...)

podClient := f.PodClient()
Expand All @@ -120,8 +120,8 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
},
})
var pausePids []int
By("checking infra container's oom-score-adj")
Eventually(func() error {
ginkgo.By("checking infra container's oom-score-adj")
gomega.Eventually(func() error {
pausePids, err = getPidsForProcess("pause", "")
if err != nil {
return fmt.Errorf("failed to get list of pause pids: %v", err)
Expand All @@ -136,10 +136,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
var shPids []int
By("checking besteffort container's oom-score-adj")
Eventually(func() error {
ginkgo.By("checking besteffort container's oom-score-adj")
gomega.Eventually(func() error {
shPids, err = getPidsForProcess("serve_hostname", "")
if err != nil {
return fmt.Errorf("failed to get list of serve hostname process pids: %v", err)
Expand All @@ -148,12 +148,12 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
return fmt.Errorf("expected only one serve_hostname process; found %d", len(shPids))
}
return validateOOMScoreAdjSetting(shPids[0], 1000)
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())
})
// Log the running containers here to help debugging.
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
By("Dump all running containers")
ginkgo.AfterEach(func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
ginkgo.By("Dump all running containers")
runtime, _, err := getCRIClient()
framework.ExpectNoError(err)
containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
Expand All @@ -169,7 +169,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}
})
})
It("guaranteed container's oom-score-adj should be -998", func() {
ginkgo.It("guaranteed container's oom-score-adj should be -998", func() {
podClient := f.PodClient()
podName := "guaranteed" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
Expand All @@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
ngPids []int
err error
)
Eventually(func() error {
gomega.Eventually(func() error {
ngPids, err = getPidsForProcess("nginx", "")
if err != nil {
return fmt.Errorf("failed to get list of nginx process pids: %v", err)
Expand All @@ -207,10 +207,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}

return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())

})
It("burstable container's oom-score-adj should be between [2, 1000)", func() {
ginkgo.It("burstable container's oom-score-adj should be between [2, 1000)", func() {
podClient := f.PodClient()
podName := "burstable" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
Expand All @@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
wsPids []int
err error
)
Eventually(func() error {
gomega.Eventually(func() error {
wsPids, err = getPidsForProcess("test-webserver", "")
if err != nil {
return fmt.Errorf("failed to get list of test-webserver process pids: %v", err)
Expand All @@ -247,7 +247,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
}
}
return nil
}, 2*time.Minute, time.Second*4).Should(BeNil())
}, 2*time.Minute, time.Second*4).Should(gomega.BeNil())

// TODO: Test the oom-score-adj logic for burstable more accurately.
})
Expand Down