Enforce test style on e2e_node and e2e_kubeadm tests #78779

Merged (2 commits) on Jun 19, 2019

2 changes: 1 addition & 1 deletion hack/verify-test-code.sh
@@ -21,7 +21,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
cd "${KUBE_ROOT}"

# NOTE: This checks e2e test code without the e2e framework which contains Expect().To(HaveOccurred())
-mapfile -t all_e2e_files < <(find test/e2e -name '*.go' | grep -v 'test/e2e/framework/')
+mapfile -t all_e2e_files < <(find test/e2e{,_node,_kubeadm} -name '*.go' | grep -v 'test/e2e/framework/')

Member:
NOTE: see previous discussions in this repo: we don't use mapfile because it doesn't exist on macOS. There are examples of alternatives elsewhere in hack/, such as the read loop in the verify-shellcheck script.
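
For reference, that bash-3-friendly alternative is a plain read loop. A minimal sketch of the pattern applied to this pipeline (an illustration only, not copied from hack/verify-shellcheck.sh):

all_e2e_files=()
while IFS= read -r file; do
  all_e2e_files+=("${file}")
done < <(find test/e2e{,_node,_kubeadm} -name '*.go' | grep -v 'test/e2e/framework/')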

Member:
this particular change looks fine, but the original implementation probably should not have introduced mapfile usage

Member:
@BenTheElder I see, thanks for your comment. I will follow up on the mapfile issue in another PR after some digging.

Member Author:
There is a helper inside hack/lib/util.sh called kube::util::read-array that works the same way as mapfile -t, and it also works in bash 3 😄 @oomichi @BenTheElder But yeah, that can be fixed in another PR.

function kube::util::read-array {
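
Only the opening line of the helper is quoted above. As an illustration, a bash-3-compatible stand-in for mapfile -t could look like the sketch below; the name read_array_sketch is hypothetical, and this is not necessarily the exact body of kube::util::read-array in hack/lib/util.sh:

# Read newline-delimited items from stdin into the array named by $1 (bash 3 compatible).
read_array_sketch() {
  local name="$1" line
  eval "${name}=()"
  while IFS= read -r line; do
    eval "${name}+=(\"\${line}\")"
  done
}

# Usage, mirroring mapfile -t:
read_array_sketch all_e2e_files < <(find test/e2e{,_node,_kubeadm} -name '*.go' | grep -v 'test/e2e/framework/')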

Member:
@oomichi @odinuge thanks! that's exactly the thing to do, let's do that in another PR. feel free to ping me on that one for approval 😅

Member:
#79186 is the follow-up PR.

errors_expect_no_error=()
for file in "${all_e2e_files[@]}"
do
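  # The remaining lines of this hunk are collapsed in this view. Going by the
  # NOTE at the top of the script, the loop presumably flags files that still
  # call Expect(err).NotTo(HaveOccurred()) directly; a hedged sketch of that
  # kind of check, not the script's verbatim logic:
  if grep "Expect(err).NotTo(HaveOccurred())" "${file}" > /dev/null; then
    errors_expect_no_error+=("${file}")
  fi
done

if [ ${#errors_expect_no_error[@]} -ne 0 ]; then
  echo "Use framework.ExpectNoError(err) instead of Expect(err).NotTo(HaveOccurred()) in:"
  printf '%s\n' "${errors_expect_no_error[@]}"
  exit 1
fi
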
4 changes: 1 addition & 3 deletions test/e2e_node/benchmark_util.go
@@ -32,8 +32,6 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/perftype"
nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype"

-. "github.com/onsi/gomega"
)

const (
@@ -156,7 +154,7 @@ func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatenc
func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[string]string {
nodeName := framework.TestContext.NodeName
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)

cpu, ok := node.Status.Capacity[v1.ResourceCPU]
if !ok {
4 changes: 2 additions & 2 deletions test/e2e_node/container_log_rotation_test.go
@@ -82,9 +82,9 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
Expect(len(pod.Status.ContainerStatuses)).To(Equal(1))
id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
r, _, err := getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
status, err := r.ContainerStatus(id)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
logPath := status.GetLogPath()
By("wait for container log being rotated to max file limit")
Eventually(func() (int, error) {
4 changes: 2 additions & 2 deletions test/e2e_node/container_manager_test.go
@@ -155,13 +155,13 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
if CurrentGinkgoTestDescription().Failed {
By("Dump all running containers")
runtime, _, err := getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
State: &runtimeapi.ContainerStateValue{
State: runtimeapi.ContainerState_CONTAINER_RUNNING,
},
})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Running containers:")
for _, c := range containers {
e2elog.Logf("%+v", c)
2 changes: 1 addition & 1 deletion test/e2e_node/cpu_manager_test.go
@@ -105,7 +105,7 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa

func waitForContainerRemoval(containerName, podName, podNS string) {
rs, _, err := getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
Eventually(func() bool {
containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{
LabelSelector: map[string]string{
2 changes: 1 addition & 1 deletion test/e2e_node/density_test.go
@@ -456,7 +456,7 @@ func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, inter
func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
latencyMetrics := framework.KubeletLatencyMetrics{}
ms, err := metrics.GrabKubeletMetricsWithoutProxy(node, "/metrics")
-Expect(err).NotTo(HaveOccurred(), "Failed to get kubelet metrics without proxy in node %s", node)
+framework.ExpectNoError(err, "Failed to get kubelet metrics without proxy in node %s", node)

for _, samples := range ms {
for _, sample := range samples {
6 changes: 3 additions & 3 deletions test/e2e_node/e2e_node_suite_test.go
@@ -216,7 +216,7 @@ func maskLocksmithdOnCoreos() {
}
if bytes.Contains(data, []byte("ID=coreos")) {
output, err := exec.Command("systemctl", "mask", "--now", "locksmithd").CombinedOutput()
-Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("should be able to mask locksmithd - output: %q", string(output)))
+framework.ExpectNoError(err, fmt.Sprintf("should be able to mask locksmithd - output: %q", string(output)))
klog.Infof("Locksmithd is masked successfully")
}
}
@@ -229,7 +229,7 @@ func waitForNodeReady() {
nodeReadyPollInterval = 1 * time.Second
)
client, err := getAPIServerClient()
-Expect(err).NotTo(HaveOccurred(), "should be able to get apiserver client.")
+framework.ExpectNoError(err, "should be able to get apiserver client.")
Eventually(func() error {
node, err := getNode(client)
if err != nil {
@@ -273,7 +273,7 @@ func updateTestContext() error {
// getNode gets node object from the apiserver.
func getNode(c *clientset.Clientset) (*v1.Node, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
+framework.ExpectNoError(err, "should be able to list nodes.")
if nodes == nil {
return nil, fmt.Errorf("the node list is nil.")
}
6 changes: 3 additions & 3 deletions test/e2e_node/eviction_test.go
@@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
})
AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
specs := []podEvictSpec{
{
@@ -364,7 +364,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
})
AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
specs := []podEvictSpec{
{
@@ -417,7 +417,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
})
AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
specs := []podEvictSpec{
{
2 changes: 1 addition & 1 deletion test/e2e_node/garbage_collector_test.go
@@ -144,7 +144,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
BeforeEach(func() {
var err error
runtime, _, err = getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
for _, pod := range test.testPods {
// Initialize the getContainerNames function to use CRI runtime client.
2 changes: 1 addition & 1 deletion test/e2e_node/hugepages_test.go
@@ -189,7 +189,7 @@ func runHugePagesTests(f *framework.Framework) {
verifyPod := makePodToVerifyHugePages("pod"+podUID, resource.MustParse("50Mi"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
}

8 changes: 4 additions & 4 deletions test/e2e_node/node_problem_detector_linux.go
@@ -161,13 +161,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
}.AsSelector().String()
eventListOptions = metav1.ListOptions{FieldSelector: selector}
By("Create the test log file")
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Create config map for the node problem detector")
_, err = c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: configName},
Data: map[string]string{path.Base(configFile): config},
})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Create the node problem detector")
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
@@ -238,7 +238,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
},
})
pod, err := f.PodClient().Get(name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
// TODO: remove hardcoded kubelet volume directory path
// framework.TestContext.KubeVolumeDir is currently not populated for node e2e
hostLogFile = "/var/lib/kubelet/pods/" + string(pod.UID) + "/volumes/kubernetes.io~empty-dir" + logFile
@@ -340,7 +340,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
if test.messageNum > 0 {
By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
err := injectLog(hostLogFile, test.timestamp, test.message, test.messageNum)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
}

By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents))
2 changes: 1 addition & 1 deletion test/e2e_node/pids_test.go
@@ -132,7 +132,7 @@ func runPodPidsLimitTests(f *framework.Framework) {
verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
}

30 changes: 16 additions & 14 deletions test/e2e_node/pods_container_manager_test.go
@@ -29,7 +29,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
"k8s.io/klog"
)

@@ -161,7 +160,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
})
})
@@ -199,15 +198,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
-Expect(f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+err := f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+framework.ExpectNoError(err)
pod := makePodToVerifyCgroupRemoved("pod" + podUID)
f.PodClient().Create(pod)
-err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err)
})
})
})
@@ -243,15 +243,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
-Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+err := f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+framework.ExpectNoError(err)
pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID)
f.PodClient().Create(pod)
-err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err)
})
})
})
@@ -287,15 +288,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
-Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+err := f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+framework.ExpectNoError(err)
pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID)
f.PodClient().Create(pod)
-err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err)
})
})
})
2 changes: 1 addition & 1 deletion test/e2e_node/resource_collector.go
@@ -374,7 +374,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
defer wg.Done()

err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)

Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
3 changes: 1 addition & 2 deletions test/e2e_node/resource_usage_test.go
@@ -30,7 +30,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
)

var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
@@ -190,7 +189,7 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi

// Obtain memory PerfData
usagePerContainer, err := rc.GetLatest()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("%s", formatResourceUsageStats(usagePerContainer))

usagePerNode := make(framework.ResourceUsagePerNode)
3 changes: 1 addition & 2 deletions test/e2e_node/runtime_conformance_test.go
@@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/test/e2e_node/services"

. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
@@ -84,7 +83,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {

configFile := filepath.Join(services.KubeletRootDirectory, "config.json")
err := ioutil.WriteFile(configFile, []byte(auth), 0644)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
defer os.Remove(configFile)

// checkContainerStatus checks whether the container status matches expectation.
5 changes: 2 additions & 3 deletions test/e2e_node/volume_manager_test.go
@@ -28,7 +28,6 @@ import (
"fmt"

. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
@@ -73,7 +72,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
},
})
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Verifying the memory backed volume was removed from node", func() {
volumePath := fmt.Sprintf("/tmp/%s/volumes/kubernetes.io~empty-dir/%s", string(memoryBackedPod.UID), volumeName)
@@ -121,7 +120,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
}
<-time.After(10 * time.Second)
}
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
})
})