Shorten eviction tests, and increase test suite timeout #46441

Merged · 1 commit · Jun 13, 2017
36 changes: 11 additions & 25 deletions test/e2e_node/allocatable_eviction_test.go
@@ -20,7 +20,7 @@ import (
"fmt"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/apis/componentconfig"
@@ -34,44 +34,30 @@ import (
// Eviction Policy is described here:
// https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/kubelet-eviction.md

var _ = framework.KubeDescribe("AllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
f := framework.NewDefaultFramework("allocatable-eviction-test")
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
f := framework.NewDefaultFramework("memory-allocatable-eviction-test")

podTestSpecs := []podTestSpec{
{
evictionPriority: 1, // This pod should be evicted before the innocent pod
pod: *getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
pod: getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
},
{
evictionPriority: 0, // This pod should never be evicted
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "normal-memory-usage-container",
Command: []string{
"sh",
"-c", //make one big (5 Gb) file
"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
},
},
},
},
},
pod: getInnocentPod(),
},
}
evictionTestTimeout := 40 * time.Minute
evictionTestTimeout := 10 * time.Minute
testCondition := "Memory Pressure"

Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
initialConfig.EvictionHard = "memory.available<10%"
// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
initialConfig.SystemReserved = componentconfig.ConfigurationMap(map[string]string{"memory": "1Gi"})
initialConfig.KubeReserved = componentconfig.ConfigurationMap(map[string]string{"memory": "1Gi"})
kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
@Random-Liu (Member) commented on Jun 8, 2017:
So you're using the default eviction threshold? Why did you remove the configuration?

(Contributor, author) replied:
Yes, the default is 250Mb. 10% seemed arbitrary, and I think using the default is a better way to check if it is working.

(Member) replied:
Then please add a comment here to explain that. :)

// The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
// We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
kubeReserved.Sub(resource.MustParse("300Mi"))
initialConfig.KubeReserved = componentconfig.ConfigurationMap(map[string]string{"memory": kubeReserved.String()})
initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
initialConfig.ExperimentalNodeAllocatableIgnoreEvictionThreshold = false
initialConfig.CgroupsPerQOS = true
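As a side note, the reservation arithmetic in the comment above can be checked with a minimal standalone sketch (not part of the PR). The 4Gi capacity is an assumed example value; the test itself reads the real figure from getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory].

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Assumed example capacity; the real test reads this from the node.
	const capacity = "4Gi"

	// KubeReserved = Capacity - 300Mi, as set in the diff above.
	kubeReserved := resource.MustParse(capacity)
	kubeReserved.Sub(resource.MustParse("300Mi"))

	// Allocatable = Capacity - Reserved - 250Mi (the default hard eviction
	// threshold cited in the review thread), leaving roughly 50Mi.
	allocatable := resource.MustParse(capacity)
	allocatable.Sub(kubeReserved)
	allocatable.Sub(resource.MustParse("250Mi"))

	fmt.Println("kubeReserved:", kubeReserved.String()) // expected: 3796Mi
	fmt.Println("allocatable:", allocatable.String())   // expected: 50Mi
}
```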
81 changes: 44 additions & 37 deletions test/e2e_node/inode_eviction_test.go
@@ -18,6 +18,7 @@ package e2e_node

import (
"fmt"
"path/filepath"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,44 +46,37 @@ const (
var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
f := framework.NewDefaultFramework("inode-eviction-test")

volumeMountPath := "/test-empty-dir-mnt"
podTestSpecs := []podTestSpec{
{
evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "container-inode-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "container-inode-hog-pod",
Command: []string{
"sh",
"-c", // Make 100 billion small files (more than we have inodes)
"i=0; while [[ $i -lt 100000000000 ]]; do touch smallfile$i.txt; sleep 0.001; i=$((i+=1)); done;",
},
Image: "gcr.io/google_containers/busybox:1.24",
Name: "container-inode-hog-container",
Command: getInodeConsumingCommand(""),
},
},
},
},
},
{
evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "volume-inode-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "volume-inode-hog-pod",
Command: []string{
"sh",
"-c", // Make 100 billion small files (more than we have inodes)
"i=0; while [[ $i -lt 100000000000 ]]; do touch /test-empty-dir-mnt/smallfile$i.txt; sleep 0.001; i=$((i+=1)); done;",
},
Image: "gcr.io/google_containers/busybox:1.24",
Name: "volume-inode-hog-container",
Command: getInodeConsumingCommand(volumeMountPath),
VolumeMounts: []v1.VolumeMount{
{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
{MountPath: volumeMountPath, Name: "test-empty-dir"},
},
},
},
@@ -94,31 +88,15 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
},
{
evictionPriority: 0, // This pod should never be evicted
pod: v1.Pod{
(Member) commented:
The original test case seems useful, why don't we keep it?
ObjectMeta: metav1.ObjectMeta{Name: "normal-memory-usage-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "normal-memory-usage-pod",
Command: []string{
"sh",
"-c", //make one big (5 Gb) file
"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
},
},
},
},
},
pod: getInnocentPod(),
},
}
evictionTestTimeout := 30 * time.Minute
testCondition := "Disk Pressure due to Inodes"

Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
initialConfig.EvictionHard = "nodefs.inodesFree<50%"
initialConfig.EvictionHard = "nodefs.inodesFree<70%"
})
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
Context("With kubeconfig updated", func() {
@@ -133,7 +111,7 @@ type podTestSpec struct {
// If two are ranked at 1, either is permitted to fail before the other.
// The test ends when all other than the 0 have been evicted
evictionPriority int
pod v1.Pod
pod *v1.Pod
}

// runEvictionTest sets up a testing environment given the provided nodes, and checks a few things:
@@ -148,7 +126,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
By("seting up pods to be used by tests")
for _, spec := range podTestSpecs {
By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
f.PodClient().CreateSync(&spec.pod)
f.PodClient().CreateSync(spec.pod)
}
})
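For illustration only, a self-contained sketch (hypothetical simplified types, not the real e2e framework) of what the switch from a v1.Pod value field to a *v1.Pod pointer field buys: shared helpers build each pod once, and ranging over the specs hands that pointer straight to the create call instead of taking the address of a per-iteration copy.

```go
package main

import "fmt"

// Simplified stand-ins for v1.Pod and the test's podTestSpec.
type pod struct{ Name string }

type podTestSpec struct {
	evictionPriority int
	pod              *pod // was a value field (v1.Pod) before this PR
}

func getInnocentPod() *pod { return &pod{Name: "innocent-pod"} }

func createSync(p *pod) { fmt.Println("creating", p.Name) }

func main() {
	specs := []podTestSpec{
		{evictionPriority: 1, pod: &pod{Name: "memory-hog-pod"}},
		{evictionPriority: 0, pod: getInnocentPod()},
	}
	for _, spec := range specs {
		// The pointer is passed through directly; previously the code passed
		// &spec.pod, i.e. the address of the loop variable's copy of the pod.
		createSync(spec.pod)
	}
}
```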

@@ -342,3 +320,32 @@ func hasInodePressure(f *framework.Framework, testCondition string) (bool, error
}
return hasPressure, nil
}

// returns a pod that does not use any resources
func getInnocentPod() *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
(Member) commented:
nit: Why not just use pause container?
Name: "innocent-container",
Command: []string{
"sh",
"-c", //make one large file
"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
},
},
},
},
}
}

func getInodeConsumingCommand(path string) []string {
return []string{
"sh",
"-c",
fmt.Sprintf("i=0; while true; do touch %s${i}.txt; sleep 0.001; i=$((i+=1)); done;", filepath.Join(path, "smallfile")),
}
}
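As a quick check (not from the PR), a standalone sketch of what getInodeConsumingCommand produces for the two call sites above: filepath.Join drops an empty path component, so the container-local variant touches files in the container's working directory, while the volume variant writes under the emptyDir mount.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Same shape as the helper added in this PR.
func getInodeConsumingCommand(path string) []string {
	return []string{
		"sh",
		"-c",
		fmt.Sprintf("i=0; while true; do touch %s${i}.txt; sleep 0.001; i=$((i+=1)); done;", filepath.Join(path, "smallfile")),
	}
}

func main() {
	// filepath.Join("", "smallfile") == "smallfile": files land in the
	// container's working directory.
	fmt.Println(getInodeConsumingCommand("")[2])
	// filepath.Join("/test-empty-dir-mnt", "smallfile") == "/test-empty-dir-mnt/smallfile":
	// files land on the emptyDir volume.
	fmt.Println(getInodeConsumingCommand("/test-empty-dir-mnt")[2])
}
```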
2 changes: 2 additions & 0 deletions test/e2e_node/jenkins/jenkins-flaky.properties
@@ -7,3 +7,5 @@ GINKGO_FLAGS='--focus="\[Flaky\]"'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
PARALLELISM=1
TIMEOUT=2h

6 changes: 3 additions & 3 deletions test/e2e_node/local_storage_allocatable_eviction_test.go
@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("LocalStorageAllocatableEviction [Slow] [Serial]
podTestSpecs = []podTestSpec{
{
evictionPriority: 1, // This pod should be evicted before the innocent pod
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "container-disk-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
@@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("LocalStorageAllocatableEviction [Slow] [Serial]

{
evictionPriority: 0, // This pod should never be evicted
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "idle-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
@@ -151,7 +151,7 @@ func runLocalStorageEvictionTest(f *framework.Framework, conditionType v1.NodeCo
By("seting up pods to be used by tests")
for _, spec := range *podTestSpecsP {
By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
f.PodClient().CreateSync(&spec.pod)
f.PodClient().CreateSync(spec.pod)
}
})

Expand Down