cleanup: move the test of TaintBasedEvictions features to sig-node
1. move the integration test of TaintBasedEvictions to test/integration/node
2. move the e2e test of TaintBasedEvictions to e2e_node
fengzixu committed Feb 14, 2020
1 parent 0e37bce commit 689295b
Showing 9 changed files with 716 additions and 272 deletions.
1 change: 1 addition & 0 deletions test/e2e_node/BUILD
@@ -135,6 +135,7 @@ go_test(
"startup_probe_test.go",
"summary_test.go",
"system_node_critical_test.go",
"taints.go"
"topology_manager_test.go",
"volume_manager_test.go",
],
2 changes: 1 addition & 1 deletion test/e2e/scheduling/taints.go → test/e2e_node/taints.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduling
package e2enode

import (
"context"
43 changes: 43 additions & 0 deletions test/integration/node/BUILD
@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
)

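# Integration suite for node lifecycle behavior (TaintBasedEvictions); sized "large"
# and tagged "integration" so it runs with the integration jobs rather than the
# default unit-test runs.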
go_test(
name = "go_default_test",
size = "large",
srcs = [
"main_test.go",
"lifecycle_test.go",
],
tags = ["integration"],
deps = [
"//pkg/features:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//test/integration:go_default_library",
"//test/integration/framework:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
5 changes: 5 additions & 0 deletions test/integration/node/OWNERS
@@ -0,0 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
reviewers:
- sig-node-reviewers
289 changes: 289 additions & 0 deletions test/integration/node/lifecycle_test.go
@@ -0,0 +1,289 @@
package node

import (
"context"
"fmt"
"testing"
"time"

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
"k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)

// TestTaintBasedEvictions verifies pod eviction behavior driven by the TaintBasedEvictions feature
func TestTaintBasedEvictions(t *testing.T) {
// we need at least 2 nodes to prevent lifecycle manager from entering "fully-disrupted" mode
nodeCount := 3
zero := int64(0)
gracePeriod := int64(1)
heartbeatInterval := time.Second * 2
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testpod1", DeletionGracePeriodSeconds: &zero},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container", Image: imageutils.GetPauseImageName()},
},
Tolerations: []v1.Toleration{
{
Key: v1.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
},
},
TerminationGracePeriodSeconds: &gracePeriod,
},
}
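// Per-case toleration seconds, indexed to match the test cases below; when a case's
// pod declares a toleration, its TolerationSeconds is set to tolerationSeconds[i].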
tolerationSeconds := []int64{200, 300, 0}
tests := []struct {
name string
nodeTaints []v1.Taint
nodeConditions []v1.NodeCondition
pod *v1.Pod
waitForPodCondition string
}{
{
name: "Taint based evictions for NodeNotReady and 200 tolerationseconds",
nodeTaints: []v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
nodeConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
pod: testPod,
waitForPodCondition: "updated with tolerationSeconds of 200",
},
{
name: "Taint based evictions for NodeNotReady with no pod tolerations",
nodeTaints: []v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
nodeConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testpod1"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container", Image: imageutils.GetPauseImageName()},
},
},
},
waitForPodCondition: "updated with tolerationSeconds=300",
},
{
name: "Taint based evictions for NodeNotReady and 0 tolerationseconds",
nodeTaints: []v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute}},
nodeConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
pod: testPod,
waitForPodCondition: "terminating",
},
{
name: "Taint based evictions for NodeUnreachable",
nodeTaints: []v1.Taint{{Key: v1.TaintNodeUnreachable, Effect: v1.TaintEffectNoExecute}},
nodeConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionUnknown}},
},
}

// Enable TaintBasedEvictions
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintBasedEvictions, true)()

// Build admission chain handler.
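// PodTolerationRestriction and DefaultTolerationSeconds mirror the default admission
// chain, so created pods get the standard not-ready/unreachable tolerations (300s)
// injected when they do not specify their own.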
podTolerations := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})
admission := admission.NewChainHandler(
podTolerations,
defaulttolerationseconds.NewDefaultTolerationSeconds(),
)
for i, test := range tests {
t.Run(test.name, func(t *testing.T) {
testCtx := initTestMaster(t, "taint-based-evictions", admission)

// Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: testCtx.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
podTolerations.SetExternalKubeClientSet(externalClientset)
podTolerations.SetExternalKubeInformerFactory(externalInformers)

testCtx = initTestScheduler(t, testCtx, true, nil)
defer cleanupTest(t, testCtx)
cs := testCtx.clientSet
informers := testCtx.informerFactory
_, err := cs.CoreV1().Namespaces().Create(context.TODO(), testCtx.ns, metav1.CreateOptions{})
if err != nil {
t.Errorf("Failed to create namespace %+v", err)
}

// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
informers.Coordination().V1().Leases(),
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
informers.Apps().V1().DaemonSets(),
cs,
5*time.Second, // Node monitor grace period
time.Minute, // Node startup grace period
time.Millisecond, // Node monitor period
time.Second, // Pod eviction timeout
100, // Eviction limiter QPS
100, // Secondary eviction limiter QPS
50, // Large cluster threshold
0.55, // Unhealthy zone threshold
true, // Run taint manager
true, // Use taint based evictions
)
if err != nil {
t.Errorf("Failed to create node controller: %v", err)
return
}

go nc.Run(testCtx.ctx.Done())

// Wait for all controllers to sync.
externalInformers.Start(testCtx.ctx.Done())
externalInformers.WaitForCacheSync(testCtx.ctx.Done())
informers.Start(testCtx.ctx.Done())
informers.WaitForCacheSync(testCtx.ctx.Done())

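// All nodes share the same capacity/allocatable and sit in a single region/zone,
// so the NodeLifecycleController evaluates them as one zone.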
nodeRes := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
v1.ResourceMemory: resource.MustParse("16Gi"),
v1.ResourcePods: resource.MustParse("110"),
}

var nodes []*v1.Node
for i := 0; i < nodeCount; i++ {
nodes = append(nodes, &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("node-%d", i),
Labels: map[string]string{v1.LabelZoneRegion: "region1", v1.LabelZoneFailureDomain: "zone1"},
},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Capacity: nodeRes,
Allocatable: nodeRes,
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Now(),
},
},
},
})
if _, err := cs.CoreV1().Nodes().Create(context.TODO(), nodes[i], metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create node, err: %v", err)
}
}

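// Default to the second node for the pod-less NodeUnreachable case; when a pod is
// created below, neededNode is switched to the node the pod was scheduled onto.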
neededNode := nodes[1]
if test.pod != nil {
test.pod.Name = fmt.Sprintf("testpod-%d", i)
if len(test.pod.Spec.Tolerations) > 0 {
test.pod.Spec.Tolerations[0].TolerationSeconds = &tolerationSeconds[i]
}

test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err)
}

if err := waitForPodToSchedule(cs, test.pod); err != nil {
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
test.pod.Namespace, test.pod.Name, err)
}
test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err)
}
neededNode, err = cs.CoreV1().Nodes().Get(context.TODO(), test.pod.Spec.NodeName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Error while getting node associated with pod %v with err %v", test.pod.Name, err)
}
}

// Regularly send heartbeat events to the APIServer so that the cluster doesn't enter fully-disrupted mode.
// TODO(Huang-Wei): use the "NodeDisruptionExclusion" feature to simplify the logic below once it reaches beta.
for i := 0; i < nodeCount; i++ {
var conditions []v1.NodeCondition
// If the current node is not <neededNode>, keep reporting it as Ready.
if neededNode.Name != nodes[i].Name {
conditions = []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
}
} else {
c, err := nodeReadyStatus(test.nodeConditions)
if err != nil {
t.Error(err)
}
// We need to distinguish NodeReady/False from NodeReady/Unknown.
// If we update the node with condition NotReady/False, i.e. expect a NotReady:NoExecute taint,
// we need to keep sending the update event to keep the condition fresh, rather than sending it only once.
if c == v1.ConditionFalse {
conditions = test.nodeConditions
} else if c == v1.ConditionUnknown {
// If the node is to be updated with condition NotReady/Unknown,
// i.e. we expect an Unreachable:NoExecute taint,
// we send the update event only once to simulate the network-unreachable scenario.
nodeCopy := nodeCopyWithConditions(nodes[i], test.nodeConditions)
if err := updateNodeStatus(cs, nodeCopy); err != nil && !apierrors.IsNotFound(err) {
t.Errorf("Cannot update node: %v", err)
}
continue
}
}
// Keep sending NodeReady/True or NodeReady/False events.
go func(i int) {
for {
select {
case <-testCtx.ctx.Done():
return
case <-time.Tick(heartbeatInterval):
nodeCopy := nodeCopyWithConditions(nodes[i], conditions)
if err := updateNodeStatus(cs, nodeCopy); err != nil && !apierrors.IsNotFound(err) {
t.Errorf("Cannot update node: %v", err)
}
}
}
}(i)
}

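// The NodeLifecycleController should observe the unhealthy condition on neededNode
// and apply the expected NoExecute taint.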
if err := waitForNodeTaints(cs, neededNode, test.nodeTaints); err != nil {
t.Errorf("Failed to taint node in test %d <%s>, err: %v", i, neededNode.Name, err)
}

if test.pod != nil {
err = pod.WaitForPodCondition(cs, testCtx.ns.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
// With tolerationSeconds of 0 the pod is evicted immediately, but since no kubelet
// runs in this test it stays in Terminating (DeletionTimestamp set) rather than being deleted
if tolerationSeconds[i] == 0 {
return pod.DeletionTimestamp != nil, nil
}
if seconds, err := getTolerationSeconds(pod.Spec.Tolerations); err == nil {
return seconds == tolerationSeconds[i], nil
}
return false, nil
})
if err != nil {
pod, _ := cs.CoreV1().Pods(testCtx.ns.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.waitForPodCondition, pod)
}
cleanupPods(cs, t, []*v1.Pod{test.pod})
}
cleanupNodes(cs, t)
waitForSchedulerCacheCleanup(testCtx.scheduler, t)
})
}
}
27 changes: 27 additions & 0 deletions test/integration/node/main_test.go
@@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
"testing"

"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}
