add e2e test for restart kubelet
When a node removes a label that satisfies a running pod's required node affinity, the pod is not affected immediately, but restarting the kubelet will kill such pods.

Signed-off-by: joey <zchengjoey@gmail.com>
chengjoey committed Apr 22, 2024
1 parent 76de052 commit a63cc95
Showing 2 changed files with 106 additions and 2 deletions.
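
Background on the behavior described in the commit message: the kubelet re-runs its admission checks, including required node affinity, for pods assigned to the node when it starts up, which is why the already-running pod is only rejected after the restart. A rough, standalone illustration of that affinity check, using the k8s.io/component-helpers nodeaffinity package rather than the exact kubelet code path, might look like this:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
)

func main() {
	// Pod requiring node-key=unique-value via RequiredDuringSchedulingIgnoredDuringExecution.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{{
							MatchExpressions: []v1.NodeSelectorRequirement{{
								Key:      "node-key",
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{"unique-value"},
							}},
						}},
					},
				},
			},
		},
	}
	// Node whose matching label has been removed.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}

	matches, err := nodeaffinity.GetRequiredNodeAffinity(pod).Match(node)
	// Prints "false <nil>": the affinity no longer matches, so a restarting
	// kubelet rejects (terminates) the pod even though it was already running.
	fmt.Println(matches, err)
}
```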
86 changes: 86 additions & 0 deletions test/e2e/common/node/kubelet.go
@@ -26,8 +26,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/kubernetes/test/e2e/storage/utils"
admissionapi "k8s.io/pod-security-admission/api"

"github.com/onsi/ginkgo/v2"
@@ -260,3 +262,87 @@ var _ = SIGDescribe("Kubelet with pods in a privileged namespace", func() {
})
})
})

var _ = SIGDescribe("Kubelet rejects pods that do not satisfy affinity after restart", func() {
f := framework.NewDefaultFramework("kubelet-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2epod.NewPodClient(f)
})
ginkgo.Context("when a pod is scheduled to a node that satisfies its affinity, and the node label is then removed and the kubelet restarted", func() {
podName := "pod-" + string(uuid.NewUUID())
/*
Release: v1.30
Testname: Kubelet, restart kubelet, pod rejected
Description: Create a Pod with a required node affinity that matches a unique node label. Remove the node label so the node no longer satisfies the Pod's affinity. Restart the kubelet. The Pod MUST be terminated.
*/
framework.ConformanceIt("should be rejected by the Kubelet", f.WithNodeConformance(), func(ctx context.Context) {
var (
nodeKey = "node-key"
nodeUniqueKey = string(uuid.NewUUID())
)
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err, "Failed to get a random ready schedulable node")
// add a unique label to the node
node.Labels[nodeKey] = nodeUniqueKey
node, err = f.ClientSet.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update node %q", node.Name)
pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeUniqueKey},
},
},
},
},
},
},
}
pod.Spec.RestartPolicy = v1.RestartPolicyNever
pod = podClient.CreateSync(ctx, pod)

// remove the node label so the node no longer satisfies the pod's affinity
delete(node.Labels, nodeKey)
_, err = f.ClientSet.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update node %q", node.Name)

// pod is still running
time.Sleep(5 * time.Second)
pod, err = podClient.Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", podName)
// without a kubelet restart, the pod should still be in the Running phase
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))

// restart the kubelet; WithHostPID is needed so the hostExec pod can restart the kubelet on the host
hostExec := utils.NewHostExec(f, utils.WithHostPID())
err = restartKubelet(ctx, node, hostExec)
framework.ExpectNoError(err, "Failed to restart kubelet on node %q", node.Name)
time.Sleep(5 * time.Second)
pod, err = podClient.Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", podName)

// after the kubelet restart, the pod should be terminated
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed))
})
})
})

// restartKubelet restarts the kubelet on the given node via systemctl, using a privileged host-exec pod.
func restartKubelet(ctx context.Context, node *v1.Node, hostExec utils.HostExec) error {
res, err := hostExec.Execute(ctx, "systemctl restart kubelet", node)
if err != nil {
return err
}
if res.Code != 0 {
return fmt.Errorf("failed to restart kubelet: %s", res.Stderr)
}
return nil
}
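
A side note on the test flow above: the two fixed time.Sleep(5 * time.Second) calls make the phase assertions timing-sensitive. A polling variant of the final check, sketched here with illustrative timeouts and not part of this commit, could look like:

```go
// Poll until the restarted kubelet has re-admitted and rejected the pod,
// instead of relying on a fixed sleep. Timeout and interval are illustrative.
gomega.Eventually(ctx, func(g gomega.Gomega) {
	p, err := podClient.Get(ctx, podName, metav1.GetOptions{})
	g.Expect(err).NotTo(gomega.HaveOccurred())
	g.Expect(p.Status.Phase).To(gomega.Equal(v1.PodFailed))
}).WithTimeout(2*time.Minute).WithPolling(2*time.Second).Should(gomega.Succeed())
```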
22 changes: 20 additions & 2 deletions test/e2e/storage/utils/host_exec.go
@@ -57,14 +57,29 @@ type HostExec interface {
type hostExecutor struct {
*framework.Framework
nodeExecPods map[string]*v1.Pod

// hostPID indicates whether the node exec pod should run in the host's PID namespace
hostPID bool
}

type ExecOption func(executor *hostExecutor)

func WithHostPID() ExecOption {
return func(executor *hostExecutor) {
executor.hostPID = true
}
}

// NewHostExec returns a HostExec
func NewHostExec(framework *framework.Framework) HostExec {
return &hostExecutor{
func NewHostExec(framework *framework.Framework, opts ...ExecOption) HostExec {
hostExec := &hostExecutor{
Framework: framework,
nodeExecPods: make(map[string]*v1.Pod),
}
for _, opt := range opts {
opt(hostExec)
}
return hostExec
}

// launchNodeExecPod launches a hostexec pod for local PV and waits
@@ -111,6 +126,9 @@ func (h *hostExecutor) launchNodeExecPod(ctx context.Context, node string) *v1.Pod {
return &privileged
}(true),
}
if h.hostPID {
hostExecPod.Spec.HostPID = true
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
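
For completeness, the new ExecOption hook follows the functional-options pattern. A minimal usage sketch (the probe command and log line are illustrative; NewHostExec, WithHostPID, and Execute come from the diffs above):

```go
// Run a command against the host on a specific node, with access to the
// host's PID namespace. f is the test's *framework.Framework and node a
// *v1.Node, as in the kubelet test above.
hostExec := utils.NewHostExec(f, utils.WithHostPID())
res, err := hostExec.Execute(ctx, "systemctl is-active kubelet", node)
framework.ExpectNoError(err, "Failed to exec on node %q", node.Name)
framework.Logf("kubelet on %q: %s (exit code %d)", node.Name, res.Stdout, res.Code)
```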
