From a57bf83a7f6a56ea3ee42e68a81a24de5a15bcff Mon Sep 17 00:00:00 2001
From: joey
Date: Fri, 26 Apr 2024 22:04:53 +0800
Subject: [PATCH] add e2e test: when a plugin (in-tree or out-of-tree) returns
 non-existent/illegal nodes, the pod should be scheduled normally if suitable
 nodes exist

Signed-off-by: joey
---
 test/e2e/scheduling/predicates.go | 73 +++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 02e420e416669..5183c04a5aeba 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -39,6 +39,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -857,6 +858,78 @@ var _ = SIGDescribe("SchedulerPredicates", framework.WithSerial(), func() {
 		ginkgo.By("Expect all pods are scheduled and running")
 		framework.ExpectNoError(e2epod.WaitForPodsRunning(ctx, cs, ns, replicas, time.Minute))
 	})
+
+	// Regression test for an extended scenario of https://issues.k8s.io/123465
+	ginkgo.It("when a plugin (in-tree or out-of-tree) returns non-existent/illegal nodes, the pod should be scheduled normally if suitable nodes exist", func(ctx context.Context) {
+		nodeName := GetNodeThatCanRunPod(ctx, f)
+		nonExistentNodeName1 := string(uuid.NewUUID())
+		nonExistentNodeName2 := string(uuid.NewUUID())
+		hostLabel := "kubernetes.io/hostname"
+		localPath := "/tmp"
+		podName := "bind-pv-with-non-existent-nodes"
+		pvcName := "pvc-" + string(uuid.NewUUID())
+		_, pvc, err := e2epv.CreatePVPVC(ctx, cs, f.Timeouts, e2epv.PersistentVolumeConfig{
+			PVSource: v1.PersistentVolumeSource{
+				Local: &v1.LocalVolumeSource{
+					Path: localPath,
+				},
+			},
+			Prebind: &v1.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{Name: pvcName, Namespace: ns},
+			},
+			NodeAffinity: &v1.VolumeNodeAffinity{
+				Required: &v1.NodeSelector{
+					NodeSelectorTerms: []v1.NodeSelectorTerm{
+						{
+							MatchExpressions: []v1.NodeSelectorRequirement{
+								{
+									Key:      hostLabel,
+									Operator: v1.NodeSelectorOpIn,
+									// Mix non-existent node names in with the one schedulable node.
+									Values: []string{nodeName, nonExistentNodeName1, nonExistentNodeName2},
+								},
+							},
+						},
+					},
+				},
+			},
+		}, e2epv.PersistentVolumeClaimConfig{
+			Name: pvcName,
+		}, ns, true)
+		framework.ExpectNoError(err)
+		bindPvPod := &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: podName,
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Name:  "pause",
+						Image: imageutils.GetE2EImage(imageutils.Pause),
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								MountPath: "/tmp",
+							},
+						},
+					},
+				},
+				Volumes: []v1.Volume{
+					{
+						Name: "data",
+						VolumeSource: v1.VolumeSource{
+							PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+								ClaimName: pvc.Name,
+							},
+						},
+					},
+				},
+			},
+		}
+		_, err = f.ClientSet.CoreV1().Pods(ns).Create(ctx, bindPvPod, metav1.CreateOptions{})
+		framework.ExpectNoError(err)
+		framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, podName))
+	})
 })
 
 func patchPod(cs clientset.Interface, old, new *v1.Pod) (*v1.Pod, error) {
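
For context on the scenario the subject line names: https://issues.k8s.io/123465 concerns a scheduler framework plugin whose PreFilter result lists node names that do not exist in the cluster. The patch above exercises the in-tree half of this via PV node affinity; what follows is a minimal, hypothetical sketch of the out-of-tree half. The plugin name NoisyNodesPlugin and the node names are invented, and the three-argument PreFilter signature is assumed to match k8s.io/kubernetes/pkg/scheduler/framework around the time of this patch; verify it against your scheduler version.

package noisynodes

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// NoisyNodesPlugin is a hypothetical out-of-tree PreFilter plugin that
// narrows scheduling to a node set containing names that do not exist.
// The scheduler is expected to skip the bogus names and still place the
// pod on the one real node rather than leaving it Pending.
type NoisyNodesPlugin struct{}

var _ framework.PreFilterPlugin = &NoisyNodesPlugin{}

func (p *NoisyNodesPlugin) Name() string { return "NoisyNodes" }

func (p *NoisyNodesPlugin) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
	// "node-a" is assumed to exist; the other two names are deliberately
	// bogus, mirroring the non-existent entries the e2e test injects
	// through the PV's node affinity terms.
	return &framework.PreFilterResult{
		NodeNames: sets.New("node-a", "no-such-node-1", "no-such-node-2"),
	}, nil
}

// PreFilterExtensions returns nil because this sketch does not support
// incremental updates to its PreFilter result.
func (p *NoisyNodesPlugin) PreFilterExtensions() framework.PreFilterExtensions { return nil }

With a plugin like this registered, the expectation matches the test above: the pod bound to the PV should go Running on the real node instead of staying Pending.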