diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go
index 72a5912401f72..03dc895ad9b3d 100644
--- a/pkg/scheduler/testing/wrappers.go
+++ b/pkg/scheduler/testing/wrappers.go
@@ -882,6 +882,20 @@ func (p *PersistentVolumeWrapper) HostPathVolumeSource(src *v1.HostPathVolumeSou
 	return p
 }
 
+// NodeAffinityIn creates a HARD node affinity (with the operator In)
+// and injects it into the pv.
+func (p *PersistentVolumeWrapper) NodeAffinityIn(key string, vals []string) *PersistentVolumeWrapper {
+	if p.Spec.NodeAffinity == nil {
+		p.Spec.NodeAffinity = &v1.VolumeNodeAffinity{}
+	}
+	if p.Spec.NodeAffinity.Required == nil {
+		p.Spec.NodeAffinity.Required = &v1.NodeSelector{}
+	}
+	nodeSelector := MakeNodeSelector().In(key, vals).Obj()
+	p.Spec.NodeAffinity.Required.NodeSelectorTerms = append(p.Spec.NodeAffinity.Required.NodeSelectorTerms, nodeSelector.NodeSelectorTerms...)
+	return p
+}
+
 // ResourceClaimWrapper wraps a ResourceClaim inside.
 type ResourceClaimWrapper struct{ resourcev1alpha2.ResourceClaim }
 
diff --git a/test/integration/scheduler/filters/filters_test.go b/test/integration/scheduler/filters/filters_test.go
index 18c64833c5edf..bfe8a0c7c188f 100644
--- a/test/integration/scheduler/filters/filters_test.go
+++ b/test/integration/scheduler/filters/filters_test.go
@@ -26,6 +26,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/kubernetes"
@@ -2036,6 +2037,53 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 				return deletePod(cs, "pod-to-be-deleted", ns)
 			},
 		},
+		{
+			name: "pod with pvc has node-affinity to non-existent/illegal nodes",
+			init: func(cs kubernetes.Interface, ns string) error {
+				storage := v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}
+				volType := v1.HostPathDirectoryOrCreate
+				pv, err := testutils.CreatePV(cs, st.MakePersistentVolume().
+					Name("pv-has-non-existent-nodes").
+					AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
+					Capacity(storage.Requests).
+					HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}).
+					NodeAffinityIn("kubernetes.io/hostname", []string{"node-available", string(uuid.NewUUID()), string(uuid.NewUUID())}). // one node exists, two don't
+					Obj())
+				if err != nil {
+					return fmt.Errorf("cannot create pv: %w", err)
+				}
+				_, err = testutils.CreatePVC(cs, st.MakePersistentVolumeClaim().
+					Name("pvc-has-non-existent-nodes").
+					Namespace(ns).
+					Annotation(volume.AnnBindCompleted, "true").
+					VolumeName(pv.Name).
+					AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
+					Resources(storage).
+					Obj())
+				if err != nil {
+					return fmt.Errorf("cannot create pvc: %w", err)
+				}
+				return nil
+			},
+			pod: &testutils.PausePodConfig{
+				Name: "pod-with-pvc-has-non-existent-nodes",
+				Volumes: []v1.Volume{{
+					Name: "volume",
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "pvc-has-non-existent-nodes",
+						},
+					},
+				}},
+			},
+			update: func(cs kubernetes.Interface, ns string) error {
+				_, err := createNode(cs, st.MakeNode().Label("kubernetes.io/hostname", "node-available").Name("node-available").Obj())
+				if err != nil {
+					return fmt.Errorf("cannot create node: %w", err)
+				}
+				return nil
+			},
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {