feat(NodeVolumeLimits): return Skip in PreFilter #115398
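This change registers the EBSLimits, GCEPDLimits, NodeVolumeLimits, and AzureDiskLimits plugins at the PreFilter extension point in the default plugin sets, and implements PreFilter for both CSILimits and nonCSILimits so they return a Skip status when the pod has no volume that could count toward a node's volume limits, letting the framework bypass their Filter phase for that pod. A minimal sketch of the shared pattern follows (illustrative only, not the merged code; preFilter and isRelevant are hypothetical stand-ins for the per-plugin checks in the diff below):

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// preFilter returns Skip when none of the pod's volumes are relevant to
// volume-limit counting, and nil (success) when at least one is, in which
// case Filter runs as usual.
func preFilter(pod *v1.Pod, isRelevant func(*v1.Volume) bool) *framework.Status {
	for i := range pod.Spec.Volumes {
		if isRelevant(&pod.Spec.Volumes[i]) {
			return nil
		}
	}
	return framework.NewStatus(framework.Skip)
}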

Merged
12 changes: 12 additions & 0 deletions pkg/scheduler/apis/config/testing/defaults/defaults.go
@@ -38,6 +38,10 @@ var PluginsV1beta2 = &config.Plugins{
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},
@@ -206,6 +210,10 @@ var ExpandedPluginsV1beta3 = &config.Plugins{
{Name: names.NodePorts},
{Name: names.NodeResourcesFit},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.VolumeBinding},
{Name: names.VolumeZone},
{Name: names.PodTopologySpread},
@@ -385,6 +393,10 @@ var ExpandedPluginsV1 = &config.Plugins{
{Name: names.NodePorts},
{Name: names.NodeResourcesFit},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.VolumeBinding},
{Name: names.VolumeZone},
{Name: names.PodTopologySpread},
4 changes: 4 additions & 0 deletions pkg/scheduler/apis/config/v1beta2/default_plugins.go
@@ -39,6 +39,10 @@ func getDefaultPlugins() *v1beta2.Plugins {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},
8 changes: 8 additions & 0 deletions pkg/scheduler/apis/config/v1beta2/default_plugins_test.go
@@ -51,6 +51,10 @@ func TestApplyFeatureGates(t *testing.T) {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},
@@ -141,6 +145,10 @@ func TestApplyFeatureGates(t *testing.T) {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},
4 changes: 4 additions & 0 deletions pkg/scheduler/apis/config/v1beta2/defaults_test.go
@@ -341,6 +341,10 @@ func TestSchedulerDefaults(t *testing.T) {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},
21 changes: 21 additions & 0 deletions pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
@@ -60,6 +60,7 @@ type CSILimits struct {
translator InTreeToCSITranslator
}

var _ framework.PreFilterPlugin = &CSILimits{}
var _ framework.FilterPlugin = &CSILimits{}
var _ framework.EnqueueExtensions = &CSILimits{}

@@ -80,6 +81,26 @@ func (pl *CSILimits) EventsToRegister() []framework.ClusterEvent {
}
}

// PreFilter invoked at the prefilter extension point.
//
// If the pod doesn't have any of the volume types this plugin tracks, we'll skip the Filter phase.
func (pl *CSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]
if vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil || pl.translator.IsInlineMigratable(vol) {
return nil, nil
}
}

return nil, framework.NewStatus(framework.Skip)
}

// PreFilterExtensions returns prefilter extensions, pod add and remove. This plugin returns nil as it does not need them.
func (pl *CSILimits) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}

// Filter invoked at the filter extension point.
func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If the new pod doesn't have any volume attached to it, the predicate will always be true
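Note: once PreFilter returns Skip, the scheduling framework records it and does not invoke this plugin's Filter for that pod; the updated csi_test.go below follows the same contract by calling Filter only when the PreFilter status code is not framework.Skip.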
162 changes: 146 additions & 16 deletions pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go
@@ -17,13 +17,13 @@ limitations under the License.
package nodevolumelimits

import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"testing"

"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -36,6 +36,7 @@ import (
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
st "k8s.io/kubernetes/pkg/scheduler/testing"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/pointer"
)

@@ -189,19 +190,105 @@ func TestCSILimits(t *testing.T) {
},
},
}

onlyConfigmapAndSecretVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
},
},
}
pvcPodWithConfigmapAndSecret := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "csi-ebs.csi.aws.com-0"},
},
},
},
},
}
ephemeralPodWithConfigmapAndSecret := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: ephemeralVolumePod.Namespace,
Name: ephemeralVolumePod.Name,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
{
Name: "xyz",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
},
},
},
}
inlineMigratablePodWithConfigmapAndSecret := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "aws-inline1",
},
},
},
},
},
}
tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
extraClaims []v1.PersistentVolumeClaim
filterName string
maxVols int
driverNames []string
test string
migrationEnabled bool
ephemeralEnabled bool
limitSource string
wantStatus *framework.Status
newPod *v1.Pod
existingPods []*v1.Pod
extraClaims []v1.PersistentVolumeClaim
filterName string
maxVols int
driverNames []string
test string
migrationEnabled bool
ephemeralEnabled bool
limitSource string
wantStatus *framework.Status
wantPreFilterStatus *framework.Status
}{
{
newPod: csiEBSOneVolPod,
@@ -485,6 +572,42 @@
maxVols: 4,
test: "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume",
},
{
newPod: onlyConfigmapAndSecretVolPod,
filterName: "csi",
maxVols: 2,
driverNames: []string{ebsCSIDriverName},
test: "skip Filter when the pod only uses secrets and configmaps",
limitSource: "node",
wantPreFilterStatus: framework.NewStatus(framework.Skip),
},
{
newPod: pvcPodWithConfigmapAndSecret,
filterName: "csi",
maxVols: 2,
driverNames: []string{ebsCSIDriverName},
test: "don't skip Filter when the pod has pvcs",
limitSource: "node",
},
{
newPod: ephemeralPodWithConfigmapAndSecret,
filterName: "csi",
ephemeralEnabled: true,
driverNames: []string{ebsCSIDriverName},
test: "don't skip Filter when the pod has ephemeral volumes",
wantStatus: framework.AsStatus(errors.New(`looking up PVC test/abc-xyz: persistentvolumeclaim "abc-xyz" not found`)),
},
{
newPod: inlineMigratablePodWithConfigmapAndSecret,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
migrationEnabled: true,
limitSource: "csinode",
test: "don't skip Filter when the pod has inline migratable volumes",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
},
}

// running attachable predicate tests with feature gate and limit present on nodes
@@ -503,9 +626,16 @@
randomVolumeIDPrefix: rand.String(32),
translator: csiTranslator,
}
gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
_, ctx := ktesting.NewTestContext(t)
_, gotPreFilterStatus := p.PreFilter(ctx, nil, test.newPod)
if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
}
if gotPreFilterStatus.Code() != framework.Skip {
gotStatus := p.Filter(ctx, nil, test.newPod, node)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
}
})
}
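Assuming a standard Kubernetes development checkout, these cases can be run in isolation with the usual Go test invocation:

go test ./pkg/scheduler/framework/plugins/nodevolumelimits/ -run TestCSILimits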
22 changes: 22 additions & 0 deletions pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go
@@ -119,6 +119,7 @@ type nonCSILimits struct {
randomVolumeIDPrefix string
}

var _ framework.PreFilterPlugin = &nonCSILimits{}
var _ framework.FilterPlugin = &nonCSILimits{}
var _ framework.EnqueueExtensions = &nonCSILimits{}

@@ -208,6 +209,27 @@ func (pl *nonCSILimits) EventsToRegister() []framework.ClusterEvent {
}
}

// PreFilter invoked at the prefilter extension point.
//
// If the pod doesn't have any of the volume types this plugin tracks, we'll skip the Filter phase.
func (pl *nonCSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]
_, ok := pl.filter.FilterVolume(vol)
if ok || vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil {
return nil, nil
}
}

return nil, framework.NewStatus(framework.Skip)
}

// PreFilterExtensions returns prefilter extensions, pod add and remove. This plugin returns nil as it does not need them.
func (pl *nonCSILimits) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}

// Filter invoked at the filter extension point.
func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
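nonCSILimits applies the same gate as CSILimits, with one addition: a volume is also treated as relevant when the plugin's volume filter accepts it (pl.filter.FilterVolume), which is how the in-tree EBS, GCE PD, and Azure Disk variants of this plugin detect the volume types they count.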