Remove enableNonPreempting field from scheduler codebase #90915

Merged
2 changes: 0 additions & 2 deletions pkg/scheduler/BUILD
@@ -61,7 +61,6 @@ go_test(
 	deps = [
 		"//pkg/apis/core:go_default_library",
 		"//pkg/controller/volume/scheduling:go_default_library",
-		"//pkg/features:go_default_library",
 		"//pkg/scheduler/apis/config:go_default_library",
 		"//pkg/scheduler/apis/config/scheme:go_default_library",
 		"//pkg/scheduler/core:go_default_library",
@@ -91,7 +90,6 @@ go_test(
 		"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-		"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
 		"//staging/src/k8s.io/client-go/informers:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
3 changes: 1 addition & 2 deletions pkg/scheduler/core/extender_test.go
@@ -600,8 +600,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 				informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
 				informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
 				false,
-				schedulerapi.DefaultPercentageOfNodesToScore,
-				false)
+				schedulerapi.DefaultPercentageOfNodesToScore)
 			podIgnored := &v1.Pod{}
 			result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), podIgnored)
 			if test.expectsErr {
11 changes: 4 additions & 7 deletions pkg/scheduler/core/generic_scheduler.go
@@ -131,7 +131,6 @@ type genericScheduler struct {
 	pdbLister                policylisters.PodDisruptionBudgetLister
 	disablePreemption        bool
 	percentageOfNodesToScore int32
-	enableNonPreempting      bool
 	nextStartNodeIndex       int
 }

@@ -259,7 +258,7 @@ func (g *genericScheduler) Preempt(ctx context.Context, prof *profile.Profile, s
 	if !ok || fitError == nil {
 		return nil, nil, nil, nil
 	}
-	if !podEligibleToPreemptOthers(pod, g.nodeInfoSnapshot.NodeInfos(), g.enableNonPreempting) {
+	if !podEligibleToPreemptOthers(pod, g.nodeInfoSnapshot.NodeInfos()) {
 		klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
 		return nil, nil, nil, nil
 	}
@@ -1053,8 +1052,8 @@ func nodesWherePreemptionMightHelp(nodes []*framework.NodeInfo, fitErr *FitError
 // considered for preemption.
 // We look at the node that is nominated for this pod and as long as there are
 // terminating pods on the node, we don't consider this for preempting more pods.
-func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister, enableNonPreempting bool) bool {
-	if enableNonPreempting && pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
+func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister) bool {
+	if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
 		klog.V(5).Infof("Pod %v/%v is not eligible for preemption because it has a preemptionPolicy of %v", pod.Namespace, pod.Name, v1.PreemptNever)
 		return false
 	}
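
Review note: the hunk above is the behavioral core of this PR. With the `enableNonPreempting` guard dropped, a pod whose `preemptionPolicy` is `Never` is now screened out of preemption unconditionally, instead of only when the NonPreemptingPriority feature gate was enabled. A minimal sketch of the check as it reads after this change — simplified, since the real helper also inspects terminating pods on the pod's nominated node:

```go
package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// eligibleToPreemptOthers mirrors the post-PR shape of
// podEligibleToPreemptOthers, minus the nominated-node check:
// the pod's PreemptionPolicy field is consulted directly, with
// no feature-gate parameter threaded in from the constructor.
func eligibleToPreemptOthers(pod *v1.Pod) bool {
	if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
		return false // Never-preempting pods never trigger preemption.
	}
	return true
}
```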
@@ -1108,8 +1107,7 @@ func NewGenericScheduler(
 	pvcLister corelisters.PersistentVolumeClaimLister,
 	pdbLister policylisters.PodDisruptionBudgetLister,
 	disablePreemption bool,
-	percentageOfNodesToScore int32,
-	enableNonPreempting bool) ScheduleAlgorithm {
+	percentageOfNodesToScore int32) ScheduleAlgorithm {
 	return &genericScheduler{
 		cache:           cache,
 		schedulingQueue: podQueue,
@@ -1119,6 +1117,5 @@
 		pdbLister:                pdbLister,
 		disablePreemption:        disablePreemption,
 		percentageOfNodesToScore: percentageOfNodesToScore,
-		enableNonPreempting:      enableNonPreempting,
 	}
 }
14 changes: 5 additions & 9 deletions pkg/scheduler/core/generic_scheduler_test.go
@@ -817,8 +816,7 @@ func TestGenericScheduler(t *testing.T) {
 			pvcLister,
 			informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
 			false,
-			schedulerapi.DefaultPercentageOfNodesToScore,
-			false)
+			schedulerapi.DefaultPercentageOfNodesToScore)
 		result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), test.pod)
 		if !reflect.DeepEqual(err, test.wErr) {
 			t.Errorf("Unexpected error: %v, expected: %v", err.Error(), test.wErr)
@@ -845,7 +844,7 @@ func makeScheduler(nodes []*v1.Node) *genericScheduler {
 		internalqueue.NewSchedulingQueue(nil),
 		emptySnapshot,
 		nil, nil, nil, false,
-		schedulerapi.DefaultPercentageOfNodesToScore, false)
+		schedulerapi.DefaultPercentageOfNodesToScore)
 	cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
 	return s.(*genericScheduler)
 }
@@ -1139,8 +1138,7 @@ func TestZeroRequest(t *testing.T) {
 		nil,
 		nil,
 		false,
-		schedulerapi.DefaultPercentageOfNodesToScore,
-		false).(*genericScheduler)
+		schedulerapi.DefaultPercentageOfNodesToScore).(*genericScheduler)
 	scheduler.nodeInfoSnapshot = snapshot

 	ctx := context.Background()
@@ -1619,8 +1617,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 			nil,
 			informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
 			false,
-			schedulerapi.DefaultPercentageOfNodesToScore,
-			false)
+			schedulerapi.DefaultPercentageOfNodesToScore)
 		g := scheduler.(*genericScheduler)

 		assignDefaultStartTime(test.pods)
@@ -2416,8 +2413,7 @@ func TestPreempt(t *testing.T) {
 			informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
 			informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
 			false,
-			schedulerapi.DefaultPercentageOfNodesToScore,
-			true)
+			schedulerapi.DefaultPercentageOfNodesToScore)
 		state := framework.NewCycleState()
 		// Some tests rely on PreFilter plugin to compute its CycleState.
 		preFilterStatus := fwk.RunPreFilterPlugins(context.Background(), state, test.pod)
3 changes: 0 additions & 3 deletions pkg/scheduler/factory.go
@@ -101,8 +101,6 @@ type Configurator struct {

 	podMaxBackoffSeconds int64

-	enableNonPreempting bool
-
 	profiles []schedulerapi.KubeSchedulerProfile
 	registry framework.Registry
 	nodeInfoSnapshot *internalcache.Snapshot
@@ -204,7 +202,6 @@ func (c *Configurator) create() (*Scheduler, error) {
 		GetPodDisruptionBudgetLister(c.informerFactory),
 		c.disablePreemption,
 		c.percentageOfNodesToScore,
-		c.enableNonPreempting,
 	)

 	return &Scheduler{
3 changes: 0 additions & 3 deletions pkg/scheduler/factory_test.go
@@ -29,7 +29,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/clock"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
@@ -38,7 +37,6 @@ import (
 	"k8s.io/client-go/tools/events"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
 	apicore "k8s.io/kubernetes/pkg/apis/core"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
 	frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
@@ -471,7 +469,6 @@ func newConfigFactoryWithFrameworkRegistry(
 		podInitialBackoffSeconds: podInitialBackoffDurationSeconds,
 		podMaxBackoffSeconds:     podMaxBackoffDurationSeconds,
 		StopEverything:           stopCh,
-		enableNonPreempting:      utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NonPreemptingPriority),
 		registry:                 registry,
 		profiles: []schedulerapi.KubeSchedulerProfile{
 			{SchedulerName: testSchedulerName},
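
Review note: the deletions in this file (and in factory.go above and scheduler.go below) are all instances of one plumbing pattern: read the NonPreemptingPriority gate once at construction time and thread the resulting bool down to `podEligibleToPreemptOthers`. For reference, a self-contained sketch of the gate read being removed — this is the standard feature-gate API already used in the deleted lines, shown only to make them legible:

```go
package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	kubefeatures "k8s.io/kubernetes/pkg/features"
)

func main() {
	// The pattern deleted throughout this PR: sample the feature gate
	// at startup and pass the bool through the scheduler's constructors.
	enabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NonPreemptingPriority)
	fmt.Println("NonPreemptingPriority enabled:", enabled)
}
```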
3 changes: 0 additions & 3 deletions pkg/scheduler/scheduler.go
@@ -28,15 +28,13 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller/volume/scheduling"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
 	"k8s.io/kubernetes/pkg/scheduler/core"
@@ -279,7 +277,6 @@ func New(client clientset.Interface,
 		bindTimeoutSeconds:       options.bindTimeoutSeconds,
 		podInitialBackoffSeconds: options.podInitialBackoffSeconds,
 		podMaxBackoffSeconds:     options.podMaxBackoffSeconds,
-		enableNonPreempting:      utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NonPreemptingPriority),
 		profiles:                 append([]schedulerapi.KubeSchedulerProfile(nil), options.profiles...),
 		registry:                 registry,
 		nodeInfoSnapshot:         snapshot,
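
Review note: with the gate read gone from `New` as well, nothing on the scheduler side toggles this behavior anymore; the remaining knob is the pod's own spec, in practice usually populated from its PriorityClass at admission. A self-contained illustration of that field — the pod name and namespace are made up for the example:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical pod that opts out of preempting others.
	never := v1.PreemptNever
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "batch-job", Namespace: "default"},
		Spec:       v1.PodSpec{PreemptionPolicy: &never},
	}
	fmt.Printf("%s/%s preemptionPolicy=%v\n", pod.Namespace, pod.Name, *pod.Spec.PreemptionPolicy)
}
```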
2 changes: 0 additions & 2 deletions pkg/scheduler/scheduler_test.go
@@ -820,7 +820,6 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
 		informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
 		false,
 		schedulerapi.DefaultPercentageOfNodesToScore,
-		false,
 	)

 	errChan := make(chan error, 1)
@@ -1175,7 +1174,6 @@ func TestSchedulerBinding(t *testing.T) {
 		nil,
 		false,
 		0,
-		false,
 	)
 	sched := Scheduler{
 		Algorithm: algo,