feat: cleanup PodPriority features gate
draveness committed Jun 23, 2019
1 parent 0f2b01a commit ca6003b
Showing 17 changed files with 13 additions and 217 deletions.
7 changes: 0 additions & 7 deletions pkg/api/pod/util.go
@@ -315,13 +315,6 @@ func dropDisabledFields(
}
}

- if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) && !podPriorityInUse(oldPodSpec) {
- // Set to nil pod's priority fields if the feature is disabled and the old pod
- // does not specify any values for these fields.
- podSpec.Priority = nil
- podSpec.PriorityClassName = ""
- }
-
if !utilfeature.DefaultFeatureGate.Enabled(features.Sysctls) && !sysctlsInUse(oldPodSpec) {
if podSpec.SecurityContext != nil {
podSpec.SecurityContext.Sysctls = nil
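PodPriority is GA and locked on, so the gate-and-drop branch above is deleted outright. For context, the pattern it followed clears a field only when the gate is off and the old object never set the field, so toggling a gate off cannot wipe data from pre-existing pods. The podPriorityInUse helper referenced above is not shown in this diff; a minimal sketch of its likely shape, reconstructed from the call site:

// podPriorityInUse reports whether the old pod spec already carries priority
// data (sketch; the real helper lives in this file but is outside the hunk).
func podPriorityInUse(podSpec *api.PodSpec) bool {
	if podSpec == nil {
		return false
	}
	return podSpec.Priority != nil || podSpec.PriorityClassName != ""
}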
116 changes: 0 additions & 116 deletions pkg/api/pod/util_test.go
@@ -712,122 +712,6 @@ func TestDropProcMount(t *testing.T) {
}
}

- func TestDropPodPriority(t *testing.T) {
- podPriority := int32(1000)
- podWithoutPriority := func() *api.Pod {
- return &api.Pod{
- Spec: api.PodSpec{
- Priority: nil,
- PriorityClassName: "",
- },
- }
- }
- podWithPriority := func() *api.Pod {
- return &api.Pod{
- Spec: api.PodSpec{
- Priority: &podPriority,
- PriorityClassName: "",
- },
- }
- }
- podWithPriorityClassOnly := func() *api.Pod {
- return &api.Pod{
- Spec: api.PodSpec{
- Priority: nil,
- PriorityClassName: "HighPriorityClass",
- },
- }
- }
- podWithBothPriorityFields := func() *api.Pod {
- return &api.Pod{
- Spec: api.PodSpec{
- Priority: &podPriority,
- PriorityClassName: "HighPriorityClass",
- },
- }
- }
- podInfo := []struct {
- description string
- hasPodPriority bool
- pod func() *api.Pod
- }{
- {
- description: "pod With no PodPriority fields set",
- hasPodPriority: false,
- pod: podWithoutPriority,
- },
- {
- description: "feature disabled and pod With PodPriority field set but class name not set",
- hasPodPriority: true,
- pod: podWithPriority,
- },
- {
- description: "feature disabled and pod With PodPriority ClassName field set but PodPriority not set",
- hasPodPriority: true,
- pod: podWithPriorityClassOnly,
- },
- {
- description: "feature disabled and pod With both PodPriority ClassName and PodPriority fields set",
- hasPodPriority: true,
- pod: podWithBothPriorityFields,
- },
- {
- description: "is nil",
- hasPodPriority: false,
- pod: func() *api.Pod { return nil },
- },
- }
-
- for _, enabled := range []bool{true, false} {
- for _, oldPodInfo := range podInfo {
- for _, newPodInfo := range podInfo {
- oldPodHasPodPriority, oldPod := oldPodInfo.hasPodPriority, oldPodInfo.pod()
- newPodHasPodPriority, newPod := newPodInfo.hasPodPriority, newPodInfo.pod()
- if newPod == nil {
- continue
- }
-
- t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, enabled)()
-
- var oldPodSpec *api.PodSpec
- if oldPod != nil {
- oldPodSpec = &oldPod.Spec
- }
- dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
-
- // old pod should never be changed
- if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
- t.Errorf("old pod changed: %v", diff.ObjectReflectDiff(oldPod, oldPodInfo.pod()))
- }
-
- switch {
- case enabled || oldPodHasPodPriority:
- // new pod should not be changed if the feature is enabled, or if the old pod had PodPriority
- if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
- t.Errorf("new pod changed: %v", diff.ObjectReflectDiff(newPod, newPodInfo.pod()))
- }
- case newPodHasPodPriority:
- // new pod should be changed
- if reflect.DeepEqual(newPod, newPodInfo.pod()) {
- t.Errorf("new pod was not changed")
- }
- // new pod should not have PodPriority
- if !reflect.DeepEqual(newPod, podWithoutPriority()) {
- t.Errorf("new pod had PodPriority: %v", diff.ObjectReflectDiff(newPod, podWithoutPriority()))
- }
- default:
- // new pod should not need to be changed
- if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
- t.Errorf("new pod changed: %v", diff.ObjectReflectDiff(newPod, newPodInfo.pod()))
- }
- }
- })
- }
- }
- }
- }
-
func TestDropEmptyDirSizeLimit(t *testing.T) {
sizeLimit := resource.MustParse("1Gi")
podWithEmptyDirSizeLimit := func() *api.Pod {
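The deleted test exercised both gate settings through the SetFeatureGateDuringTest idiom that appears throughout this commit. The helper flips a gate and returns a restore function, which is why the trailing () is deferred rather than invoked immediately; a usage sketch with a hypothetical test name:

func TestWithGatePinned(t *testing.T) {
	// Pin the gate on for this test only; the returned func restores the
	// previous value when the deferred call runs at test exit.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.Sysctls, true)()

	// ... assertions that assume the feature is enabled ...
}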
1 change: 0 additions & 1 deletion pkg/apis/core/validation/validation_test.go
@@ -6332,7 +6332,6 @@ func TestValidatePodSpec(t *testing.T) {
minGroupID := int64(0)
maxGroupID := int64(2147483647)

- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RuntimeClass, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()

2 changes: 1 addition & 1 deletion pkg/features/kube_features.go
@@ -509,7 +509,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
Sysctls: {Default: true, PreRelease: featuregate.Beta},
DebugContainers: {Default: false, PreRelease: featuregate.Alpha},
PodShareProcessNamespace: {Default: true, PreRelease: featuregate.Beta},
- PodPriority: {Default: true, PreRelease: featuregate.GA},
+ PodPriority: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18
TaintNodesByCondition: {Default: true, PreRelease: featuregate.Beta},
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
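LockToDefault is what turns this commit into a pure cleanup: a locked GA gate rejects any override, so runtime Enabled() checks become dead code. A self-contained sketch of the locking semantics, with a simplified featureSpec standing in for featuregate.FeatureSpec:

package main

import "fmt"

// featureSpec mirrors the two featuregate.FeatureSpec fields that matter here.
type featureSpec struct {
	Default       bool
	LockToDefault bool
}

// set approximates how a gate handles a --feature-gates override:
// a locked gate only accepts its default value.
func set(name string, spec featureSpec, requested bool) error {
	if spec.LockToDefault && requested != spec.Default {
		return fmt.Errorf("cannot set feature gate %s to %v, feature is locked to %v", name, requested, spec.Default)
	}
	return nil
}

func main() {
	spec := featureSpec{Default: true, LockToDefault: true}
	fmt.Println(set("PodPriority", spec, true))  // <nil>: matches the default
	fmt.Println(set("PodPriority", spec, false)) // error: gate is locked to true
}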
5 changes: 1 addition & 4 deletions pkg/kubeapiserver/options/plugins.go
@@ -138,12 +138,9 @@ func DefaultOffAdmissionPlugins() sets.String {
validatingwebhook.PluginName, //ValidatingAdmissionWebhook
resourcequota.PluginName, //ResourceQuota
storageobjectinuseprotection.PluginName, //StorageObjectInUseProtection
+ podpriority.PluginName, //PodPriority
)

- if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
- defaultOnPlugins.Insert(podpriority.PluginName) //PodPriority
- }
-
if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) {
defaultOnPlugins.Insert(nodetaint.PluginName) //TaintNodesByCondition
}
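The Priority admission plugin moves into the default-on set unconditionally instead of being inserted behind a gate check. DefaultOffAdmissionPlugins derives the default-off set by subtraction; roughly, with the body reconstructed around the names visible in the diff:

func DefaultOffAdmissionPlugins() sets.String {
	defaultOnPlugins := sets.NewString(
		// ... the other default-on plugins listed above ...
		podpriority.PluginName, // Priority: now always on by default
	)
	return sets.NewString(AllOrderedPlugins...).Difference(defaultOnPlugins)
}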
7 changes: 0 additions & 7 deletions pkg/kubelet/eviction/eviction_manager_test.go
@@ -182,7 +182,6 @@ type podToMake struct {

// TestMemoryPressure
func TestMemoryPressure(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := []podToMake{
@@ -401,7 +400,6 @@ func parseQuantity(value string) resource.Quantity {

func TestDiskPressureNodeFs(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()

podMaker := makePodWithDiskStats
summaryStatsMaker := makeDiskStats
@@ -600,7 +598,6 @@ func TestDiskPressureNodeFs(t *testing.T) {

// TestMinReclaim verifies that min-reclaim works as desired.
func TestMinReclaim(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := []podToMake{
@@ -740,7 +737,6 @@ func TestMinReclaim(t *testing.T) {

func TestNodeReclaimFuncs(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()

podMaker := makePodWithDiskStats
summaryStatsMaker := makeDiskStats
@@ -917,7 +913,6 @@ func TestNodeReclaimFuncs(t *testing.T) {
}

func TestInodePressureNodeFsInodes(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
podMaker := func(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootInodes, logInodes, volumeInodes string) (*v1.Pod, statsapi.PodStats) {
pod := newPod(name, priority, []v1.Container{
newContainer(name, requests, limits),
@@ -1139,7 +1134,6 @@ func TestInodePressureNodeFsInodes(t *testing.T) {

// TestCriticalPodsAreNotEvicted
func TestCriticalPodsAreNotEvicted(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := []podToMake{
@@ -1280,7 +1274,6 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {

// TestAllocatableMemoryPressure
func TestAllocatableMemoryPressure(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := []podToMake{
4 changes: 0 additions & 4 deletions pkg/kubelet/eviction/helpers.go
@@ -513,10 +513,6 @@ func (ms *multiSorter) Less(i, j int) bool {

// priority compares pods by Priority, if priority is enabled.
func priority(p1, p2 *v1.Pod) int {
- if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
- // If priority is not enabled, all pods are equal.
- return 0
- }
priority1 := schedulerutils.GetPodPriority(p1)
priority2 := schedulerutils.GetPodPriority(p2)
if priority1 == priority2 {
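With the gate check gone, the eviction comparator always consults pod priority. The rest of the function falls outside the hunk; its resulting shape, reconstructed rather than quoted:

// priority ranks pods by their effective priority; lower-priority pods sort
// first, which is the victim order the eviction manager wants.
func priority(p1, p2 *v1.Pod) int {
	priority1 := schedulerutils.GetPodPriority(p1)
	priority2 := schedulerutils.GetPodPriority(p2)
	if priority1 == priority2 {
		return 0
	}
	if priority1 > priority2 {
		return 1
	}
	return -1
}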
32 changes: 0 additions & 32 deletions pkg/kubelet/eviction/helpers_test.go
@@ -483,8 +483,6 @@ func thresholdEqual(a evictionapi.Threshold, b evictionapi.Threshold) bool {
}

func TestOrderedByExceedsRequestMemory(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
-
below := newPod("below-requests", -1, []v1.Container{
newContainer("below-requests", newResourceList("", "200Mi", ""), newResourceList("", "", "")),
}, nil)
@@ -511,7 +509,6 @@ func TestOrderedByExceedsRequestMemory(t *testing.T) {
}

func TestOrderedByExceedsRequestDisk(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
below := newPod("below-requests", -1, []v1.Container{
newContainer("below-requests", v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("200Mi")}, newResourceList("", "", "")),
@@ -539,7 +536,6 @@ func TestOrderedByExceedsRequestDisk(t *testing.T) {
}

func TestOrderedByPriority(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
low := newPod("low-priority", -134, []v1.Container{
newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
}, nil)
@@ -561,30 +557,6 @@ func TestOrderedByPriority(t *testing.T) {
}
}

- func TestOrderedByPriorityDisabled(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, false)()
- low := newPod("low-priority", lowPriority, []v1.Container{
- newContainer("low-priority", newResourceList("", "", ""), newResourceList("", "", "")),
- }, nil)
- medium := newPod("medium-priority", defaultPriority, []v1.Container{
- newContainer("medium-priority", newResourceList("100m", "100Mi", ""), newResourceList("200m", "200Mi", "")),
- }, nil)
- high := newPod("high-priority", highPriority, []v1.Container{
- newContainer("high-priority", newResourceList("200m", "200Mi", ""), newResourceList("200m", "200Mi", "")),
- }, nil)
-
- pods := []*v1.Pod{high, medium, low}
- orderedBy(priority).Sort(pods)
-
- // orderedBy(priority) should not change the input ordering, since we did not enable the PodPriority feature gate
- expected := []*v1.Pod{high, medium, low}
- for i := range expected {
- if pods[i] != expected[i] {
- t.Errorf("Expected pod: %s, but got: %s", expected[i].Name, pods[i].Name)
- }
- }
- }
-
func TestOrderedbyDisk(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
pod1 := newPod("best-effort-high", defaultPriority, []v1.Container{
@@ -719,7 +691,6 @@ func TestOrderedbyDiskDisableLocalStorage(t *testing.T) {
}

func TestOrderedbyInodes(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
low := newPod("low", defaultPriority, []v1.Container{
newContainer("low", newResourceList("", "", ""), newResourceList("", "", "")),
@@ -763,7 +734,6 @@ func TestOrderedbyInodes(t *testing.T) {

// TestOrderedByPriorityDisk ensures we order pods by priority and then greediest resource consumer
func TestOrderedByPriorityDisk(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolation, true)()
pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
@@ -848,7 +818,6 @@ func TestOrderedByPriorityDisk(t *testing.T) {

// TestOrderedByPriorityInodes ensures we order pods by priority and then greediest resource consumer
func TestOrderedByPriorityInodes(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
pod1 := newPod("low-priority-high-usage", lowPriority, []v1.Container{
newContainer("low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
}, []v1.Volume{
@@ -941,7 +910,6 @@ func TestOrderedByMemory(t *testing.T) {

// TestOrderedByPriorityMemory ensures we order by priority and then memory consumption relative to request.
func TestOrderedByPriorityMemory(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
pod1 := newPod("above-requests-low-priority-high-usage", lowPriority, []v1.Container{
newContainer("above-requests-low-priority-high-usage", newResourceList("", "", ""), newResourceList("", "", "")),
}, nil)
14 changes: 5 additions & 9 deletions pkg/kubelet/types/pod_update.go
@@ -146,10 +146,8 @@ func (sp SyncPodType) String() string {
// or equal to SystemCriticalPriority. Both the default scheduler and the kubelet use this function
// to make admission and scheduling decisions.
func IsCriticalPod(pod *v1.Pod) bool {
- if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
- if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
- return true
- }
+ if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
+ return true
}
if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) {
if IsCritical(pod.Namespace, pod.Annotations) {
@@ -165,11 +163,9 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool {
if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) {
return true
}
- if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
- if (preemptor != nil && preemptor.Spec.Priority != nil) &&
- (preemptee != nil && preemptee.Spec.Priority != nil) {
- return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
- }
+ if (preemptor != nil && preemptor.Spec.Priority != nil) &&
+ (preemptee != nil && preemptee.Spec.Priority != nil) {
+ return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
}

return false
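IsCriticalPodBasedOnPriority, called in both functions above, reduces to a threshold check against the scheduling API's system-critical constant — approximately:

// A pod is critical when its priority is greater than or equal to
// SystemCriticalPriority (sketch; the constant lives in pkg/apis/scheduling).
func IsCriticalPodBasedOnPriority(priority int32) bool {
	return priority >= scheduling.SystemCriticalPriority
}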
1 change: 0 additions & 1 deletion pkg/scheduler/BUILD
@@ -21,7 +21,6 @@ go_library(
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
3 changes: 1 addition & 2 deletions pkg/scheduler/scheduler.go
@@ -42,7 +42,6 @@ import (
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"
)

const (
@@ -465,7 +464,6 @@ func (sched *Scheduler) scheduleOne() {
// will fit due to the preemption. It is also possible that a different pod will schedule
// into the resources that were preempted, but this is harmless.
if fitError, ok := err.(*core.FitError); ok {
- if !util.PodPriorityEnabled() || sched.config.DisablePreemption {
+ if sched.config.DisablePreemption {
klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." +
" No preemption is performed.")
} else {
2 changes: 0 additions & 2 deletions pkg/scheduler/util/BUILD
@@ -30,12 +30,10 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/util",
deps = [
"//pkg/apis/scheduling:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
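These BUILD edits drop the feature-gate dependencies from pkg/scheduler/util, matching the scheduler.go change above: the deleted util.PodPriorityEnabled call was presumably a thin wrapper over the gate, along the lines of the sketch below, and with the gate locked to true it could only ever return true.

// PodPriorityEnabled reported whether the PodPriority gate was on; once the
// gate is GA and locked to its default, the helper is constant-true.
func PodPriorityEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.PodPriority)
}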
