Split pod evictor and evictor filter
ingvagabund committed Jun 13, 2022
1 parent 84c8d1c commit edbf8b7
Showing 28 changed files with 266 additions and 252 deletions.
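
At a glance, the split changes the call pattern like this — a minimal sketch assembled from the diffs below, with variable names taken from the diff and the surrounding setup elided:

// Before: evictability checks lived on the evictor itself.
evictable := podEvictor.Evictable(evictions.WithNodeFit(nodeFit))
if evictable.IsEvictable(pod) {
	// evict via podEvictor
}

// After: the checks live in a standalone filter, constructed per strategy.
evictorFilter := evictions.NewEvictorFilter(
	nodes, getPodsAssignedToNode,
	evictLocalStoragePods, evictSystemCriticalPods,
	ignorePvcPods, evictFailedBarePods,
	evictions.WithNodeFit(nodeFit),
	evictions.WithPriorityThreshold(thresholdPriority),
)
if evictorFilter.Filter(pod) {
	// evict via podEvictor
}

PodEvictor keeps only the eviction mechanics (dry-run, per-node and per-namespace limits, metrics), while the evictability policy is built per strategy with strategy-specific options.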
22 changes: 15 additions & 7 deletions pkg/descheduler/descheduler.go
@@ -45,6 +45,7 @@ import (
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
"sigs.k8s.io/descheduler/pkg/utils"
)

func Run(ctx context.Context, rs *options.DeschedulerServer) error {
@@ -87,7 +88,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return runFn()
}

type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)

func cachedClient(
realClient clientset.Interface,
@@ -283,18 +284,25 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
getPodsAssignedToNode,
evictLocalStoragePods,
evictSystemCriticalPods,
ignorePvcPods,
evictBarePods,
!rs.DisableMetrics,
)

for name, strategy := range deschedulerPolicy.Strategies {
if f, ok := strategyFuncs[name]; ok {
if strategy.Enabled {
f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
nodeFit := false
if name != "PodLifeTime" {
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, rs.Client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
continue
}
evictorFilter := evictions.NewEvictorFilter(nodes, getPodsAssignedToNode, evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods, evictions.WithNodeFit(nodeFit), evictions.WithPriorityThreshold(thresholdPriority))
f(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
46 changes: 18 additions & 28 deletions pkg/descheduler/evictions/evictions.go
@@ -51,17 +51,12 @@ type namespacePodEvictCount map[string]uint
type PodEvictor struct {
client clientset.Interface
nodes []*v1.Node
nodeIndexer podutil.GetPodsAssignedToNodeFunc
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
nodepodCount nodePodEvictedCount
namespacePodCount namespacePodEvictCount
evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
ignorePvcPods bool
metricsEnabled bool
}

@@ -72,11 +67,6 @@ func NewPodEvictor(
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
nodeIndexer podutil.GetPodsAssignedToNodeFunc,
evictLocalStoragePods bool,
evictSystemCriticalPods bool,
ignorePvcPods bool,
evictFailedBarePods bool,
metricsEnabled bool,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
@@ -89,17 +79,12 @@ return &PodEvictor{
return &PodEvictor{
client: client,
nodes: nodes,
nodeIndexer: nodeIndexer,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
nodepodCount: nodePodCount,
namespacePodCount: namespacePodCount,
evictLocalStoragePods: evictLocalStoragePods,
evictSystemCriticalPods: evictSystemCriticalPods,
evictFailedBarePods: evictFailedBarePods,
ignorePvcPods: ignorePvcPods,
metricsEnabled: metricsEnabled,
}
}
@@ -230,21 +215,26 @@ func WithLabelSelector(labelSelector labels.Selector) func(opts *Options) {

type constraint func(pod *v1.Pod) error

type evictable struct {
type EvictorFilter struct {
constraints []constraint
}

// Evictable provides an implementation of IsEvictable(pod *v1.Pod) bool.
// The method accepts a list of options which allow extending the constraints
// that decide when a pod is considered evictable.
func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
func NewEvictorFilter(
nodes []*v1.Node,
nodeIndexer podutil.GetPodsAssignedToNodeFunc,
evictLocalStoragePods bool,
evictSystemCriticalPods bool,
ignorePvcPods bool,
evictFailedBarePods bool,
opts ...func(opts *Options),
) *EvictorFilter {
options := &Options{}
for _, opt := range opts {
opt(options)
}

ev := &evictable{}
if pe.evictFailedBarePods {
ev := &EvictorFilter{}
if evictFailedBarePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
// Enable evictFailedBarePods to evict bare pods in failed phase
@@ -263,7 +253,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
return nil
})
}
if !pe.evictSystemCriticalPods {
if !evictSystemCriticalPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
// Moved from IsEvictable function to allow for disabling
if utils.IsCriticalPriorityPod(pod) {
@@ -281,15 +271,15 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
})
}
}
if !pe.evictLocalStoragePods {
if !evictLocalStoragePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithLocalStorage(pod) {
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
}
return nil
})
}
if pe.ignorePvcPods {
if ignorePvcPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
Expand All @@ -299,7 +289,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
}
if options.nodeFit {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if !nodeutil.PodFitsAnyOtherNode(pe.nodeIndexer, pod, pe.nodes) {
if !nodeutil.PodFitsAnyOtherNode(nodeIndexer, pod, nodes) {
return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
}
return nil
@@ -318,7 +308,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
}

// IsEvictable decides when a pod is evictable
func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
func (ef *EvictorFilter) Filter(pod *v1.Pod) bool {
checkErrs := []error{}

ownerRefList := podutil.OwnerRef(pod)
@@ -338,7 +328,7 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
}

for _, c := range ev.constraints {
for _, c := range ef.constraints {
if err := c(pod); err != nil {
checkErrs = append(checkErrs, err)
}
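
The new EvictorFilter keeps the existing constraint-chaining pattern: each enabled check is appended as a closure at construction time, and Filter accepts a pod only when every registered constraint passes, collecting failures in a checkErrs slice. A self-contained sketch of that pattern, using simplified stand-in types rather than the descheduler's own (pod, newFilter, and the single constraint here are illustrative):

package main

import (
	"errors"
	"fmt"
)

// pod is a simplified stand-in for *v1.Pod.
type pod struct {
	name            string
	hasLocalStorage bool
}

type constraint func(p *pod) error

// filter mirrors EvictorFilter's shape: a list of constraints built once.
type filter struct {
	constraints []constraint
}

func newFilter(evictLocalStoragePods bool) *filter {
	f := &filter{}
	// A check is registered only when the corresponding knob demands it,
	// so Filter's hot path runs just the active constraints.
	if !evictLocalStoragePods {
		f.constraints = append(f.constraints, func(p *pod) error {
			if p.hasLocalStorage {
				return errors.New("pod has local storage")
			}
			return nil
		})
	}
	return f
}

// Filter runs every constraint and aggregates the failures before deciding,
// as the real Filter method does with its checkErrs slice.
func (f *filter) Filter(p *pod) bool {
	var errs []error
	for _, c := range f.constraints {
		if err := c(p); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		fmt.Printf("pod %q is not evictable: %v\n", p.name, errs)
		return false
	}
	return true
}

func main() {
	f := newFilter(false)
	fmt.Println(f.Filter(&pod{name: "web-1", hasLocalStorage: true})) // false
	fmt.Println(f.Filter(&pod{name: "web-2"}))                        // true
}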
27 changes: 11 additions & 16 deletions pkg/descheduler/evictions/evictions_test.go
@@ -21,7 +21,6 @@ import (
"testing"

v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -639,29 +638,25 @@ func TestIsEvictable(t *testing.T) {
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

podEvictor := &PodEvictor{
client: fakeClient,
nodes: nodes,
nodeIndexer: getPodsAssignedToNode,
policyGroupVersion: policyv1.SchemeGroupVersion.String(),
dryRun: false,
maxPodsToEvictPerNode: nil,
maxPodsToEvictPerNamespace: nil,
evictLocalStoragePods: test.evictLocalStoragePods,
evictSystemCriticalPods: test.evictSystemCriticalPods,
evictFailedBarePods: test.evictFailedBarePods,
}

var opts []func(opts *Options)
if test.priorityThreshold != nil {
opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
}
if test.nodeFit {
opts = append(opts, WithNodeFit(true))
}
evictable := podEvictor.Evictable(opts...)

result := evictable.IsEvictable(test.pods[0])
evictorFilter := NewEvictorFilter(
nodes,
getPodsAssignedToNode,
test.evictLocalStoragePods,
test.evictSystemCriticalPods,
false,
test.evictFailedBarePods,
opts...,
)

result := evictorFilter.Filter(test.pods[0])
if result != test.result {
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
}
15 changes: 2 additions & 13 deletions pkg/descheduler/strategies/duplicates.go
@@ -66,38 +66,27 @@ func RemoveDuplicatePods(
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
evictorFilter *evictions.EvictorFilter,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

var includedNamespaces, excludedNamespaces sets.String
if strategy.Params != nil && strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
ownerKeyOccurence := make(map[podOwner]int32)
nodeCount := 0
nodeMap := make(map[string]*v1.Node)

podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithFilter(evictorFilter.Filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
BuildFilterFunc()
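
With the filter split out, every strategy shares the same contract: podEvictor supplies the mechanics, evictorFilter supplies the policy, and per-strategy thresholdPriority/nodeFit handling moves up into RunDeschedulerStrategies. A hedged sketch of the resulting strategy shape — the signature mirrors the diff, while RemoveSomethingPods and its body are hypothetical:

func RemoveSomethingPods(
	ctx context.Context,
	client clientset.Interface,
	strategy api.DeschedulerStrategy,
	nodes []*v1.Node,
	podEvictor *evictions.PodEvictor,
	evictorFilter *evictions.EvictorFilter,
	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
	// Evictability is delegated wholesale to the shared filter; the strategy
	// no longer reads thresholdPriority or nodeFit from its own params.
	podFilter, err := podutil.NewOptions().
		WithFilter(evictorFilter.Filter).
		BuildFilterFunc()
	if err != nil {
		klog.ErrorS(err, "Error initializing pod filter function")
		return
	}
	_ = podFilter // list candidate pods with podFilter, then evict via podEvictor
}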
22 changes: 18 additions & 4 deletions pkg/descheduler/strategies/duplicates_test.go
@@ -320,15 +320,25 @@ func TestFindDuplicatePods(t *testing.T) {
nil,
nil,
testCase.nodes,
getPodsAssignedToNode,
false,
)

nodeFit := false
if testCase.strategy.Params != nil {
nodeFit = testCase.strategy.Params.NodeFit
}

evictorFilter := evictions.NewEvictorFilter(
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(nodeFit),
)

RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -748,15 +758,19 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
nil,
nil,
testCase.nodes,
getPodsAssignedToNode,
false,
)

evictorFilter := evictions.NewEvictorFilter(
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
)

RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
9 changes: 2 additions & 7 deletions pkg/descheduler/strategies/failedpods.go
@@ -33,6 +33,7 @@ func RemoveFailedPods(
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
evictorFilter *evictions.EvictorFilter,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
@@ -41,19 +42,13 @@ return
return
}

evictable := podEvictor.Evictable(
evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
evictions.WithNodeFit(strategyParams.NodeFit),
evictions.WithLabelSelector(strategyParams.LabelSelector),
)

var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
labelSelector = strategy.Params.LabelSelector
}

podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithFilter(evictorFilter.Filter).
WithNamespaces(strategyParams.IncludedNamespaces).
WithoutNamespaces(strategyParams.ExcludedNamespaces).
WithLabelSelector(labelSelector).
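
One subtle consequence visible in this file: the label selector no longer rides on the evictor options (evictions.WithLabelSelector is dropped) but is applied when listing pods. A sketch of the replacement wiring, taken from the lines above with the surrounding body elided:

// Label-selector filtering now composes with the pod listing options
// alongside the shared evictability filter.
podFilter, err := podutil.NewOptions().
	WithFilter(evictorFilter.Filter).
	WithNamespaces(strategyParams.IncludedNamespaces).
	WithoutNamespaces(strategyParams.ExcludedNamespaces).
	WithLabelSelector(labelSelector).
	BuildFilterFunc()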
11 changes: 8 additions & 3 deletions pkg/descheduler/strategies/failedpods_test.go
@@ -46,7 +46,7 @@ func TestRemoveFailedPods(t *testing.T) {
}{
{
description: "default empty strategy, 0 failures, 0 evictions",
strategy: api.DeschedulerStrategy{},
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: false}},
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
@@ -275,15 +275,20 @@ func TestRemoveFailedPods(t *testing.T) {
nil,
nil,
tc.nodes,
getPodsAssignedToNode,
false,
)

evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(tc.strategy.Params.NodeFit),
)

RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)