cleanup unnecessary func parameters in genericScheduler methods #84015

Merged 1 commit on Oct 18, 2019
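The change follows one pattern throughout: genericScheduler methods stop accepting arguments that merely duplicate fields already stored on the receiver (the predicate map, the predicate metadata producer, the scheduling queue, and the node-info snapshot) and read them from `g` instead. Below is a minimal, self-contained sketch of that refactoring; the names are hypothetical stand-ins, not the actual scheduler types.

```go
package main

import "fmt"

// scheduler stands in for genericScheduler; predicates stands in for the
// fit-predicate map the real struct already carries as a field.
type scheduler struct {
	predicates map[string]func(pod string) bool
}

// Before: the field is passed back in as a parameter even though the
// method already has access to it through the receiver.
func (s *scheduler) fitsBefore(pod string, predicates map[string]func(pod string) bool) bool {
	for _, p := range predicates {
		if !p(pod) {
			return false
		}
	}
	return true
}

// After: the redundant parameter is dropped and the receiver field is used.
func (s *scheduler) fitsAfter(pod string) bool {
	for _, p := range s.predicates {
		if !p(pod) {
			return false
		}
	}
	return true
}

func main() {
	s := &scheduler{predicates: map[string]func(string) bool{
		"nonEmptyName": func(pod string) bool { return pod != "" },
	}}
	fmt.Println(s.fitsBefore("nginx", s.predicates)) // caller had to re-pass s.predicates
	fmt.Println(s.fitsAfter("nginx"))                // call site shrinks; behavior is identical
}
```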
44 changes: 16 additions & 28 deletions pkg/scheduler/core/generic_scheduler.go
@@ -334,8 +334,7 @@ func (g *genericScheduler) Preempt(ctx context.Context, state *framework.CycleSt
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	nodeToVictims, err := g.selectNodesForPreemption(ctx, state, pod, g.nodeInfoSnapshot.NodeInfoMap, potentialNodes, g.predicates,
-		g.predicateMetaProducer, g.schedulingQueue, pdbs)
+	nodeToVictims, err := g.selectNodesForPreemption(ctx, state, pod, potentialNodes, pdbs)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -488,8 +487,6 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 				pod,
 				meta,
 				g.nodeInfoSnapshot.NodeInfoMap[nodeName],
-				g.predicates,
-				g.schedulingQueue,
 				g.alwaysCheckAllPredicates,
 			)
 			if err != nil {
@@ -563,13 +560,13 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 // to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
 // any pod was added, 2) augmented metadata, 3) augmented CycleState 4) augmented nodeInfo.
 func (g *genericScheduler) addNominatedPods(ctx context.Context, pod *v1.Pod, meta predicates.PredicateMetadata, state *framework.CycleState,
-	nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, predicates.PredicateMetadata,
+	nodeInfo *schedulernodeinfo.NodeInfo) (bool, predicates.PredicateMetadata,
 	*framework.CycleState, *schedulernodeinfo.NodeInfo, error) {
-	if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
+	if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
 		// This may happen only in tests.
 		return false, meta, state, nodeInfo, nil
 	}
-	nominatedPods := queue.NominatedPodsForNode(nodeInfo.Node().Name)
+	nominatedPods := g.schedulingQueue.NominatedPodsForNode(nodeInfo.Node().Name)
 	if len(nominatedPods) == 0 {
 		return false, meta, state, nodeInfo, nil
 	}
@@ -615,8 +612,6 @@ func (g *genericScheduler) podFitsOnNode(
 	pod *v1.Pod,
 	meta predicates.PredicateMetadata,
 	info *schedulernodeinfo.NodeInfo,
-	predicateFuncs map[string]predicates.FitPredicate,
-	queue internalqueue.SchedulingQueue,
 	alwaysCheckAllPredicates bool,
 ) (bool, []predicates.PredicateFailureReason, *framework.Status, error) {
 	var failedPredicates []predicates.PredicateFailureReason
@@ -647,7 +642,7 @@ func (g *genericScheduler) podFitsOnNode(
 		nodeInfoToUse := info
 		if i == 0 {
 			var err error
-			podsAdded, metaToUse, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, meta, state, info, queue)
+			podsAdded, metaToUse, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, meta, state, info)
 			if err != nil {
 				return false, []predicates.PredicateFailureReason{}, nil, err
 			}
@@ -662,7 +657,7 @@
 				err error
 			)

-			if predicate, exist := predicateFuncs[predicateKey]; exist {
+			if predicate, exist := g.predicates[predicateKey]; exist {
				fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
 				if err != nil {
 					return false, []predicates.PredicateFailureReason{}, nil, err
@@ -732,7 +727,7 @@ func PrioritizeNodes(
 		errs = append(errs, err)
 	}

-	results := make([]framework.NodeScoreList, len(priorityConfigs), len(priorityConfigs))
+	results := make([]framework.NodeScoreList, len(priorityConfigs))

 	// DEPRECATED: we can remove this when all priorityConfigs implement the
 	// Map-Reduce pattern.
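Aside on the `make` change in the hunk above: when the capacity argument equals the length, it is redundant, because `make([]T, n)` already allocates a slice whose capacity defaults to its length. A small sketch illustrating the equivalence:

```go
package main

import "fmt"

func main() {
	n := 4
	a := make([]int, n, n) // explicit capacity equal to length
	b := make([]int, n)    // capacity defaults to the length

	fmt.Println(len(a), cap(a)) // 4 4
	fmt.Println(len(b), cap(b)) // 4 4
}
```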
@@ -1010,32 +1005,27 @@ func (g *genericScheduler) selectNodesForPreemption(
 	ctx context.Context,
 	state *framework.CycleState,
 	pod *v1.Pod,
-	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
 	potentialNodes []*v1.Node,
-	fitPredicates map[string]predicates.FitPredicate,
-	metadataProducer predicates.PredicateMetadataProducer,
-	queue internalqueue.SchedulingQueue,
 	pdbs []*policy.PodDisruptionBudget,
 ) (map[*v1.Node]*extenderv1.Victims, error) {
 	nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
 	var resultLock sync.Mutex

 	// We can use the same metadata producer for all nodes.
-	meta := metadataProducer(pod, nodeNameToInfo)
+	meta := g.predicateMetaProducer(pod, g.nodeInfoSnapshot.NodeInfoMap)
 	checkNode := func(i int) {
 		nodeName := potentialNodes[i].Name
-		if nodeNameToInfo[nodeName] == nil {
+		if g.nodeInfoSnapshot.NodeInfoMap[nodeName] == nil {
 			return
 		}
-		nodeInfoCopy := nodeNameToInfo[nodeName].Clone()
+		nodeInfoCopy := g.nodeInfoSnapshot.NodeInfoMap[nodeName].Clone()
 		var metaCopy predicates.PredicateMetadata
 		if meta != nil {
 			metaCopy = meta.ShallowCopy()
 		}
 		stateCopy := state.Clone()
 		stateCopy.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: metaCopy})
-		pods, numPDBViolations, fits := g.selectVictimsOnNode(
-			ctx, stateCopy, pod, metaCopy, nodeInfoCopy, fitPredicates, queue, pdbs)
+		pods, numPDBViolations, fits := g.selectVictimsOnNode(ctx, stateCopy, pod, metaCopy, nodeInfoCopy, pdbs)
 		if fits {
 			resultLock.Lock()
 			victims := extenderv1.Victims{
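The hunk above also shows the concurrency pattern these fields feed into: each candidate node gets its own clone of the metadata, state, and node info, nodes are checked in parallel, and a mutex guards the shared result map. A stripped-down sketch of that pattern follows; it uses hypothetical types and plain goroutines with a WaitGroup, whereas the scheduler itself drives checkNode through a parallelizer helper.

```go
package main

import (
	"fmt"
	"sync"
)

// victims stands in for the per-node victim list in the real code.
type victims struct{ pods []string }

func selectNodes(nodes []string, check func(node string) (victims, bool)) map[string]victims {
	result := map[string]victims{}
	var resultLock sync.Mutex
	var wg sync.WaitGroup

	for _, node := range nodes {
		wg.Add(1)
		go func(node string) { // one worker per candidate node
			defer wg.Done()
			if v, fits := check(node); fits {
				resultLock.Lock() // the result map is shared, so writes are serialized
				result[node] = v
				resultLock.Unlock()
			}
		}(node)
	}
	wg.Wait()
	return result
}

func main() {
	nodes := []string{"node1", "node2", "node3"}
	out := selectNodes(nodes, func(node string) (victims, bool) {
		// Each call works on its own copies, mirroring nodeInfoCopy/stateCopy above.
		return victims{pods: []string{"low-priority-pod"}}, node != "node2"
	})
	fmt.Println(len(out)) // 2
}
```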
@@ -1110,8 +1100,6 @@ func (g *genericScheduler) selectVictimsOnNode(
 	pod *v1.Pod,
 	meta predicates.PredicateMetadata,
 	nodeInfo *schedulernodeinfo.NodeInfo,
-	fitPredicates map[string]predicates.FitPredicate,
-	queue internalqueue.SchedulingQueue,
 	pdbs []*policy.PodDisruptionBudget,
 ) ([]*v1.Pod, int, bool) {
 	var potentialVictims []*v1.Pod
@@ -1161,7 +1149,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 	// inter-pod affinity to one or more victims, but we have decided not to
 	// support this case for performance reasons. Having affinity to lower
 	// priority pods is not a recommended configuration anyway.
-	if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, fitPredicates, queue, false); !fits {
+	if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false); !fits {
 		if err != nil {
 			klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
 		}
@@ -1179,7 +1167,7 @@
 		if err := addPod(p); err != nil {
 			return false, err
 		}
-		fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, fitPredicates, queue, false)
+		fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false)
 		if !fits {
 			if err := removePod(p); err != nil {
 				return false, err
@@ -1215,7 +1203,8 @@ func nodesWherePreemptionMightHelp(nodeNameToInfo map[string]*schedulernodeinfo.
 		if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable {
 			continue
 		}
-		failedPredicates, _ := fitErr.FailedPredicates[name]
+		failedPredicates := fitErr.FailedPredicates[name]
+
 		// If we assume that scheduler looks at all nodes and populates the failedPredicateMap
 		// (which is the case today), the !found case should never happen, but we'd prefer
 		// to rely less on such assumptions in the code when checking does not impose
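The `failedPredicates, _ := fitErr.FailedPredicates[name]` cleanup above relies on a basic Go property: indexing a map with a missing key returns the element type's zero value, so the two-value form is only needed when the presence boolean is actually checked. A quick illustration with made-up data:

```go
package main

import "fmt"

func main() {
	failed := map[string][]string{
		"node1": {"PodFitsResources"},
	}

	// Single-value form: a missing key yields the zero value (a nil slice here).
	reasons := failed["node2"]
	fmt.Println(reasons == nil, len(reasons)) // true 0

	// The two-value form is only worth it when the presence check is used.
	if reasons, ok := failed["node1"]; ok {
		fmt.Println(reasons) // [PodFitsResources]
	}
}
```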
@@ -1297,8 +1286,7 @@ func NewGenericScheduler(
 	alwaysCheckAllPredicates bool,
 	disablePreemption bool,
 	percentageOfNodesToScore int32,
-	enableNonPreempting bool,
-) ScheduleAlgorithm {
+	enableNonPreempting bool) ScheduleAlgorithm {
 	return &genericScheduler{
 		cache: cache,
 		schedulingQueue: podQueue,
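With the parameters gone, callers that previously injected a node-info map or metadata producer per call now have to set them on the scheduler before calling, which is exactly what the test changes below do with `g.nodeInfoSnapshot.NodeInfoMap = nodeNameToInfo`. A sketch of that call-site shift, using simplified stand-in types rather than the scheduler's own:

```go
package main

import "fmt"

type nodeInfo struct{ pods int }

type snapshot struct {
	NodeInfoMap map[string]*nodeInfo
}

// scheduler mirrors the shape of genericScheduler for this example only.
type scheduler struct {
	nodeInfoSnapshot *snapshot
}

// selectNodesForPreemption no longer takes the map; it reads the snapshot field.
func (s *scheduler) selectNodesForPreemption(candidates []string) []string {
	var fits []string
	for _, name := range candidates {
		if info := s.nodeInfoSnapshot.NodeInfoMap[name]; info != nil && info.pods < 2 {
			fits = append(fits, name)
		}
	}
	return fits
}

func main() {
	s := &scheduler{nodeInfoSnapshot: &snapshot{}}

	// Tests now seed the receiver's snapshot instead of passing a map argument,
	// mirroring the snapshot assignment in the test diff below.
	s.nodeInfoSnapshot.NodeInfoMap = map[string]*nodeInfo{
		"node1": {pods: 1},
		"node2": {pods: 3},
	}
	fmt.Println(s.selectNodesForPreemption([]string{"node1", "node2"})) // [node1]
}
```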
19 changes: 9 additions & 10 deletions pkg/scheduler/core/generic_scheduler_test.go
@@ -1061,10 +1061,6 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
 	return &node, nil
 }

-func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithmpredicates.PredicateMetadata {
-	return algorithmpredicates.GetPredicateMetadata(p, nodeInfo)
-}
-
 var smallContainers = []v1.Container{
 	{
 		Resources: v1.ResourceRequirements{
@@ -1384,14 +1380,12 @@ func TestSelectNodesForPreemption(t *testing.T) {
 				cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}})
 			}

-			predMetaProducer := algorithmpredicates.EmptyPredicateMetadataProducer
-
 			filterPlugin.failedNodeReturnCodeMap = filterFailedNodeReturnCodeMap
 			scheduler := NewGenericScheduler(
 				nil,
 				internalqueue.NewSchedulingQueue(nil, nil),
 				test.predicates,
-				predMetaProducer,
+				algorithmpredicates.GetPredicateMetadata,
 				nil,
 				priorities.EmptyPriorityMetadataProducer,
 				filterFramework,
@@ -1429,7 +1423,8 @@ func TestSelectNodesForPreemption(t *testing.T) {
 			newnode.ObjectMeta.Labels = map[string]string{"hostname": "newnode"}
 			nodes = append(nodes, newnode)
 			state := framework.NewCycleState()
-			nodeToPods, err := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			g.nodeInfoSnapshot.NodeInfoMap = nodeNameToInfo
+			nodeToPods, err := g.selectNodesForPreemption(context.Background(), state, test.pod, nodes, nil)
 			if err != nil {
 				t.Error(err)
 			}
@@ -1643,17 +1638,21 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			g := &genericScheduler{
-				framework: emptyFramework,
+				framework:             emptyFramework,
+				predicates:            test.predicates,
+				predicateMetaProducer: algorithmpredicates.GetPredicateMetadata,
 			}
 			assignDefaultStartTime(test.pods)
+			g.nodeInfoSnapshot = g.framework.NodeInfoSnapshot()

 			nodes := []*v1.Node{}
 			for _, n := range test.nodes {
 				nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
 			}
 			nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
 			state := framework.NewCycleState()
-			candidateNodes, _ := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			g.nodeInfoSnapshot.NodeInfoMap = nodeNameToInfo
+			candidateNodes, _ := g.selectNodesForPreemption(context.Background(), state, test.pod, nodes, nil)
 			node := pickOneNodeForPreemption(candidateNodes)
 			found := false
 			for _, nodeName := range test.expected {