Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Prevent scheduler crashing in default preemption plugin #101560

Merged
merged 1 commit
May 5, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ func dryRunPreemption(ctx context.Context, fh framework.Handle,
nodeInfoCopy := potentialNodes[(int(offset)+i)%len(potentialNodes)].Clone()
stateCopy := state.Clone()
pods, numPDBViolations, status := selectVictimsOnNode(ctx, fh, stateCopy, pod, nodeInfoCopy, pdbs)
if status.IsSuccess() {
if status.IsSuccess() && len(pods) != 0 {
victims := extenderv1.Victims{
Pods: pods,
NumPDBViolations: int64(numPDBViolations),
Expand All @@ -352,11 +352,14 @@ func dryRunPreemption(ctx context.Context, fh framework.Handle,
if nvcSize > 0 && nvcSize+vcSize >= numCandidates {
cancel()
}
} else {
statusesLock.Lock()
nodeStatuses[nodeInfoCopy.Node().Name] = status
statusesLock.Unlock()
return
}
if status.IsSuccess() && len(pods) == 0 {
status = framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeInfoCopy.Node().Name))
}
statusesLock.Lock()
nodeStatuses[nodeInfoCopy.Node().Name] = status
statusesLock.Unlock()
}
fh.Parallelizer().Until(parallelCtx, len(potentialNodes), checkNode)
return append(nonViolatingCandidates.get(), violatingCandidates.get()...), nodeStatuses
Expand Down Expand Up @@ -391,6 +394,18 @@ func CallExtenders(extenders []framework.Extender, pod *v1.Pod, nodeLister frame
}
return nil, framework.AsStatus(err)
}
// Check if the returned victims are valid.
for nodeName, victims := range nodeNameToVictims {
if victims == nil || len(victims.Pods) == 0 {
if extender.IsIgnorable() {
delete(nodeNameToVictims, nodeName)
klog.InfoS("Ignoring node without victims", "node", nodeName)
continue
}
return nil, framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeName))
}
}

// Replace victimsMap with new result after preemption. So the
// rest of extenders can continue use it as parameter.
victimsMap = nodeNameToVictims
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -357,13 +357,9 @@ func TestDryRunPreemption(t *testing.T) {
st.MakePod().Name("p1").UID("p1").Node("node1").Priority(midPriority).Obj(),
st.MakePod().Name("p2").UID("p2").Node("node2").Priority(midPriority).Obj(),
},
expected: [][]Candidate{
{
&candidate{victims: &extenderv1.Victims{}, name: "node1"},
&candidate{victims: &extenderv1.Victims{}, name: "node2"},
},
},
expectedNumFilterCalled: []int32{4},
expected: [][]Candidate{{}},
fakeFilterRC: framework.Unschedulable,
expectedNumFilterCalled: []int32{2},
},
{
name: "a pod that fits on one node with no preemption",
Expand All @@ -379,12 +375,9 @@ func TestDryRunPreemption(t *testing.T) {
st.MakePod().Name("p1").UID("p1").Node("node1").Priority(midPriority).Obj(),
st.MakePod().Name("p2").UID("p2").Node("node2").Priority(midPriority).Obj(),
},
expected: [][]Candidate{
{
&candidate{victims: &extenderv1.Victims{}, name: "node1"},
},
},
expectedNumFilterCalled: []int32{3},
expected: [][]Candidate{{}},
fakeFilterRC: framework.Unschedulable,
expectedNumFilterCalled: []int32{2},
},
{
name: "a pod that fits on both nodes when lower priority pods are preempted",
Expand Down Expand Up @@ -1072,16 +1065,6 @@ func TestSelectBestCandidate(t *testing.T) {
pods []*v1.Pod
expected []string // any of the items is valid
}{
{
name: "No node needs preemption",
registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
nodeNames: []string{"node1"},
pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(largeRes).Obj(),
pods: []*v1.Pod{
st.MakePod().Name("p1").UID("p1").Node("node1").Priority(midPriority).Req(smallRes).StartTime(epochTime).Obj(),
},
expected: []string{"node1"},
},
{
name: "a pod that fits on both nodes when lower priority pods are preempted",
registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
Expand Down Expand Up @@ -1263,6 +1246,9 @@ func TestSelectBestCandidate(t *testing.T) {
offset, numCandidates := pl.getOffsetAndNumCandidates(int32(len(nodeInfos)))
candidates, _ := dryRunPreemption(context.Background(), fwk, state, tt.pod, nodeInfos, nil, offset, numCandidates)
s := SelectCandidate(candidates)
if s == nil || len(s.Name()) == 0 {
return
}
Comment on lines +1249 to +1251
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can remove the first sub-test "No node needs preemption".

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bump.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Deleted the test case.

found := false
for _, nodeName := range tt.expected {
if nodeName == s.Name() {
Expand Down