Disable matching on few selectors. Remove duplicates. #72801

Merged
merged 1 commit on Jan 15, 2019
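In short, this change makes both spreading priorities count a pod only when it matches all of the supplied selectors (previously a single match was enough), and removes the duplicated matching logic by funneling both call sites through a new countMatchingPods helper.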
56 changes: 26 additions & 30 deletions pkg/scheduler/algorithm/priorities/selector_spreading.go
@@ -84,29 +84,11 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 		}, nil
 	}
 
-	count := int(0)
-	for _, nodePod := range nodeInfo.Pods() {
-		if pod.Namespace != nodePod.Namespace {
-			continue
-		}
-		// When we are replacing a failed pod, we often see the previous
-		// deleted version while scheduling the replacement.
-		// Ignore the previous deleted version for spreading purposes
-		// (it can still be considered for resource restrictions etc.)
-		if nodePod.DeletionTimestamp != nil {
-			klog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
-			continue
-		}
-		for _, selector := range selectors {
-			if selector.Matches(labels.Set(nodePod.ObjectMeta.Labels)) {
-				count++
-				break
-			}
-		}
-	}
+	count := countMatchingPods(pod.Namespace, selectors, nodeInfo)
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: int(count),
+		Score: count,
 	}, nil
 }

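Note on semantics: the deleted loop counted a pod as soon as any one selector matched (hence the break after count++), while the replacement helper, shown in the next hunk, counts a pod only when every selector matches. A minimal standalone sketch of the difference, using only the labels package from k8s.io/apimachinery (the pod labels and selectors below are hypothetical):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A pod that carries only the Service's label, not the controller's.
	podLabels := labels.Set{"baz": "blah"}
	selectors := []labels.Selector{
		labels.SelectorFromSet(labels.Set{"baz": "blah"}), // e.g. a Service selector
		labels.SelectorFromSet(labels.Set{"foo": "bar"}),  // e.g. an RC selector
	}

	// Old semantics: the pod counts if ANY selector matches.
	matchesAny := false
	for _, s := range selectors {
		if s.Matches(podLabels) {
			matchesAny = true
			break
		}
	}

	// New semantics: the pod counts only if ALL selectors match.
	matchesAll := true
	for _, s := range selectors {
		if !s.Matches(podLabels) {
			matchesAll = false
			break
		}
	}

	fmt.Println(matchesAny, matchesAll) // true false: this pod no longer counts
}
```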
@@ -201,19 +183,29 @@ func (s *ServiceAntiAffinity) getNodeClassificationByLabels(nodes []*v1.Node) (m
 	return labeledNodes, nonLabeledNodes
 }
 
-// filteredPod gets pods based on namespace and selector
-func filteredPod(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) (pods []*v1.Pod) {
-	if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector == nil {
-		return []*v1.Pod{}
+// countMatchingPods counts pods based on namespace and matching all selectors
+func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int {
+	if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || len(selectors) == 0 {
+		return 0
 	}
+	count := 0
 	for _, pod := range nodeInfo.Pods() {
 		// Ignore pods being deleted for spreading purposes
 		// Similar to how it is done for SelectorSpreadPriority
-		if namespace == pod.Namespace && pod.DeletionTimestamp == nil && selector.Matches(labels.Set(pod.Labels)) {
-			pods = append(pods, pod)
+		if namespace == pod.Namespace && pod.DeletionTimestamp == nil {
+			matches := true
+			for _, selector := range selectors {
+				if !selector.Matches(labels.Set(pod.Labels)) {
+					matches = false
+					break
+				}
+			}
+			if matches {
+				count++
+			}
 		}
 	}
-	return
+	return count
 }
 
 // CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service
@@ -232,11 +224,15 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
 		firstServiceSelector = getFirstServiceSelector(pod, s.serviceLister)
 	}
 	// pods matching the namespace and selector on the current node
-	matchedPodsOfNode := filteredPod(pod.Namespace, firstServiceSelector, nodeInfo)
+	var selectors []labels.Selector
+	if firstServiceSelector != nil {
+		selectors = append(selectors, firstServiceSelector)
+	}
+	score := countMatchingPods(pod.Namespace, selectors, nodeInfo)
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: int(len(matchedPodsOfNode)),
+		Score: score,
 	}, nil
 }

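For reference, the new call site wraps the single Service selector into a slice; when the pod has no matching Service, firstServiceSelector is nil, the slice stays empty, and countMatchingPods short-circuits to 0 through its len(selectors) == 0 guard, preserving the old selector == nil behavior of filteredPod. Below is a self-contained sketch of the helper's rule written against a plain pod slice (illustrative only; the scheduler's real version takes a *schedulernodeinfo.NodeInfo):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// countMatchingPodsSketch mirrors the helper's rule: a pod counts only when it
// is in the namespace, is not pending deletion, and satisfies every selector.
func countMatchingPodsSketch(namespace string, selectors []labels.Selector, pods []*v1.Pod) int {
	if len(pods) == 0 || len(selectors) == 0 {
		return 0 // empty selector list behaves like the old "selector == nil" guard
	}
	count := 0
	for _, pod := range pods {
		if pod.Namespace != namespace || pod.DeletionTimestamp != nil {
			continue // skip other namespaces and pods being deleted
		}
		matchesAll := true
		for _, sel := range selectors {
			if !sel.Matches(labels.Set(pod.Labels)) {
				matchesAll = false
				break
			}
		}
		if matchesAll {
			count++
		}
	}
	return count
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Namespace: "default",
		Labels:    map[string]string{"foo": "bar", "baz": "blah"},
	}}
	sel := labels.SelectorFromSet(labels.Set{"foo": "bar"})
	fmt.Println(countMatchingPodsSketch("default", []labels.Selector{sel}, []*v1.Pod{pod})) // 1
	fmt.Println(countMatchingPodsSketch("default", nil, []*v1.Pod{pod}))                    // 0
}
```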
24 changes: 12 additions & 12 deletions pkg/scheduler/algorithm/priorities/selector_spreading_test.go
@@ -186,8 +186,8 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rcs:      []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
-			// do spreading between all pods. The result should be exactly as above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
+			// do spreading of pod2 and pod3, and not pod1.
+			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
 			name:         "service with partial pod label matches with service and replication controller",
 		},
 		{
@@ -201,7 +201,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
+			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
 			name:         "service with partial pod label matches with service and replica set",
 		},
 		{
@@ -214,8 +214,8 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:    []string{"machine1", "machine2"},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			sss:      []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			name:         "service with partial pod label matches with service and replica set",
+			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			name:         "service with partial pod label matches with service and stateful set",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
@@ -227,9 +227,9 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:    []string{"machine1", "machine2"},
 			rcs:      []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
-			// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			name:         "disjoined service and replication controller should be treated equally",
+			// Taken together Service and Replication Controller should match no pods.
+			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+			name:         "disjoined service and replication controller matches no pods",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
@@ -242,8 +242,8 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			name:         "disjoined service and replica set should be treated equally",
+			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+			name:         "disjoined service and replica set matches no pods",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
@@ -255,8 +255,8 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:    []string{"machine1", "machine2"},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			sss:      []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			name:         "disjoined service and replica set should be treated equally",
+			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+			name:         "disjoined service and stateful set matches no pods",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
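Why the expected scores move between 0, 5, and 10: the map functions above return raw per-node counts, and a separate reduce step (outside this diff) scales them to 0-10, giving the highest score to the node with the fewest matching pods. A rough sketch of that scaling, assuming the usual maxPriority*(max-count)/max shape with a max-score fallback when nothing matches anywhere:

```go
package main

import "fmt"

// normalizeSketch approximates the reduce step's scaling; this is an
// assumption about the surrounding scheduler code, not part of this diff.
func normalizeSketch(count, maxCount int) int {
	const maxPriority = 10
	if maxCount == 0 {
		return maxPriority // no pod matched on any node, so all nodes are equally good
	}
	return maxPriority * (maxCount - count) / maxCount
}

func main() {
	fmt.Println(normalizeSketch(2, 2), normalizeSketch(1, 2)) // 0 5 -- the old OR-match split
	fmt.Println(normalizeSketch(1, 1), normalizeSketch(0, 0)) // 0 10 -- the new AND-match cases
}
```

Read against the tests: a score of 0 on both machines means each node holds the maximum count of matching pods (the partial-match cases, where only pods matching all selectors still count), while 10 on both means no pod matched anywhere (the disjoint cases, since no pod can satisfy two disjoint selectors at once).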