Remove nodeName from predicate signature. #24968

Merged (1 commit) on May 6, 2016
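Every predicate in this PR moves from the three-argument form to a two-argument one: the `nodeName string` parameter disappears, and the node is read from the `*schedulercache.NodeInfo` that callers already populate with `SetNode`. A minimal sketch of the before/after shape (assuming the usual `pkg/api`, `schedulercache` and `fmt` imports; `SomePredicate` is a placeholder name, while the nil-node guard and error text mirror the diff below):

```go
// Old: func SomePredicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error)
// New: the node comes out of NodeInfo instead of a separate argument.
func SomePredicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
	node := nodeInfo.Node()
	if node == nil {
		// A NodeInfo that was never given a node is treated as an error,
		// matching the guard added throughout predicates.go.
		return false, fmt.Errorf("node not found")
	}
	// ... predicate-specific checks against pod, node and nodeInfo.Pods() ...
	return true, nil
}
```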
2 changes: 1 addition & 1 deletion pkg/kubelet/kubelet.go
@@ -2339,7 +2339,7 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str
}
nodeInfo := schedulercache.NewNodeInfo(otherPods...)
nodeInfo.SetNode(node)
-fit, err := predicates.GeneralPredicates(pod, kl.nodeName, nodeInfo)
+fit, err := predicates.GeneralPredicates(pod, nodeInfo)
if !fit {
if re, ok := err.(*predicates.PredicateFailureError); ok {
reason := re.PredicateName
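On the kubelet side, `canAdmitPod` already builds the `NodeInfo` from the admitted pods and attaches the node object, so passing `kl.nodeName` separately was redundant. A hedged sketch of the call pattern after this change (the helper name and the rejection strings are illustrative, not part of the diff; imports from `pkg/api`, `schedulercache` and `predicates` are assumed):

```go
// admitSketch mirrors how canAdmitPod consults the general predicates after
// this PR: the node travels inside NodeInfo rather than as a separate name.
func admitSketch(node *api.Node, otherPods []*api.Pod, pod *api.Pod) (ok bool, reason, message string) {
	nodeInfo := schedulercache.NewNodeInfo(otherPods...)
	nodeInfo.SetNode(node)

	fit, err := predicates.GeneralPredicates(pod, nodeInfo)
	if fit {
		return true, "", ""
	}
	// A failed predicate surfaces as a *PredicateFailureError naming the
	// predicate that rejected the pod (see the hunk above).
	if re, isPredicateErr := err.(*predicates.PredicateFailureError); isPredicateErr {
		return false, re.PredicateName, re.Error()
	}
	if err != nil {
		return false, "UnexpectedError", err.Error()
	}
	return false, "PredicateFailed", "pod does not fit on this node"
}
```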
64 changes: 32 additions & 32 deletions plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -101,7 +101,7 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
// TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func NoDiskConflict(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
for _, v := range pod.Spec.Volumes {
for _, ev := range nodeInfo.Pods() {
if isVolumeConflict(v, ev) {
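As the comment above describes, `NoDiskConflict` rejects a pod whose volumes collide with volumes of pods already placed on the node. A hedged usage sketch with two pods mounting the same GCE PD (the pod literals and disk name are made up, and the `ErrDiskConflict` sentinel named in the comment is an assumption about this package's error set):

```go
// diskConflictSketch shows the new two-argument call; NoDiskConflict only
// needs the pods recorded in NodeInfo, not the node object itself.
func diskConflictSketch() (bool, error) {
	sharedPD := api.Volume{
		Name: "data",
		VolumeSource: api.VolumeSource{
			GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "shared-disk"},
		},
	}
	existing := &api.Pod{Spec: api.PodSpec{Volumes: []api.Volume{sharedPD}}}
	incoming := &api.Pod{Spec: api.PodSpec{Volumes: []api.Volume{sharedPD}}}

	// Everything the predicate needs about the node lives in NodeInfo.
	nodeInfo := schedulercache.NewNodeInfo(existing)

	// Expected: false, ErrDiskConflict, because both pods mount the same
	// read-write GCE PD.
	return predicates.NoDiskConflict(incoming, nodeInfo)
}
```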
@@ -177,7 +177,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace
return nil
}

-func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
newVolumes := make(map[string]bool)
if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
return false, err
@@ -275,10 +275,10 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
return c.predicate
}

-func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}

nodeConstraints := make(map[string]string)
@@ -335,7 +335,7 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeName string, nodeInfo *s
}
nodeV, _ := nodeConstraints[k]
if v != nodeV {
glog.V(2).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, nodeName, pvName, k)
glog.V(2).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
return false, ErrVolumeZoneConflict
}
}
@@ -391,10 +391,10 @@ func podName(pod *api.Pod) string {
return pod.Namespace + "/" + pod.Name
}

-func podFitsResourcesInternal(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}
allocatable := node.Status.Allocatable
allowedPodNumber := allocatable.Pods().Value()
@@ -419,14 +419,10 @@ func podFitsResourcesInternal(pod *api.Pod, nodeName string, nodeInfo *scheduler
newInsufficientResourceError(memoryResoureceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
}
glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
-podName(pod), nodeName, len(nodeInfo.Pods()), allowedPodNumber)
+podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
return true, nil
}

-func PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
-return podFitsResourcesInternal(pod, nodeName, nodeInfo)
-}
-
// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSelectorTerm) bool {
@@ -496,22 +492,26 @@ func PodMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
return nodeAffinityMatches
}

-func PodSelectorMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodSelectorMatches(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}
if PodMatchesNodeLabels(pod, node) {
return true, nil
}
return false, ErrNodeSelectorNotMatch
}

-func PodFitsHost(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodFitsHost(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
if len(pod.Spec.NodeName) == 0 {
return true, nil
}
-if pod.Spec.NodeName == nodeName {
+node := nodeInfo.Node()
+if node == nil {
+return false, fmt.Errorf("node not found")
+}
+if pod.Spec.NodeName == node.Name {
return true, nil
}
return false, ErrPodNotMatchHostName
@@ -542,10 +542,10 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
return false, fmt.Errorf("node not found: %q", nodeName)
return false, fmt.Errorf("node not found")
}

var exists bool
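The label-presence predicate is built through `NewNodeLabelPredicate(labels, presence)`, visible in the hunk header above. A hedged sketch of the "retiring node" case from the comment (the node and pod values are placeholders, the call assumes `algorithm.FitPredicate` now has the two-argument shape from this PR, and `ErrNodeLabelPresenceViolated` is an assumption about the sentinel error this package returns):

```go
func retiringNodeSketch() (bool, error) {
	// Fail the predicate whenever the node carries a "retiring" label,
	// whatever its value (presence == false means the labels must be absent).
	avoidRetiring := predicates.NewNodeLabelPredicate([]string{"retiring"}, false)

	retiringNode := &api.Node{ObjectMeta: api.ObjectMeta{
		Name:   "node-1",
		Labels: map[string]string{"retiring": "2016-06-01"},
	}}
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(retiringNode)

	// Expected: false, ErrNodeLabelPresenceViolated (error name assumed).
	return avoidRetiring(&api.Pod{}, nodeInfo)
}
```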
@@ -585,7 +585,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
// - L is listed in the ServiceAffinity object that is passed into the function
// - the pod does not have any NodeSelector for L
// - some other pod from the same service is already scheduled onto a node that has value V for label L
-func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
var affinitySelector labels.Selector

// check if the pod being scheduled has the affinity labels specified in its NodeSelector
@@ -645,9 +645,9 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeName string, no
affinitySelector = labels.Set(affinityLabels).AsSelector()
}

-node, err := s.nodeInfo.GetNodeInfo(nodeName)
-if err != nil {
-return false, err
+node := nodeInfo.Node()
+if node == nil {
+return false, fmt.Errorf("node not found")
}

// check if the node matches the selector
@@ -657,7 +657,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeName string, no
return false, ErrServiceAffinityViolated
}

-func PodFitsHostPorts(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func PodFitsHostPorts(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
wantPorts := getUsedPorts(pod)
if len(wantPorts) == 0 {
return true, nil
@@ -703,21 +703,21 @@ func haveSame(a1, a2 []string) bool {
return false
}

-func GeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
-fit, err := podFitsResourcesInternal(pod, nodeName, nodeInfo)
+func GeneralPredicates(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+fit, err := PodFitsResources(pod, nodeInfo)
if !fit {
return fit, err
}

-fit, err = PodFitsHost(pod, nodeName, nodeInfo)
+fit, err = PodFitsHost(pod, nodeInfo)
if !fit {
return fit, err
}
-fit, err = PodFitsHostPorts(pod, nodeName, nodeInfo)
+fit, err = PodFitsHostPorts(pod, nodeInfo)
if !fit {
return fit, err
}
-fit, err = PodSelectorMatches(pod, nodeName, nodeInfo)
+fit, err = PodSelectorMatches(pod, nodeInfo)
if !fit {
return fit, err
}
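`GeneralPredicates` keeps its short-circuit structure; only the arguments change, and the exported `PodFitsResources` replaces the internal wrapper. A hedged sketch of the same composition pattern under the new two-argument shape (the `andPredicates` helper is invented for illustration and assumes `algorithm.FitPredicate` now matches the signature introduced here):

```go
// andPredicates chains predicates with the same short-circuit behaviour as
// GeneralPredicates: the first predicate that reports !fit decides the result.
func andPredicates(preds ...algorithm.FitPredicate) algorithm.FitPredicate {
	return func(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
		for _, p := range preds {
			if fit, err := p(pod, nodeInfo); !fit {
				return fit, err
			}
		}
		return true, nil
	}
}

// Roughly what GeneralPredicates does after this PR:
var generalSketch = andPredicates(
	predicates.PodFitsResources,
	predicates.PodFitsHost,
	predicates.PodFitsHostPorts,
	predicates.PodSelectorMatches,
)
```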
@@ -739,10 +739,10 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failu
return checker.InterPodAffinityMatches
}

-func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
-node, err := checker.info.GetNodeInfo(nodeName)
-if err != nil {
-return false, err
+func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+node := nodeInfo.Node()
+if node == nil {
+return false, fmt.Errorf("node not found")
}
allPods, err := checker.podLister.List(labels.Everything())
if err != nil {