migrate scheduler api types to sized integers
ahmad-diaa committed Sep 12, 2019
1 parent b3c4bde commit 801cc54
Showing 22 changed files with 75 additions and 73 deletions.
4 changes: 2 additions & 2 deletions pkg/scheduler/algorithm/priorities/even_pods_spread.go
@@ -176,7 +176,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
// debugging purpose: print the value for each node
// score must be pointer here, otherwise it's always 0
if klog.V(10) {
defer func(score *int, nodeName string) {
defer func(score *int64, nodeName string) {
klog.Infof("%v -> %v: EvenPodsSpreadPriority, Score: (%d)", pod.Name, nodeName, *score)
}(&result[i].Score, node.Name)
}
@@ -190,7 +190,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
continue
}
fScore := float64(schedulerapi.MaxPriority) * (float64(total-t.nodeNameToPodCounts[node.Name]) / float64(maxMinDiff))
result[i].Score = int(fScore)
result[i].Score = int64(fScore)
}

return result, nil
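
A note on the `*int` → `*int64` change in the deferred logging closure above: the pointer type has to move in lockstep with the field it points at, or the capture no longer compiles once `HostPriority.Score` becomes `int64`. A minimal, self-contained sketch of the pattern (the `HostPriority` type and the scoring loop are simplified stand-ins for the real ones in this file):

```go
package main

import "fmt"

// HostPriority mirrors the shape of the scheduler API type after this commit:
// Score is a sized int64 instead of a platform-dependent int.
type HostPriority struct {
	Host  string
	Score int64
}

func main() {
	result := []HostPriority{{Host: "node-a"}, {Host: "node-b"}}

	for i := range result {
		// The deferred closure must now take *int64; capturing &result[i].Score
		// as *int would no longer compile after the field type change.
		defer func(score *int64, nodeName string) {
			fmt.Printf("%s: EvenPodsSpreadPriority, Score: (%d)\n", nodeName, *score)
		}(&result[i].Score, result[i].Host)

		fScore := 10.0 * float64(i) / float64(len(result))
		result[i].Score = int64(fScore)
	}
}
```

Because the closure receives a pointer, it prints the score assigned after the defer was registered, which is why the pointer type matters here at all.
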
2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/image_locality.go
@@ -55,7 +55,7 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler

return schedulerapi.HostPriority{
Host: node.Name,
Score: score,
Score: int64(score),
}, nil
}

2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/interpod_affinity.go
@@ -236,7 +236,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
if maxMinDiff > 0 && pm.counts[node.Name] != nil {
fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount))
}
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(fScore)})
if klog.V(10) {
klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
}
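
The `int(fScore)` → `int64(fScore)` conversions in this and the following files are behavior-preserving: Go truncates a float toward zero when converting to any integer type, so only the storage width changes. A small sketch of the normalization shape these priority functions share (the function and its arguments are illustrative, not the real API):

```go
package main

import "fmt"

const maxPriority = 10 // plays the role of schedulerapi.MaxPriority in the diff

// normalize maps a raw count into [0, maxPriority] and truncates to int64,
// the same shape as the fScore computations in these priority functions.
func normalize(count, minCount, maxCount int64) int64 {
	if maxCount == minCount {
		return 0
	}
	fScore := float64(maxPriority) * (float64(count-minCount) / float64(maxCount-minCount))
	return int64(fScore) // truncation toward zero, identical to the old int(fScore)
}

func main() {
	fmt.Println(normalize(7, 0, 9)) // 7 (7.77… truncated)
	fmt.Println(normalize(9, 0, 9)) // 10
	fmt.Println(normalize(0, 0, 9)) // 0
}
```
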
2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/node_affinity.go
@@ -69,7 +69,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s

return schedulerapi.HostPriority{
Host: node.Name,
Score: int(count),
Score: int64(count),
}, nil
}

2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/node_label.go
@@ -56,6 +56,6 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
}
return schedulerapi.HostPriority{
Host: node.Name,
Score: score,
Score: int64(score),
}, nil
}
4 changes: 2 additions & 2 deletions pkg/scheduler/algorithm/priorities/reduce.go
@@ -25,14 +25,14 @@ import (
// NormalizeReduce generates a PriorityReduceFunction that can normalize the result
// scores to [0, maxPriority]. If reverse is set to true, it reverses the scores by
// subtracting it from maxPriority.
func NormalizeReduce(maxPriority int, reverse bool) PriorityReduceFunction {
func NormalizeReduce(maxPriority int64, reverse bool) PriorityReduceFunction {
return func(
_ *v1.Pod,
_ interface{},
_ map[string]*schedulernodeinfo.NodeInfo,
result schedulerapi.HostPriorityList) error {

var maxCount int
var maxCount int64
for i := range result {
if result[i].Score > maxCount {
maxCount = result[i].Score
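
For context, the function being retyped here rescales every node score against the observed maximum. A self-contained sketch of that reduce step with the new `int64` types; it drops the unused pod/metadata/node-info parameters of the real signature, and the all-zero handling is an assumption of the sketch:

```go
package main

import "fmt"

type HostPriority struct {
	Host  string
	Score int64
}

// normalizeReduce rescales scores into [0, maxPriority]; with reverse=true the
// scale is flipped so the best raw score maps to 0 and the worst to maxPriority.
func normalizeReduce(maxPriority int64, reverse bool, result []HostPriority) {
	var maxCount int64
	for i := range result {
		if result[i].Score > maxCount {
			maxCount = result[i].Score
		}
	}
	if maxCount == 0 {
		// Assumed behavior of the sketch: nothing to scale against.
		if reverse {
			for i := range result {
				result[i].Score = maxPriority
			}
		}
		return
	}
	for i := range result {
		score := result[i].Score * maxPriority / maxCount
		if reverse {
			score = maxPriority - score
		}
		result[i].Score = score
	}
}

func main() {
	scores := []HostPriority{{"node-a", 3}, {"node-b", 12}, {"node-c", 0}}
	normalizeReduce(10, false, scores)
	fmt.Println(scores) // [{node-a 2} {node-b 10} {node-c 0}]
}
```
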
2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/resource_allocation.go
@@ -92,7 +92,7 @@ func (r *ResourceAllocationPriority) PriorityMap(

return schedulerapi.HostPriority{
Host: node.Name,
Score: int(score),
Score: score,
}, nil
}

2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/resource_limits.go
@@ -54,7 +54,7 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
memScore := computeScore(podLimits.Memory, allocatableResources.Memory)

score := int(0)
score := int64(0)
if cpuScore == 1 || memScore == 1 {
score = 1
}
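
The `score := int(0)` → `score := int64(0)` change looks cosmetic, but the typed zero lets the value flow into the `int64` Score field without a conversion at the return site. A tiny sketch of the 0/1 limit-awareness scoring (the `hasLimit` helper is a hypothetical stand-in for the real `computeScore`):

```go
package main

import "fmt"

// hasLimit is a hypothetical stand-in for the real computeScore helper: it
// reports 1 when the pod declares a limit for the resource on a node that has
// capacity for it, and 0 otherwise.
func hasLimit(podLimit, allocatable int64) int64 {
	if podLimit > 0 && allocatable > 0 {
		return 1
	}
	return 0
}

func main() {
	cpuScore := hasLimit(500, 4000) // pod declares a CPU limit
	memScore := hasLimit(0, 8<<30)  // no memory limit declared

	score := int64(0) // typed zero flows straight into an int64 Score field
	if cpuScore == 1 || memScore == 1 {
		score = 1
	}
	fmt.Println("ResourceLimits score:", score) // 1
}
```
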
24 changes: 12 additions & 12 deletions pkg/scheduler/algorithm/priorities/selector_spreading.go
@@ -80,15 +80,15 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
if len(selectors) == 0 {
return schedulerapi.HostPriority{
Host: node.Name,
Score: int(0),
Score: 0,
}, nil
}

count := countMatchingPods(pod.Namespace, selectors, nodeInfo)

return schedulerapi.HostPriority{
Host: node.Name,
Score: count,
Score: int64(count),
}, nil
}

@@ -97,9 +97,9 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
// where zone information is included on the nodes, it favors nodes
// in zones with fewer existing matching pods.
func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
countsByZone := make(map[string]int, 10)
maxCountByZone := int(0)
maxCountByNodeName := int(0)
countsByZone := make(map[string]int64, 10)
maxCountByZone := int64(0)
maxCountByNodeName := int64(0)

for i := range result {
if result[i].Score > maxCountByNodeName {
@@ -141,10 +141,10 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
}
}
result[i].Score = int(fScore)
result[i].Score = int64(fScore)
if klog.V(10) {
klog.Infof(
"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int64(fScore),
)
}
}
@@ -232,16 +232,16 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta

return schedulerapi.HostPriority{
Host: node.Name,
Score: score,
Score: int64(score),
}, nil
}

// CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
var numServicePods int
var numServicePods int64
var label string
podCounts := map[string]int{}
podCounts := map[string]int64{}
labelNodesStatus := map[string]string{}
maxPriorityFloat64 := float64(schedulerapi.MaxPriority)

@@ -261,7 +261,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
label, ok := labelNodesStatus[hostPriority.Host]
if !ok {
result[i].Host = hostPriority.Host
result[i].Score = int(0)
result[i].Score = 0
continue
}
// initializing to the default/max node score of maxPriority
@@ -270,7 +270,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods))
}
result[i].Host = hostPriority.Host
result[i].Score = int(fScore)
result[i].Score = int64(fScore)
}

return nil
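
The zone-aware reduce above keeps three counters that all widen from `int` to `int64`. A compact sketch of the count-then-blend logic with the new types; the node-to-zone lookup, the sample data, and the 2/3 zone weighting are assumptions of the sketch rather than a copy of the real function:

```go
package main

import "fmt"

const (
	maxPriority   = 10
	zoneWeighting = 2.0 / 3.0 // assumed weighting between node-level and zone-level spreading
)

func main() {
	// Illustrative data: matching-pod counts per node and each node's zone.
	podsOnNode := map[string]int64{"n1": 4, "n2": 1, "n3": 0}
	zoneOf := map[string]string{"n1": "zone-a", "n2": "zone-a", "n3": "zone-b"}

	countsByZone := make(map[string]int64, 10)
	maxCountByZone := int64(0)
	maxCountByNodeName := int64(0)

	for node, count := range podsOnNode {
		if count > maxCountByNodeName {
			maxCountByNodeName = count
		}
		countsByZone[zoneOf[node]] += count
	}
	for _, count := range countsByZone {
		if count > maxCountByZone {
			maxCountByZone = count
		}
	}

	for node, count := range podsOnNode {
		// Fewer matching pods on the node (and in its zone) means a higher score.
		fScore := float64(maxPriority)
		if maxCountByNodeName > 0 {
			fScore = float64(maxPriority) * (float64(maxCountByNodeName-count) / float64(maxCountByNodeName))
		}
		if maxCountByZone > 0 {
			zoneScore := float64(maxPriority) * (float64(maxCountByZone-countsByZone[zoneOf[node]]) / float64(maxCountByZone))
			fScore = fScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore
		}
		fmt.Printf("%s -> SelectorSpreadPriority score %d\n", node, int64(fScore))
		// n1 -> 0, n2 -> 2, n3 -> 10 (map iteration order varies)
	}
}
```
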
2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/taint_toleration.go
@@ -68,7 +68,7 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *

return schedulerapi.HostPriority{
Host: node.Name,
Score: countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule),
Score: int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)),
}, nil
}

2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/priorities/types.go
@@ -50,7 +50,7 @@ type PriorityConfig struct {
// TODO: Remove it after migrating all functions to
// Map-Reduce pattern.
Function PriorityFunction
Weight int
Weight int64
}

// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
2 changes: 1 addition & 1 deletion pkg/scheduler/algorithm/scheduler_interface.go
@@ -39,7 +39,7 @@ type SchedulerExtender interface {
// Prioritize based on extender-implemented priority functions. The returned scores & weight
// are used to compute the weighted score for an extender. The weighted scores are added to
// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error)

// Bind delegates the action of binding a pod to a node to the extender.
Bind(binding *v1.Binding) error
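
Because `SchedulerExtender` is an interface, widening the returned weight to `int64` ripples into every implementation, including test fakes. A hedged sketch of what a minimal in-process implementation of the retyped `Prioritize` could look like (local stand-in types, and the pod parameter is omitted for brevity):

```go
package main

import "fmt"

type Node struct{ Name string }

type HostPriority struct {
	Host  string
	Score int64
}

type HostPriorityList []HostPriority

// fakeExtender is a toy SchedulerExtender-style implementation: it scores every
// node the same and reports its weight as an int64, matching the retyped
// Prioritize result in this commit.
type fakeExtender struct {
	weight int64
}

func (f *fakeExtender) Prioritize(nodes []*Node) (*HostPriorityList, int64, error) {
	result := make(HostPriorityList, 0, len(nodes))
	for _, n := range nodes {
		result = append(result, HostPriority{Host: n.Name, Score: 5})
	}
	return &result, f.weight, nil
}

func main() {
	ext := &fakeExtender{weight: 2}
	nodes := []*Node{{Name: "node-a"}, {Name: "node-b"}}

	priorities, weight, err := ext.Prioritize(nodes)
	if err != nil {
		panic(err)
	}
	for _, hp := range *priorities {
		fmt.Printf("%s: extender score %d, weighted %d\n", hp.Host, hp.Score, hp.Score*weight)
	}
}
```
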
21 changes: 11 additions & 10 deletions pkg/scheduler/api/types.go
@@ -17,6 +17,7 @@ limitations under the License.
package api

import (
"math"
"time"

v1 "k8s.io/api/core/v1"
@@ -30,11 +30,11 @@ const (
// MaxInt defines the max signed int value.
MaxInt = int(MaxUint >> 1)
// MaxTotalPriority defines the max total priority value.
MaxTotalPriority = MaxInt
MaxTotalPriority = int64(math.MaxInt64)
// MaxPriority defines the max priority value.
MaxPriority = 10
// MaxWeight defines the max weight value.
MaxWeight = MaxInt / MaxPriority
MaxWeight = int64(math.MaxInt64 / MaxPriority)
// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes
// that once found feasible, the scheduler stops looking for more nodes.
DefaultPercentageOfNodesToScore = 50
@@ -86,7 +87,7 @@ type PriorityPolicy struct {
Name string
// The numeric multiplier for the node scores that the priority function generates
// The weight should be a positive integer
Weight int
Weight int64
// Holds the parameters to configure the given priority function
Argument *PriorityArgument
}
@@ -157,17 +158,17 @@ type RequestedToCapacityRatioArguments struct {
// UtilizationShapePoint represents single point of priority function shape
type UtilizationShapePoint struct {
// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
Utilization int
Utilization int32
// Score assigned to given utilization (y axis). Valid values are 0 to 10.
Score int
Score int32
}

// ResourceSpec represents single resource for bin packing of priority RequestedToCapacityRatioArguments.
type ResourceSpec struct {
// Name of the resource to be managed by RequestedToCapacityRatio function.
Name v1.ResourceName
// Weight of the resource.
Weight int
Weight int64
}

// ExtenderManagedResource describes the arguments of extended resources
@@ -220,7 +221,7 @@ type ExtenderConfig struct {
PrioritizeVerb string
// The numeric multiplier for the node scores that the prioritize call generates.
// The weight should be a positive integer
Weight int
Weight int64
// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
// can implement this function.
@@ -271,7 +272,7 @@ type ExtenderPreemptionArgs struct {
// numPDBViolations: the count of violations of PodDisruptionBudget
type Victims struct {
Pods []*v1.Pod
NumPDBViolations int
NumPDBViolations int64
}

// MetaPod represent identifier for a v1.Pod
@@ -285,7 +286,7 @@ type MetaPod struct {
// numPDBViolations: the count of violations of PodDisruptionBudget
type MetaVictims struct {
Pods []*MetaPod
NumPDBViolations int
NumPDBViolations int64
}

// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
@@ -341,7 +342,7 @@ type HostPriority struct {
// Name of the host
Host string
// Score associated with the host
Score int
Score int64
}

// HostPriorityList declares a []HostPriority type.
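
The constants are the interesting part of this file: `MaxTotalPriority` and `MaxWeight` are now pinned to the 64-bit range instead of the platform-dependent `MaxInt`, and `MaxWeight` is derived by dividing `math.MaxInt64` by `MaxPriority` so that a maximal weight multiplied by a maximal score still fits in an `int64`. A quick standalone check of that arithmetic (not the real package, just the same constant expressions):

```go
package main

import (
	"fmt"
	"math"
)

const (
	maxPriority = 10
	// Dividing first guarantees that maxWeight*maxPriority stays representable in an int64.
	maxWeight = int64(math.MaxInt64 / maxPriority)
)

func main() {
	fmt.Println("math.MaxInt64:        ", int64(math.MaxInt64))
	fmt.Println("maxWeight:            ", maxWeight)
	fmt.Println("maxWeight*maxPriority:", maxWeight*maxPriority)                      // still <= MaxInt64, no overflow
	fmt.Println("headroom:             ", int64(math.MaxInt64)-maxWeight*maxPriority) // 7, since MaxInt64%10 == 7
}
```
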
18 changes: 9 additions & 9 deletions pkg/scheduler/api/v1/types.go
@@ -39,7 +39,7 @@ type Policy struct {
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
// corresponding to every RequiredDuringScheduling affinity rule.
// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
HardPodAffinitySymmetricWeight int32 `json:"hardPodAffinitySymmetricWeight"`

// When AlwaysCheckAllPredicates is set to true, scheduler checks all
// the configured predicates even after one or more of them fails.
@@ -66,7 +66,7 @@ type PriorityPolicy struct {
Name string `json:"name"`
// The numeric multiplier for the node scores that the priority function generates
// The weight should be non-zero and can be a positive or a negative integer
Weight int `json:"weight"`
Weight int64 `json:"weight"`
// Holds the parameters to configure the given priority function
Argument *PriorityArgument `json:"argument"`
}
@@ -137,17 +137,17 @@ type RequestedToCapacityRatioArguments struct {
// UtilizationShapePoint represents single point of priority function shape.
type UtilizationShapePoint struct {
// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
Utilization int `json:"utilization"`
Utilization int32 `json:"utilization"`
// Score assigned to given utilization (y axis). Valid values are 0 to 10.
Score int `json:"score"`
Score int32 `json:"score"`
}

// ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments.
type ResourceSpec struct {
// Name of the resource to be managed by RequestedToCapacityRatio function.
Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
// Weight of the resource.
Weight int `json:"weight,omitempty"`
Weight int64 `json:"weight,omitempty"`
}

// ExtenderManagedResource describes the arguments of extended resources
@@ -200,7 +200,7 @@ type ExtenderConfig struct {
PrioritizeVerb string `json:"prioritizeVerb,omitempty"`
// The numeric multiplier for the node scores that the prioritize call generates.
// The weight should be a positive integer
Weight int `json:"weight,omitempty"`
Weight int64 `json:"weight,omitempty"`
// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
// can implement this function.
@@ -276,7 +276,7 @@ type ExtenderPreemptionArgs struct {
// numPDBViolations: the count of violations of PodDisruptionBudget
type Victims struct {
Pods []*apiv1.Pod `json:"pods"`
NumPDBViolations int `json:"numPDBViolations"`
NumPDBViolations int64 `json:"numPDBViolations"`
}

// MetaPod represent identifier for a v1.Pod
@@ -290,7 +290,7 @@ type MetaPod struct {
// numPDBViolations: the count of violations of PodDisruptionBudget
type MetaVictims struct {
Pods []*MetaPod `json:"pods"`
NumPDBViolations int `json:"numPDBViolations"`
NumPDBViolations int64 `json:"numPDBViolations"`
}

// FailedNodesMap represents the filtered out nodes, with node names and failure messages
@@ -333,7 +333,7 @@ type HostPriority struct {
// Name of the host
Host string `json:"host"`
// Score associated with the host
Score int `json:"score"`
Score int64 `json:"score"`
}

// HostPriorityList declares a []HostPriority type.
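
These v1 types are the JSON-facing policy configuration, so the widening is also visible on the wire, although the payload shape does not change: `encoding/json` reads and writes `int64` fields exactly like `int` ones, and only consumers that parse the numbers as IEEE-754 doubles need to care (they keep exact integers only up to 2^53). A small round-trip sketch with a local stand-in type whose json tags mirror this file:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// priorityPolicy is a local stand-in for the v1 PriorityPolicy in this file.
type priorityPolicy struct {
	Name   string `json:"name"`
	Weight int64  `json:"weight"`
}

func main() {
	in := []byte(`{"name":"ServiceSpreadingPriority","weight":100000}`)

	var p priorityPolicy
	if err := json.Unmarshal(in, &p); err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %+v\n", p) // decoded: {Name:ServiceSpreadingPriority Weight:100000}

	out, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println("re-encoded:", string(out)) // identical payload shape before and after int -> int64
}
```
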
4 changes: 2 additions & 2 deletions pkg/scheduler/core/extender.go
@@ -45,7 +45,7 @@ type HTTPExtender struct {
filterVerb string
prioritizeVerb string
bindVerb string
weight int
weight int64
client *http.Client
nodeCacheCapable bool
managedResources sets.String
@@ -321,7 +321,7 @@ func (h *HTTPExtender) Filter(
// Prioritize based on extender implemented priority functions. Weight*priority is added
// up for each such priority function. The returned score is added to the score computed
// by Kubernetes scheduler. The total score is used to do the host selection.
func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) {
func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int64, error) {
var (
result schedulerapi.HostPriorityList
nodeList *v1.NodeList
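
Downstream of this signature, the generic scheduler multiplies each extender's scores by the returned weight and adds them to the in-tree scores, which is exactly the arithmetic a sized integer protects: with everything held as `int64`, the product cannot silently wrap on a 32-bit platform. A hedged sketch of that combine step (the score map, weight, and node names are illustrative):

```go
package main

import "fmt"

type HostPriority struct {
	Host  string
	Score int64
}

func main() {
	// Scores already accumulated from the in-tree priority functions (illustrative).
	combinedScores := map[string]int64{"node-a": 18, "node-b": 25}

	// One extender's result plus its weight, as returned by the retyped
	// Prioritize(pod, nodes) (*HostPriorityList, int64, error).
	extenderResult := []HostPriority{{"node-a", 7}, {"node-b", 2}}
	weight := int64(5)

	for _, hp := range extenderResult {
		// All operands are int64, so weight*Score cannot silently wrap at 32 bits
		// the way a platform-dependent int could.
		combinedScores[hp.Host] += hp.Score * weight
	}

	fmt.Println(combinedScores) // map[node-a:53 node-b:35]
}
```
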
