feat: cache pod limits as part of metadata in priority functions #77498

Merged
2 changes: 2 additions & 0 deletions pkg/scheduler/algorithm/priorities/metadata.go
@@ -46,6 +46,7 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle
// priorityMetadata is a type that is passed as metadata for priority functions
type priorityMetadata struct {
nonZeroRequest *schedulernodeinfo.Resource
podLimits *schedulernodeinfo.Resource
podTolerations []v1.Toleration
affinity *v1.Affinity
podSelectors []labels.Selector
@@ -62,6 +63,7 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo
}
return &priorityMetadata{
nonZeroRequest: getNonZeroRequests(pod),
podLimits: getResourceLimits(pod),
podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
affinity: pod.Spec.Affinity,
podSelectors: getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
13 changes: 13 additions & 0 deletions pkg/scheduler/algorithm/priorities/metadata_test.go
@@ -38,6 +38,12 @@ func TestPriorityMetadata(t *testing.T) {
specifiedReqs.MilliCPU = 200
specifiedReqs.Memory = 2000

nonPodLimits := &schedulernodeinfo.Resource{}

specifiedPodLimits := &schedulernodeinfo.Resource{}
specifiedPodLimits.MilliCPU = 200
specifiedPodLimits.Memory = 2000

tolerations := []v1.Toleration{{
Key: "foo",
Operator: v1.TolerationOpEqual,
@@ -104,6 +110,10 @@ func TestPriorityMetadata(t *testing.T) {
Image: "image",
ImagePullPolicy: "Always",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
v1.ResourceMemory: resource.MustParse("2000"),
@@ -128,6 +138,7 @@ func TestPriorityMetadata(t *testing.T) {
pod: podWithTolerationsAndAffinity,
expected: &priorityMetadata{
nonZeroRequest: nonZeroReqs,
podLimits: nonPodLimits,
podTolerations: tolerations,
affinity: podAffinity,
},
@@ -137,6 +148,7 @@ func TestPriorityMetadata(t *testing.T) {
pod: podWithTolerationsAndRequests,
expected: &priorityMetadata{
nonZeroRequest: specifiedReqs,
podLimits: nonPodLimits,
podTolerations: tolerations,
affinity: nil,
},
@@ -146,6 +158,7 @@ func TestPriorityMetadata(t *testing.T) {
pod: podWithAffinityAndRequests,
expected: &priorityMetadata{
nonZeroRequest: specifiedReqs,
podLimits: specifiedPodLimits,
podTolerations: nil,
affinity: podAffinity,
},
10 changes: 8 additions & 2 deletions pkg/scheduler/algorithm/priorities/resource_limits.go
@@ -42,7 +42,14 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
allocatableResources := nodeInfo.AllocatableResource()

// compute pod limits
podLimits := getResourceLimits(pod)
Member:
Please move getResourceLimits to metadata as it doesn't belong here anymore.

@draveness (Contributor, Author), May 6, 2019:
I added fallback logic for the case where the metadata fails to convert to priorityMetadata, which is consistent with resource_allocation.go:

	var requested schedulernodeinfo.Resource
	if priorityMeta, ok := meta.(*priorityMetadata); ok {
		requested = *priorityMeta.nonZeroRequest
	} else {
		// We couldn't parse metadata - fallback to computing it.
		requested = *getNonZeroRequests(pod)
	}

Member:
sgtm

var podLimits *schedulernodeinfo.Resource
if priorityMeta, ok := meta.(*priorityMetadata); ok && priorityMeta != nil {
@draveness (Contributor, Author):
[1] Fix a bug here.

// We were able to parse metadata, use podLimits from there.
podLimits = priorityMeta.podLimits
} else {
// We couldn't parse metadata - fallback to computing it.
podLimits = getResourceLimits(pod)
}

cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
memScore := computeScore(podLimits.Memory, allocatableResources.Memory)
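The [1] fix above is the extra priorityMeta != nil guard. It appears to matter because a nil *priorityMetadata stored in a non-nil interface{} still passes the type assertion with ok == true, so the cached podLimits would then be read through a nil pointer. A minimal, self-contained sketch of that Go behavior (illustrative only, not part of this PR; the struct is a simplified stand-in):

	package main

	import "fmt"

	// Simplified stand-in for the scheduler's priorityMetadata type.
	type priorityMetadata struct {
		podLimits *int
	}

	func main() {
		var pm *priorityMetadata  // typed nil pointer
		var meta interface{} = pm // interface value is non-nil, but holds a nil pointer

		priorityMeta, ok := meta.(*priorityMetadata)
		fmt.Println(ok, priorityMeta == nil) // prints: true true

		// Without the extra nil check, reading priorityMeta.podLimits here
		// would dereference a nil pointer and panic.
	}

This is exactly the shape of the test below, where a nil *priorityMetadata is passed in for the "no metadata" case.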
@@ -83,7 +90,6 @@ func computeScore(limit, allocatable int64) int64 {
// The reason to create this new function is to be consistent with other
// priority functions because most or perhaps all priority functions work
// with schedulernodeinfo.Resource.
// TODO: cache it as part of metadata passed to priority functions.
func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
result := &schedulernodeinfo.Resource{}
for _, container := range pod.Spec.Containers {
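The body of getResourceLimits is collapsed in this diff. As a rough sketch of what such a helper computes, assuming the Resource.Add and Resource.SetMaxResource helpers from the schedulernodeinfo package: regular container limits are summed, and each init container's limits are folded in as a max, since init containers run one at a time.

	// Illustrative sketch only; the exact implementation lives in resource_limits.go.
	func getResourceLimitsSketch(pod *v1.Pod) *schedulernodeinfo.Resource {
		result := &schedulernodeinfo.Resource{}
		// Sum the limits of all regular containers.
		for _, container := range pod.Spec.Containers {
			result.Add(container.Resources.Limits)
		}
		// Init containers run sequentially, so take the max rather than the sum.
		for _, container := range pod.Spec.InitContainers {
			result.SetMaxResource(container.Resources.Limits)
		}
		return result
	}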
24 changes: 17 additions & 7 deletions pkg/scheduler/algorithm/priorities/resource_limits_test.go
@@ -27,7 +27,7 @@ import (
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func TestResourceLimistPriority(t *testing.T) {
func TestResourceLimitsPriority(t *testing.T) {
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
@@ -140,12 +140,22 @@ func TestResourceLimistPriority(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)

for _, hasMeta := range []bool{true, false} {
var metadata *priorityMetadata
if hasMeta {
metadata = &priorityMetadata{
podLimits: getResourceLimits(test.pod),
}
}

list, err := priorityFunction(ResourceLimitsPriorityMap, nil, metadata)(test.pod, nodeNameToInfo, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
}
})
}