Use memory metrics from the pod cgroup for eviction ranking #64280

Merged 1 commit on Dec 4, 2018
34 changes: 5 additions & 29 deletions pkg/kubelet/eviction/helpers.go
@@ -389,16 +389,6 @@ func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsSt
 	}, nil
 }
 
-// podMemoryUsage aggregates pod memory usage.
-func podMemoryUsage(podStats statsapi.PodStats) (v1.ResourceList, error) {
-	memory := resource.Quantity{Format: resource.BinarySI}
-	for _, container := range podStats.Containers {
-		// memory usage (if known)
-		memory.Add(*memoryUsage(container.Memory))
-	}
-	return v1.ResourceList{v1.ResourceMemory: memory}, nil
-}
-
 // localEphemeralVolumeNames returns the set of ephemeral volumes for the pod that are local
 func localEphemeralVolumeNames(pod *v1.Pod) []string {
 	result := []string{}
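
The deleted helper summed per-container working sets and could not see memory charged to the pod-level cgroup outside any container (the pod sandbox, for example). The call sites below instead read the pod-level stat through the existing `memoryUsage` helper. As a reference, a minimal sketch of a helper consistent with those call sites, assuming its signature from how it is invoked in this diff; the real `memoryUsage` already lives in helpers.go, so this is only an illustration:

```go
package eviction

import (
	"k8s.io/apimachinery/pkg/api/resource"

	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)

// memoryUsage (sketch, not the PR's code): convert a cgroup working set
// into a resource.Quantity, reporting zero when stats are missing.
func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
	if memStats == nil || memStats.WorkingSetBytes == nil {
		return &resource.Quantity{Format: resource.BinarySI}
	}
	return resource.NewQuantity(int64(*memStats.WorkingSetBytes), resource.BinarySI)
}
```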
@@ -543,15 +533,8 @@ func exceedMemoryRequests(stats statsFunc) cmpFunc {
 		return cmpBool(!p1Found, !p2Found)
 	}
 
-	p1Usage, p1Err := podMemoryUsage(p1Stats)
-	p2Usage, p2Err := podMemoryUsage(p2Stats)
-	if p1Err != nil || p2Err != nil {
-		// prioritize evicting the pod which had an error getting stats
-		return cmpBool(p1Err != nil, p2Err != nil)
-	}
-
-	p1Memory := p1Usage[v1.ResourceMemory]
-	p2Memory := p2Usage[v1.ResourceMemory]
+	p1Memory := memoryUsage(p1Stats.Memory)
+	p2Memory := memoryUsage(p2Stats.Memory)
 	p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1
 	p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1
 	// prioritize evicting the pod which exceeds its requests
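
`Quantity.Cmp` returns 1 only when the receiver is strictly greater, so a pod sitting exactly at its memory request is not treated as exceeding it. A standalone illustration with made-up values:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	usage := resource.MustParse("300Mi")
	request := resource.MustParse("256Mi")
	// Cmp returns -1, 0, or 1; "exceeds requests" means strictly greater.
	fmt.Println(usage.Cmp(request) == 1) // true: 300Mi > 256Mi

	atRequest := resource.MustParse("256Mi")
	fmt.Println(atRequest.Cmp(request) == 1) // false: equal does not exceed
}
```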
@@ -569,24 +552,17 @@ func memory(stats statsFunc) cmpFunc {
 		return cmpBool(!p1Found, !p2Found)
 	}
 
-	p1Usage, p1Err := podMemoryUsage(p1Stats)
-	p2Usage, p2Err := podMemoryUsage(p2Stats)
-	if p1Err != nil || p2Err != nil {
-		// prioritize evicting the pod which had an error getting stats
-		return cmpBool(p1Err != nil, p2Err != nil)
-	}
-
 	// adjust p1, p2 usage relative to the request (if any)
-	p1Memory := p1Usage[v1.ResourceMemory]
+	p1Memory := memoryUsage(p1Stats.Memory)
 	p1Request := podRequest(p1, v1.ResourceMemory)
 	p1Memory.Sub(p1Request)
 
-	p2Memory := p2Usage[v1.ResourceMemory]
+	p2Memory := memoryUsage(p2Stats.Memory)
 	p2Request := podRequest(p2, v1.ResourceMemory)
 	p2Memory.Sub(p2Request)
 
 	// prioritize evicting the pod which has the larger consumption of memory
-	return p2Memory.Cmp(p1Memory)
+	return p2Memory.Cmp(*p1Memory)
 	}
 }
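
The ranking key here is the pod's working set minus its memory request, sorted descending, so a pod that is over its request is evicted before a larger pod that stays within its request. A self-contained sketch with hypothetical numbers:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// pod1: 2Gi working set, 1Gi request -> 1Gi over its request.
	p1 := resource.MustParse("2Gi")
	p1.Sub(resource.MustParse("1Gi"))

	// pod2: 3Gi working set, 3Gi request -> exactly at its request.
	p2 := resource.MustParse("3Gi")
	p2.Sub(resource.MustParse("3Gi"))

	// The comparator returns p2.Cmp(p1); a negative result sorts pod1 first,
	// so pod1 is evicted before pod2 despite pod2's larger absolute usage.
	fmt.Println(p2.Cmp(p1)) // -1
}
```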

42 changes: 14 additions & 28 deletions pkg/kubelet/eviction/helpers_test.go
@@ -1015,23 +1015,17 @@ func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) {
 
 // newPodStats returns a pod stat where each container is using the specified working set
 // each pod must have a Name, UID, Namespace
-func newPodStats(pod *v1.Pod, containerWorkingSetBytes int64) statsapi.PodStats {
-	result := statsapi.PodStats{
+func newPodStats(pod *v1.Pod, podWorkingSetBytes uint64) statsapi.PodStats {
+	return statsapi.PodStats{
 		PodRef: statsapi.PodReference{
 			Name:      pod.Name,
 			Namespace: pod.Namespace,
 			UID:       string(pod.UID),
 		},
+		Memory: &statsapi.MemoryStats{
+			WorkingSetBytes: &podWorkingSetBytes,
+		},
 	}
-	val := uint64(containerWorkingSetBytes)
-	for range pod.Spec.Containers {
-		result.Containers = append(result.Containers, statsapi.ContainerStats{
-			Memory: &statsapi.MemoryStats{
-				WorkingSetBytes: &val,
-			},
-		})
-	}
-	return result
 }
 
 func TestMakeSignalObservations(t *testing.T) {
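
Tests now hand the fixture a single pod-level working set instead of fanning one value out per container. A hypothetical helper (not in the PR; names are illustrative) showing how a test seeds these stats after the change:

```go
package eviction

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)

// buildFakePodStats is a hypothetical test helper: it seeds a 1Gi
// pod-level working set for the given pod fixture.
func buildFakePodStats(pod *v1.Pod) statsapi.PodStats {
	return newPodStats(pod, uint64(1024*1024*1024)) // 1Gi pod working set
}

// Example pod fixture for the helper above.
var examplePod = &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "pod4", Namespace: "ns4", UID: "uuid4"},
}
```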
@@ -1096,9 +1090,9 @@ func TestMakeSignalObservations(t *testing.T) {
 		podMaker("pod1", "ns2", "uuid2", 1),
 		podMaker("pod3", "ns3", "uuid3", 1),
 	}
-	containerWorkingSetBytes := int64(1024 * 1024 * 1024)
+	podWorkingSetBytes := uint64(1024 * 1024 * 1024)
 	for _, pod := range pods {
-		fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, containerWorkingSetBytes))
+		fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, podWorkingSetBytes))
 	}
 	res := quantityMustParse("5Gi")
 	// Allocatable thresholds are always 100%. Verify that Threshold == Capacity.
@@ -1171,11 +1165,8 @@ func TestMakeSignalObservations(t *testing.T) {
 		if !found {
 			t.Errorf("Pod stats were not found for pod %v", pod.UID)
 		}
-		for _, container := range podStats.Containers {
-			actual := int64(*container.Memory.WorkingSetBytes)
-			if containerWorkingSetBytes != actual {
-				t.Errorf("Container working set expected %v, actual: %v", containerWorkingSetBytes, actual)
-			}
+		if *podStats.Memory.WorkingSetBytes != podWorkingSetBytes {
+			t.Errorf("Pod working set expected %v, actual: %v", podWorkingSetBytes, *podStats.Memory.WorkingSetBytes)
 		}
 	}
 }
@@ -1851,20 +1842,15 @@ func newPodDiskStats(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resou
 }
 
 func newPodMemoryStats(pod *v1.Pod, workingSet resource.Quantity) statsapi.PodStats {
-	result := statsapi.PodStats{
+	workingSetBytes := uint64(workingSet.Value())
+	return statsapi.PodStats{
 		PodRef: statsapi.PodReference{
 			Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID),
 		},
+		Memory: &statsapi.MemoryStats{
+			WorkingSetBytes: &workingSetBytes,
+		},
 	}
-	for range pod.Spec.Containers {
-		workingSetBytes := uint64(workingSet.Value())
-		result.Containers = append(result.Containers, statsapi.ContainerStats{
-			Memory: &statsapi.MemoryStats{
-				WorkingSetBytes: &workingSetBytes,
-			},
-		})
-	}
-	return result
 }
 
 func newResourceList(cpu, memory, disk string) v1.ResourceList {
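
Callers of this fixture now pass the pod's total working set as a single Quantity; `workingSet.Value()` yields bytes as an int64, which the stats API stores as a uint64. A fragment with hypothetical values, where `pod` is any *v1.Pod test fixture:

```go
// 800Mi of pod-level working set for a ranking test (values hypothetical).
stats := newPodMemoryStats(pod, resource.MustParse("800Mi"))
_ = stats // e.g. returned from a fake statsFunc
```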