Merge pull request #119509 from tzneal/fix-describe-node-with-sidecars
kubectl: fix describe node output when sidecars are present
k8s-ci-robot committed Aug 16, 2023
2 parents 19deb04 + 479b2d0 commit 12bd9bf
Showing 2 changed files with 259 additions and 8 deletions.
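For context, the behavior this commit targets: a restartable init container (sidecar) keeps running for the life of the pod, so its requests belong in the node's running total, while an ordinary init container only raises the peak during initialization. Below is a minimal sketch of that rule, assuming simplified integer CPU counts and a hypothetical helper name; it is not code from this commit.

package main

import "fmt"

// effectiveCPURequest is a hypothetical illustration of the sidecar-aware rule:
// steady state = app containers + sidecars; startup peak = an ordinary init
// container plus the sidecars already running; the pod charges the larger one.
func effectiveCPURequest(appContainers, sidecars, ordinaryInits []int64) int64 {
    var steady, sidecarSum int64
    for _, c := range appContainers {
        steady += c
    }
    for _, s := range sidecars {
        sidecarSum += s
    }
    steady += sidecarSum

    peak := steady
    for _, c := range ordinaryInits {
        // simplification: counts every sidecar, not just those declared earlier
        if c+sidecarSum > peak {
            peak = c + sidecarSum
        }
    }
    return peak
}

func main() {
    // Same shape as the test pod below: app container 1 CPU (plus one with no request),
    // sidecar init container 1 CPU, ordinary init container 1 CPU.
    fmt.Println(effectiveCPURequest([]int64{1, 0}, []int64{1}, []int64{1})) // prints 2
}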
150 changes: 150 additions & 0 deletions staging/src/k8s.io/kubectl/pkg/describe/describe_test.go
@@ -5456,6 +5456,156 @@ func TestDescribeNode(t *testing.T) {
     }
 }
 
+func TestDescribeNodeWithSidecar(t *testing.T) {
+    holderIdentity := "holder"
+    nodeCapacity := mergeResourceLists(
+        getHugePageResourceList("2Mi", "4Gi"),
+        getResourceList("8", "24Gi"),
+        getHugePageResourceList("1Gi", "0"),
+    )
+    nodeAllocatable := mergeResourceLists(
+        getHugePageResourceList("2Mi", "2Gi"),
+        getResourceList("4", "12Gi"),
+        getHugePageResourceList("1Gi", "0"),
+    )
+
+    restartPolicy := corev1.ContainerRestartPolicyAlways
+    fake := fake.NewSimpleClientset(
+        &corev1.Node{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "bar",
+                UID:  "uid",
+            },
+            Spec: corev1.NodeSpec{
+                Unschedulable: true,
+            },
+            Status: corev1.NodeStatus{
+                Capacity:    nodeCapacity,
+                Allocatable: nodeAllocatable,
+            },
+        },
+        &coordinationv1.Lease{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "bar",
+                Namespace: corev1.NamespaceNodeLease,
+            },
+            Spec: coordinationv1.LeaseSpec{
+                HolderIdentity: &holderIdentity,
+                AcquireTime:    &metav1.MicroTime{Time: time.Now().Add(-time.Hour)},
+                RenewTime:      &metav1.MicroTime{Time: time.Now()},
+            },
+        },
+        &corev1.Pod{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "pod-with-resources",
+                Namespace: "foo",
+            },
+            TypeMeta: metav1.TypeMeta{
+                Kind: "Pod",
+            },
+            Spec: corev1.PodSpec{
+                InitContainers: []corev1.Container{
+                    // sidecar, should sum into the total resources
+                    {
+                        Name:          "init-container-1",
+                        RestartPolicy: &restartPolicy,
+                        Resources: corev1.ResourceRequirements{
+                            Requests: getResourceList("1", "1Gi"),
+                        },
+                    },
+                    // non-sidecar
+                    {
+                        Name: "init-container-2",
+                        Resources: corev1.ResourceRequirements{
+                            Requests: getResourceList("1", "1Gi"),
+                        },
+                    },
+                },
+                Containers: []corev1.Container{
+                    {
+                        Name:  "cpu-mem",
+                        Image: "image:latest",
+                        Resources: corev1.ResourceRequirements{
+                            Requests: getResourceList("1", "1Gi"),
+                            Limits:   getResourceList("2", "2Gi"),
+                        },
+                    },
+                    {
+                        Name:  "hugepages",
+                        Image: "image:latest",
+                        Resources: corev1.ResourceRequirements{
+                            Requests: getHugePageResourceList("2Mi", "512Mi"),
+                            Limits:   getHugePageResourceList("2Mi", "512Mi"),
+                        },
+                    },
+                },
+            },
+            Status: corev1.PodStatus{
+                Phase: corev1.PodRunning,
+            },
+        },
+        &corev1.EventList{
+            Items: []corev1.Event{
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "event-1",
+                        Namespace: "default",
+                    },
+                    InvolvedObject: corev1.ObjectReference{
+                        Kind: "Node",
+                        Name: "bar",
+                        UID:  "bar",
+                    },
+                    Message:        "Node bar status is now: NodeHasNoDiskPressure",
+                    FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
+                    LastTimestamp:  metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
+                    Count:          1,
+                    Type:           corev1.EventTypeNormal,
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name:      "event-2",
+                        Namespace: "default",
+                    },
+                    InvolvedObject: corev1.ObjectReference{
+                        Kind: "Node",
+                        Name: "bar",
+                        UID:  "0ceac5fb-a393-49d7-b04f-9ea5f18de5e9",
+                    },
+                    Message:        "Node bar status is now: NodeReady",
+                    FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
+                    LastTimestamp:  metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
+                    Count:          2,
+                    Type:           corev1.EventTypeNormal,
+                },
+            },
+        },
+    )
+    c := &describeClient{T: t, Namespace: "foo", Interface: fake}
+    d := NodeDescriber{c}
+    out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true})
+    if err != nil {
+        t.Errorf("unexpected error: %v", err)
+    }
+
+    expectedOut := []string{"Unschedulable", "true", "holder",
+        `Allocated resources:
+  (Total limits may be over 100 percent, i.e., overcommitted.)
+  Resource           Requests     Limits
+  --------           --------     ------
+  cpu                2 (50%)      2 (50%)
+  memory             2Gi (16%)    2Gi (16%)
+  ephemeral-storage  0 (0%)       0 (0%)
+  hugepages-1Gi      0 (0%)       0 (0%)
+  hugepages-2Mi      512Mi (25%)  512Mi (25%)`,
+        `Node bar status is now: NodeHasNoDiskPressure`,
+        `Node bar status is now: NodeReady`}
+    for _, expected := range expectedOut {
+        if !strings.Contains(out, expected) {
+            t.Errorf("expected to find %s in output: %s", expected, out)
+        }
+    }
+}
 func TestDescribeStatefulSet(t *testing.T) {
     var partition int32 = 2672
     var replicas int32 = 1
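A rough check of the expected totals above, assuming the sidecar-aware rule sketched earlier: CPU requests come to max(1 for cpu-mem + 1 for the init-container-1 sidecar, 1 for init-container-2 + 1 for the sidecar) = 2, which is 50% of the node's 4 allocatable CPUs; memory works out the same way to 2Gi, roughly 16% of 12Gi; hugepages-2Mi is 512Mi from the hugepages container alone, 25% of the 2Gi allocatable. The non-restartable init-container-2 never raises the totals because its startup peak (2 CPU / 2Gi) does not exceed the steady-state sum.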
117 changes: 109 additions & 8 deletions staging/src/k8s.io/kubectl/pkg/util/resource/resource.go
@@ -32,29 +32,130 @@ import (
 // total container resource requests and to the total container limits which have a
 // non-zero quantity.
 func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) {
-    reqs, limits = corev1.ResourceList{}, corev1.ResourceList{}
+    return podRequests(pod), podLimits(pod)
+}
+
+// podRequests is a simplified form of PodRequests from k8s.io/kubernetes/pkg/api/v1/resource that doesn't check
+// feature gate enablement and avoids adding a dependency on k8s.io/kubernetes/pkg/apis/core/v1 for kubectl.
+func podRequests(pod *corev1.Pod) corev1.ResourceList {
+    // attempt to reuse the maps if passed, or allocate otherwise
+    reqs := corev1.ResourceList{}
+
+    containerStatuses := map[string]*corev1.ContainerStatus{}
+    for i := range pod.Status.ContainerStatuses {
+        containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
+    }
+
     for _, container := range pod.Spec.Containers {
-        addResourceList(reqs, container.Resources.Requests)
-        addResourceList(limits, container.Resources.Limits)
+        containerReqs := container.Resources.Requests
+        cs, found := containerStatuses[container.Name]
+        if found {
+            if pod.Status.Resize == corev1.PodResizeStatusInfeasible {
+                containerReqs = cs.AllocatedResources.DeepCopy()
+            } else {
+                containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
+            }
+        }
+        addResourceList(reqs, containerReqs)
     }
-    // init containers define the minimum of any resource
+
+    restartableInitContainerReqs := corev1.ResourceList{}
+    initContainerReqs := corev1.ResourceList{}
+
     for _, container := range pod.Spec.InitContainers {
-        maxResourceList(reqs, container.Resources.Requests)
-        maxResourceList(limits, container.Resources.Limits)
+        containerReqs := container.Resources.Requests
+
+        if container.RestartPolicy != nil && *container.RestartPolicy == corev1.ContainerRestartPolicyAlways {
+            // and add them to the resulting cumulative container requests
+            addResourceList(reqs, containerReqs)
+
+            // track our cumulative restartable init container resources
+            addResourceList(restartableInitContainerReqs, containerReqs)
+            containerReqs = restartableInitContainerReqs
+        } else {
+            tmp := corev1.ResourceList{}
+            addResourceList(tmp, containerReqs)
+            addResourceList(tmp, restartableInitContainerReqs)
+            containerReqs = tmp
+        }
+        maxResourceList(initContainerReqs, containerReqs)
     }
 
-    // Add overhead for running a pod to the sum of requests and to non-zero limits:
+    maxResourceList(reqs, initContainerReqs)
+
+    // Add overhead for running a pod to the sum of requests if requested:
     if pod.Spec.Overhead != nil {
         addResourceList(reqs, pod.Spec.Overhead)
+    }
 
+    return reqs
+}
+
+// podLimits is a simplified form of PodLimits from k8s.io/kubernetes/pkg/api/v1/resource that doesn't check
+// feature gate enablement and avoids adding a dependency on k8s.io/kubernetes/pkg/apis/core/v1 for kubectl.
+func podLimits(pod *corev1.Pod) corev1.ResourceList {
+    limits := corev1.ResourceList{}
+
+    for _, container := range pod.Spec.Containers {
+        addResourceList(limits, container.Resources.Limits)
+    }
+
+    restartableInitContainerLimits := corev1.ResourceList{}
+    initContainerLimits := corev1.ResourceList{}
+
+    for _, container := range pod.Spec.InitContainers {
+        containerLimits := container.Resources.Limits
+        // Is the init container marked as a restartable init container?
+        if container.RestartPolicy != nil && *container.RestartPolicy == corev1.ContainerRestartPolicyAlways {
+            addResourceList(limits, containerLimits)
+
+            // track our cumulative restartable init container resources
+            addResourceList(restartableInitContainerLimits, containerLimits)
+            containerLimits = restartableInitContainerLimits
+        } else {
+            tmp := corev1.ResourceList{}
+            addResourceList(tmp, containerLimits)
+            addResourceList(tmp, restartableInitContainerLimits)
+            containerLimits = tmp
+        }
+
+        maxResourceList(initContainerLimits, containerLimits)
+    }
+
+    maxResourceList(limits, initContainerLimits)
+
+    // Add overhead to non-zero limits if requested:
+    if pod.Spec.Overhead != nil {
         for name, quantity := range pod.Spec.Overhead {
             if value, ok := limits[name]; ok && !value.IsZero() {
                 value.Add(quantity)
                 limits[name] = value
             }
         }
     }
-    return
+
+    return limits
 }
 
+// max returns the result of max(a, b) for each named resource and is only used if we can't
+// accumulate into an existing resource list
+func max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
+    result := corev1.ResourceList{}
+    for key, value := range a {
+        if other, found := b[key]; found {
+            if value.Cmp(other) <= 0 {
+                result[key] = other.DeepCopy()
+                continue
+            }
+        }
+        result[key] = value.DeepCopy()
+    }
+    for key, value := range b {
+        if _, found := result[key]; !found {
+            result[key] = value.DeepCopy()
+        }
+    }
+    return result
+}
+
 // addResourceList adds the resources in newList to list
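A usage sketch (not part of the commit; the pod shape and variable names are illustrative) showing what the rewritten PodRequestsAndLimits returns for a pod that mixes a sidecar with an ordinary init container:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    apiresource "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/kubectl/pkg/util/resource"
)

func main() {
    always := corev1.ContainerRestartPolicyAlways
    cpu := func(q string) corev1.ResourceList {
        return corev1.ResourceList{corev1.ResourceCPU: apiresource.MustParse(q)}
    }
    pod := &corev1.Pod{
        Spec: corev1.PodSpec{
            InitContainers: []corev1.Container{
                // restartable init container: counted in the running total
                {Name: "sidecar", RestartPolicy: &always, Resources: corev1.ResourceRequirements{Requests: cpu("1")}},
                // ordinary init container: only contributes to the startup peak
                {Name: "setup", Resources: corev1.ResourceRequirements{Requests: cpu("1")}},
            },
            Containers: []corev1.Container{
                {Name: "app", Resources: corev1.ResourceRequirements{Requests: cpu("1")}},
            },
        },
    }

    reqs, _ := resource.PodRequestsAndLimits(pod)
    q := reqs[corev1.ResourceCPU]
    // With the sidecar-aware aggregation this prints "cpu: 2" (app 1 + sidecar 1);
    // the old code treated the sidecar like any init container and reported 1.
    fmt.Printf("cpu: %s\n", q.String())
}

kubectl describe node feeds each non-terminated pod through this helper when it builds the Allocated resources table, which is what the test above exercises.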
