Merge pull request #83780 from krzysied/nc_pod_pointers
Using pointers to pod in node lifecycle controller
k8s-ci-robot committed Oct 15, 2019
2 parents 245189b + b1dfa83 commit 2b21f08
Showing 5 changed files with 37 additions and 23 deletions.
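
The change repeated across these files turns a []v1.Pod list result into []*v1.Pod by taking the address of each slice element by index. The index form is deliberate; below is a minimal standalone sketch (toy Pod struct, not the real API type) of why addressing the range variable instead would be a bug on the Go versions current at the time of this commit:

package main

import "fmt"

// Pod is a toy stand-in for v1.Pod; the real struct is large, which is
// why handing out pointers avoids a full copy per element.
type Pod struct{ Name string }

func main() {
    items := []Pod{{"a"}, {"b"}, {"c"}}

    // The pattern used throughout this commit: address each element of
    // the backing array by index, as in rPods[i] = &pods.Items[i].
    rPods := make([]*Pod, len(items))
    for i := range items {
        rPods[i] = &items[i]
    }
    fmt.Println(rPods[0].Name, rPods[2].Name) // a c

    // The tempting alternative is wrong before Go 1.22: &p aliases the
    // single loop variable, so every entry points at the last element.
    bad := make([]*Pod, 0, len(items))
    for _, p := range items {
        bad = append(bad, &p)
    }
    fmt.Println(bad[0].Name, bad[2].Name) // c c on pre-1.22 Go
}
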
16 changes: 10 additions & 6 deletions pkg/controller/nodelifecycle/node_lifecycle_controller.go
@@ -246,7 +246,7 @@ type Controller struct {
nodeLister corelisters.NodeLister
nodeInformerSynced cache.InformerSynced

- getPodsAssignedToNode func(nodeName string) ([]v1.Pod, error)
+ getPodsAssignedToNode func(nodeName string) ([]*v1.Pod, error)

recorder record.EventRecorder

@@ -419,18 +419,18 @@ func NewNodeLifecycleController(
})

podIndexer := podInformer.Informer().GetIndexer()
- nc.getPodsAssignedToNode = func(nodeName string) ([]v1.Pod, error) {
+ nc.getPodsAssignedToNode = func(nodeName string) ([]*v1.Pod, error) {
objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
if err != nil {
return nil, err
}
- pods := make([]v1.Pod, 0, len(objs))
+ pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
- pods = append(pods, *pod)
+ pods = append(pods, pod)
}
return pods, nil
}
@@ -700,14 +700,18 @@ func (nc *Controller) doEvictionPass() {
}
}

- func listPodsFromNode(kubeClient clientset.Interface, nodeName string) ([]v1.Pod, error) {
+ func listPodsFromNode(kubeClient clientset.Interface, nodeName string) ([]*v1.Pod, error) {
selector := fields.OneTermEqualSelector(apicore.PodHostField, nodeName).String()
options := metav1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(options)
if err != nil {
return nil, err
}
- return pods.Items, nil
+ rPods := make([]*v1.Pod, len(pods.Items))
+ for i := range pods.Items {
+ rPods[i] = &pods.Items[i]
+ }
+ return rPods, nil
}

// monitorNodeHealth verifies node health are constantly updated by kubelet, and
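
The getPodsAssignedToNode closure above serves pods straight out of the shared informer cache through a nodeName index, so the returned pointers alias cached objects. The registration of that index falls outside the hunks shown; the sketch below shows how such an indexer is typically wired up with client-go, with the constant's value and the function name assumed rather than taken from the elided code:

package nodelifecycle

import (
    v1 "k8s.io/api/core/v1"
    coreinformers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/tools/cache"
)

// nodeNameKeyIndex is referenced in the diff above; its value here is
// an assumption for illustration.
const nodeNameKeyIndex = "spec.nodeName"

// addNodeNameIndexer registers an index keyed on spec.nodeName so that
// podIndexer.ByIndex(nodeNameKeyIndex, nodeName) returns only that
// node's pods instead of scanning every pod in the cache.
func addNodeNameIndexer(podInformer coreinformers.PodInformer) error {
    return podInformer.Informer().AddIndexers(cache.Indexers{
        nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
            pod, ok := obj.(*v1.Pod)
            if !ok || len(pod.Spec.NodeName) == 0 {
                return []string{}, nil
            }
            return []string{pod.Spec.NodeName}, nil
        },
    })
}
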
10 changes: 7 additions & 3 deletions pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
@@ -63,8 +63,8 @@ const (

func alwaysReady() bool { return true }

- func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]v1.Pod, error) {
- return func(nodeName string) ([]v1.Pod, error) {
+ func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]*v1.Pod, error) {
+ return func(nodeName string) ([]*v1.Pod, error) {
selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: selector.String(),
@@ -73,7 +73,11 @@ func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]v1.Pod, error)
if err != nil {
return nil, fmt.Errorf("failed to get Pods assigned to node %v", nodeName)
}
- return pods.Items, nil
+ rPods := make([]*v1.Pod, len(pods.Items))
+ for i := range pods.Items {
+ rPods[i] = &pods.Items[i]
+ }
+ return rPods, nil
}
}

5 changes: 2 additions & 3 deletions pkg/controller/nodelifecycle/scheduler/taint_manager.go
@@ -75,7 +75,7 @@ type GetPodFunc func(name, namespace string) (*v1.Pod, error)
type GetNodeFunc func(name string) (*v1.Node, error)

// GetPodsByNodeNameFunc returns the list of pods assigned to the specified node.
- type GetPodsByNodeNameFunc func(nodeName string) ([]v1.Pod, error)
+ type GetPodsByNodeNameFunc func(nodeName string) ([]*v1.Pod, error)

// NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing Pods
// from Nodes tainted with NoExecute Taints.
@@ -464,8 +464,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
}

now := time.Now()
- for i := range pods {
- pod := &pods[i]
+ for _, pod := range pods {
podNamespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
tc.processPodOnNode(podNamespacedName, node.Name, pod.Spec.Tolerations, taints, now)
}
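
With pods now a []*v1.Pod, the plain for-range loop above copies one pointer per iteration instead of a whole pod object, which is what let the old pod := &pods[i] indirection be dropped. A hypothetical micro-benchmark (toy struct standing in for v1.Pod, with its size exaggerated for effect) sketches the shape of the saving:

package podbench

import "testing"

// big stands in for v1.Pod: copying it by value moves the whole struct,
// while copying a pointer moves one machine word.
type big struct{ payload [4096]byte }

var sink byte // keeps the compiler from eliding the loop bodies

func BenchmarkRangeValues(b *testing.B) {
    pods := make([]big, 100)
    for i := 0; i < b.N; i++ {
        for _, p := range pods { // copies the full struct each iteration
            sink += p.payload[0]
        }
    }
}

func BenchmarkRangePointers(b *testing.B) {
    pods := make([]*big, 100)
    for i := range pods {
        pods[i] = &big{}
    }
    for i := 0; i < b.N; i++ {
        for _, p := range pods { // copies only the pointer
            sink += p.payload[0]
        }
    }
}

Run with go test -bench . to compare the two forms.
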
10 changes: 7 additions & 3 deletions pkg/controller/nodelifecycle/scheduler/taint_manager_test.go
@@ -42,16 +42,20 @@ func getPodFromClientset(clientset *fake.Clientset) GetPodFunc {
}

func getPodsAssignedToNode(c *fake.Clientset) GetPodsByNodeNameFunc {
- return func(nodeName string) ([]v1.Pod, error) {
+ return func(nodeName string) ([]*v1.Pod, error) {
selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: selector.String(),
LabelSelector: labels.Everything().String(),
})
if err != nil {
- return []v1.Pod{}, fmt.Errorf("failed to get Pods assigned to node %v", nodeName)
+ return []*v1.Pod{}, fmt.Errorf("failed to get Pods assigned to node %v", nodeName)
}
- return pods.Items, nil
+ rPods := make([]*v1.Pod, len(pods.Items))
+ for i := range pods.Items {
+ rPods[i] = &pods.Items[i]
+ }
+ return rPods, nil
}
}

19 changes: 11 additions & 8 deletions pkg/controller/util/node/controller_utils.go
@@ -42,7 +42,7 @@ import (
// DeletePods will delete all pods from master running on given node,
// and return true if any pods were deleted, or were found pending
// deletion.
- func DeletePods(kubeClient clientset.Interface, pods []v1.Pod, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore appsv1listers.DaemonSetLister) (bool, error) {
+ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore appsv1listers.DaemonSetLister) (bool, error) {
remaining := false
var updateErrList []error

@@ -51,12 +51,13 @@ func DeletePods(kubeClient clientset.Interface, pods []v1.Pod, recorder record.E
}

for i := range pods {
- pod := &pods[i]
// Defensive check, also needed for tests.
- if pod.Spec.NodeName != nodeName {
+ if pods[i].Spec.NodeName != nodeName {
continue
}

+ // Pod will be modified, so making a copy is required.
+ pod := pods[i].DeepCopy()
// Set reason and message in the pod object.
if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil {
if apierrors.IsConflict(err) {
@@ -111,26 +112,28 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa

// MarkPodsNotReady updates ready status of given pods running on
// given node from master return true if success
- func MarkPodsNotReady(kubeClient clientset.Interface, pods []v1.Pod, nodeName string) error {
+ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName string) error {
klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)

errMsg := []string{}
- for _, pod := range pods {
+ for i := range pods {
// Defensive check, also needed for tests.
- if pod.Spec.NodeName != nodeName {
+ if pods[i].Spec.NodeName != nodeName {
continue
}

+ // Pod will be modified, so making a copy is required.
+ pod := pods[i].DeepCopy()
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodReady {
cond.Status = v1.ConditionFalse
if !utilpod.UpdatePodCondition(&pod.Status, &cond) {
break
}
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
- _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(&pod)
+ _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
if err != nil {
klog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err)
klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
errMsg = append(errMsg, fmt.Sprintf("%v", err))
}
break
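
The DeepCopy calls added in this file are the flip side of handing out pointers: the pods may now alias objects in the informer cache, and cached objects must never be mutated in place. A minimal sketch of the copy-then-mutate pattern, using toy types rather than the generated v1.Pod DeepCopy the real code relies on:

package main

import "fmt"

type PodStatus struct{ Reason string }

type Pod struct {
    Name   string
    Status PodStatus
}

// DeepCopy mimics the deepcopy-gen helper on the real API types; a
// shallow copy suffices for this toy type with no reference fields.
func (p *Pod) DeepCopy() *Pod {
    out := *p
    return &out
}

func main() {
    cached := &Pod{Name: "web-0"} // stands in for an informer-cache object

    // Mutate a private copy and send that to the API server; every
    // other reader of the shared cache still sees the original.
    pod := cached.DeepCopy()
    pod.Status.Reason = "NodeLost"

    fmt.Println(cached.Status.Reason) // "" (cache object untouched)
    fmt.Println(pod.Status.Reason)    // "NodeLost"
}
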
