daemoncontroller.go: format for #45574

Merged 1 commit on May 16, 2017
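The change itself is a mechanical style cleanup: every loop in daemoncontroller.go that ranged over a slice by index and then copied the element out on the next line is collapsed into Go's two-value range form. The sketch below illustrates the idiom with a hypothetical node type standing in for the controller's pointer element types (*v1.Node, *v1.Pod, and so on); it is not code from the PR itself, only a minimal before/after comparison.

package main

import "fmt"

// node is a stand-in for the pointer element types the controller iterates
// over (*v1.Node, *v1.Pod, *extensions.DaemonSet); purely illustrative.
type node struct{ name string }

func main() {
	nodeList := []*node{{name: "node-a"}, {name: "node-b"}}

	// Before: index-based loop plus a manual copy of the element on the next line.
	for i := range nodeList {
		n := nodeList[i]
		fmt.Println(n.name)
	}

	// After: the two-value range form expresses the same loop in one line.
	// The elements are pointers, so the per-iteration copy is a cheap pointer
	// copy and the observable behavior is identical.
	for _, n := range nodeList {
		fmt.Println(n.name)
	}
}

Because the slices touched here hold pointers, the range variable only copies a pointer on each iteration, which is presumably why the commit is described as a format-only fix rather than a behavior change.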
22 changes: 8 additions & 14 deletions pkg/controller/daemon/daemoncontroller.go
@@ -414,8 +414,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
 		return
 	}
 	node := obj.(*v1.Node)
-	for i := range dsList {
-		ds := dsList[i]
+	for _, ds := range dsList {
 		_, shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			continue
@@ -439,8 +438,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 		return
 	}
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
-	for i := range dsList {
-		ds := dsList[i]
+	for _, ds := range dsList {
 		_, oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
 		if err != nil {
 			continue
@@ -538,8 +536,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 	}
 	var nodesNeedingDaemonPods, podsToDelete []string
 	var failedPodsObserved int
-	for i := range nodeList {
-		node := nodeList[i]
+	for _, node := range nodeList {
 		_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			continue
@@ -555,8 +552,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 			// If a daemon pod failed, delete it
 			// If there's no daemon pods left on this node, we will create it in the next sync loop
 			var daemonPodsRunning []*v1.Pod
-			for i := range daemonPods {
-				pod := daemonPods[i]
+			for _, pod := range daemonPods {
 				if pod.Status.Phase == v1.PodFailed {
 					msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, node.Name, pod.Name)
 					glog.V(2).Infof(msg)
@@ -578,8 +574,8 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 			}
 		case !shouldContinueRunning && exists:
 			// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
-			for i := range daemonPods {
-				podsToDelete = append(podsToDelete, daemonPods[i].Name)
+			for _, pod := range daemonPods {
+				podsToDelete = append(podsToDelete, pod.Name)
 			}
 		}
 	}
@@ -716,8 +712,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
 	}

 	var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int
-	for i := range nodeList {
-		node := nodeList[i]
+	for _, node := range nodeList {
 		wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			return err
@@ -881,8 +876,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet)
 	if err != nil {
 		return false, false, false, err
 	}
-	for i := range podList {
-		pod := podList[i]
+	for _, pod := range podList {
 		if pod.Spec.NodeName != node.Name {
 			continue
 		}