-
Notifications
You must be signed in to change notification settings - Fork 38.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
pkg/kubelet: recreate infra pod if the pod is changed #4563
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1047,6 +1047,7 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke | |
glog.Errorf("Unable to get pod with name %q and uid %q info with error(%v)", podFullName, uid, err) | ||
} | ||
|
||
podChanged := false | ||
for _, container := range pod.Spec.Containers { | ||
expectedHash := dockertools.HashContainer(&container) | ||
dockerContainerName := dockertools.BuildDockerName(uid, podFullName, &container) | ||
|
@@ -1055,8 +1056,8 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke | |
glog.V(3).Infof("pod %q container %q exists as %v", podFullName, container.Name, containerID) | ||
|
||
// look for changes in the container. | ||
podChanged := hash != 0 && hash != expectedHash | ||
if !podChanged { | ||
containerChanged := hash != 0 && hash != expectedHash | ||
if !containerChanged { | ||
// TODO: This should probably be separated out into a separate goroutine. | ||
// If the container's liveness probe is unsuccessful, set readiness to false. If liveness is successful, do a readiness check and set | ||
// readiness accordingly. If the initialDelay since container creation on liveness probe has not passed, the probe will return Success. | ||
|
@@ -1088,23 +1089,14 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke | |
} | ||
glog.Infof("pod %q container %q is unhealthy. Container will be killed and re-created.", podFullName, container.Name, live) | ||
} else { | ||
podChanged = true | ||
glog.Infof("pod %q container %q hash changed (%d vs %d). Container will be killed and re-created.", podFullName, container.Name, hash, expectedHash) | ||
} | ||
if err := kl.killContainer(dockerContainer); err != nil { | ||
glog.V(1).Infof("Failed to kill container %q: %v", dockerContainer.ID, err) | ||
continue | ||
} | ||
killedContainers[containerID] = empty{} | ||
|
||
if podChanged { | ||
// Also kill associated pod infra container if the pod changed. | ||
if podInfraContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, dockertools.PodInfraContainerName); found { | ||
if err := kl.killContainer(podInfraContainer); err != nil { | ||
glog.V(1).Infof("Failed to kill pod infra container %q: %v", podInfraContainer.ID, err) | ||
continue | ||
} | ||
} | ||
} | ||
} | ||
|
||
// Check RestartPolicy for container | ||
|
@@ -1167,6 +1159,20 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke | |
containersToKeep[containerID] = empty{} | ||
} | ||
|
||
if podChanged { | ||
// Also kill associated pod infra container if the pod changed. | ||
if podInfraContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, dockertools.PodInfraContainerName); found { | ||
if err := kl.killContainer(podInfraContainer); err != nil { | ||
glog.V(1).Infof("Failed to kill pod infra container %q: %v", podInfraContainer.ID, err) | ||
} | ||
} | ||
podInfraContainerID, err = kl.createPodInfraContainer(pod) | ||
if err != nil { | ||
glog.Errorf("Failed to recreate pod infra container: %v for pod %q", err, podFullName) | ||
} | ||
containersToKeep[podInfraContainerID] = empty{} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is this necessary? At the beginning of Sync Loop, we should already put PodInfraContainerID to containersToKeep, right? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
I mean they are the same variable name but might have different IDs. (I assume docker will not generate duplicate IDs for containers) |
||
} | ||
|
||
// Kill any containers in this pod which were not identified above (guards against duplicates). | ||
for id, container := range dockerContainers { | ||
curPodFullName, curUUID, _, _ := dockertools.ParseDockerName(container.Names[0]) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why would we restart the pod if just one container changed?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Ugh, there is an escape here - if a container adds/removes/changes a host port, we won't restart the network container. I'm not too worried about that by itself, but are there any other aspects of a single container that might require the pod itself to be recreated?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@thockin previously, we killed the infra pod container whenever a container changed, and the infra container was not recreated until the next sync happened. This pull request does not change the killing behavior. The only change is to recreate the killed infra container right after we finish the sync.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
You're right. That's awful
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The only reason for killing the infra pod container when a container is changed is that we don't know whether network-related config, such as a port, has changed.