From c458bca6dc25c25d3345b093592167f2bd5e4af9 Mon Sep 17 00:00:00 2001
From: Brian Goff
Date: Fri, 17 Jul 2020 11:47:40 -0700
Subject: [PATCH] Handle missing c8d task on stop

In this case, we are sending a signal to the container (typically this
would be SIGKILL or SIGTERM, but could be any signal), but containerd
reports that the process does not exist.

At the point this code is happening, dockerd thinks that the container
is running, but containerd reports that it is not. Since containerd
reports that it is not running, try to collect the exit status of the
container from containerd, and mark the container as stopped in dockerd.

Repro this problem like so:

```
id=$(docker run -d busybox top)
pkill containerd && pkill top
docker stop $id
```

Without this change, `docker stop $id` will first try to send SIGTERM,
wait for exit, then try SIGKILL. Because the process doesn't exist to
begin with, no signal is sent, and so nothing happens. Since we won't
receive any event here to process, the container can never be marked as
stopped until the daemon is restarted.

With the change, `docker stop` succeeds immediately (since the process
is already stopped) and we mark the container as stopped. We handle the
case as if we missed an exit event.

There are definitely some other places in the stack that could use some
improvement here, but this helps people get out of a sticky situation.

With io.containerd.runc.v2, no event is ever received by docker because
the shim quits trying to send the event.

With io.containerd.runtime.v1.linux the TaskExit event is sent before
dockerd can reconnect to the event stream and we miss the event.

No matter what, we shouldn't be reliant on the shim doing the right
thing here, nor can we rely on a steady event stream.

Signed-off-by: Brian Goff
---
 daemon/kill.go    |   1 +
 daemon/monitor.go | 143 +++++++++++++++++++++++++---------------------
 2 files changed, 78 insertions(+), 66 deletions(-)

diff --git a/daemon/kill.go b/daemon/kill.go
index 0fe7412913aa0..d89450feaff6c 100644
--- a/daemon/kill.go
+++ b/daemon/kill.go
@@ -99,6 +99,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int)
 		if errdefs.IsNotFound(err) {
 			unpause = false
 			logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'")
+			go daemon.handleContainerExit(container, nil)
 		} else {
 			return errors.Wrapf(err, "Cannot kill container %s", container.ID)
 		}
diff --git a/daemon/monitor.go b/daemon/monitor.go
index 68c6dbeb8b6bc..04d7212dbc85a 100644
--- a/daemon/monitor.go
+++ b/daemon/monitor.go
@@ -24,6 +24,82 @@ func (daemon *Daemon) setStateCounter(c *container.Container) {
 	}
 }
 
+func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontainerdtypes.EventInfo) error {
+	c.Lock()
+
+	ec, et, err := daemon.containerd.DeleteTask(context.Background(), c.ID)
+	if err != nil {
+		logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	c.StreamConfig.Wait(ctx)
+	cancel()
+	c.Reset(false)
+
+	exitStatus := container.ExitStatus{
+		ExitCode: int(ec),
+		ExitedAt: et,
+	}
+	if e != nil {
+		exitStatus.ExitCode = int(e.ExitCode)
+		exitStatus.ExitedAt = e.ExitedAt
+		exitStatus.OOMKilled = e.OOMKilled
+		if e.Error != nil {
+			c.SetError(e.Error)
+		}
+	}
+
+	restart, wait, err := c.RestartManager().ShouldRestart(ec, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt))
+	if err == nil && restart {
+		c.RestartCount++
+		c.SetRestarting(&exitStatus)
+	} else {
+		c.SetStopped(&exitStatus)
+		defer daemon.autoRemove(c)
+	}
+	defer c.Unlock() // needs to be called before autoRemove
+
+	// cancel healthcheck here, they will be automatically
+	// restarted if/when the container is started again
+	daemon.stopHealthchecks(c)
+	attributes := map[string]string{
+		"exitCode": strconv.Itoa(int(ec)),
+	}
+	daemon.LogContainerEventWithAttributes(c, "die", attributes)
+	daemon.Cleanup(c)
+	daemon.setStateCounter(c)
+	cpErr := c.CheckpointTo(daemon.containersReplica)
+
+	if err == nil && restart {
+		go func() {
+			err := <-wait
+			if err == nil {
+				// daemon.netController is initialized when daemon is restoring containers.
+				// But containerStart will use daemon.netController segment.
+				// So to avoid panic at startup process, here must wait util daemon restore done.
+				daemon.waitForStartupDone()
+				if err = daemon.containerStart(c, "", "", false); err != nil {
+					logrus.Debugf("failed to restart container: %+v", err)
+				}
+			}
+			if err != nil {
+				c.Lock()
+				c.SetStopped(&exitStatus)
+				daemon.setStateCounter(c)
+				c.CheckpointTo(daemon.containersReplica)
+				c.Unlock()
+				defer daemon.autoRemove(c)
+				if err != restartmanager.ErrRestartCanceled {
+					logrus.Errorf("restartmanger wait error: %+v", err)
+				}
+			}
+		}()
+	}
+
+	return cpErr
+}
+
 // ProcessEvent is called by libcontainerd whenever an event occurs
 func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) error {
 	c, err := daemon.GetContainer(id)
@@ -48,72 +124,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
 		daemon.LogContainerEvent(c, "oom")
 	case libcontainerdtypes.EventExit:
 		if int(ei.Pid) == c.Pid {
-			c.Lock()
-			_, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID)
-			if err != nil {
-				logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
-			}
-			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-			c.StreamConfig.Wait(ctx)
-			cancel()
-			c.Reset(false)
-
-			exitStatus := container.ExitStatus{
-				ExitCode:  int(ei.ExitCode),
-				ExitedAt:  ei.ExitedAt,
-				OOMKilled: ei.OOMKilled,
-			}
-			restart, wait, err := c.RestartManager().ShouldRestart(ei.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt))
-			if err == nil && restart {
-				c.RestartCount++
-				c.SetRestarting(&exitStatus)
-			} else {
-				if ei.Error != nil {
-					c.SetError(ei.Error)
-				}
-				c.SetStopped(&exitStatus)
-				defer daemon.autoRemove(c)
-			}
-			defer c.Unlock() // needs to be called before autoRemove
-
-			// cancel healthcheck here, they will be automatically
-			// restarted if/when the container is started again
-			daemon.stopHealthchecks(c)
-			attributes := map[string]string{
-				"exitCode": strconv.Itoa(int(ei.ExitCode)),
-			}
-			daemon.LogContainerEventWithAttributes(c, "die", attributes)
-			daemon.Cleanup(c)
-			daemon.setStateCounter(c)
-			cpErr := c.CheckpointTo(daemon.containersReplica)
-
-			if err == nil && restart {
-				go func() {
-					err := <-wait
-					if err == nil {
-						// daemon.netController is initialized when daemon is restoring containers.
-						// But containerStart will use daemon.netController segment.
-						// So to avoid panic at startup process, here must wait util daemon restore done.
-						daemon.waitForStartupDone()
-						if err = daemon.containerStart(c, "", "", false); err != nil {
-							logrus.Debugf("failed to restart container: %+v", err)
-						}
-					}
-					if err != nil {
-						c.Lock()
-						c.SetStopped(&exitStatus)
-						daemon.setStateCounter(c)
-						c.CheckpointTo(daemon.containersReplica)
-						c.Unlock()
-						defer daemon.autoRemove(c)
-						if err != restartmanager.ErrRestartCanceled {
-							logrus.Errorf("restartmanger wait error: %+v", err)
-						}
-					}
-				}()
-			}
-
-			return cpErr
+			return daemon.handleContainerExit(c, &ei)
 		}
 
 		exitCode := 127
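
For reviewers who want to see, in isolation, the containerd-side behaviour this fix leans on: deleting a task still yields an exit status even when the process the signal was aimed at is already gone. Below is a minimal standalone sketch using the containerd Go client (containerd 1.3/1.4-era packages), not part of the patch; the socket path, the "moby" namespace, and the container ID are assumptions for illustration, and error handling is deliberately simplistic.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"syscall"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Assumed socket path; dockerd-managed containers live in the "moby"
	// namespace by default.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "moby")

	// "mycontainer" is a placeholder container ID.
	cont, err := client.LoadContainer(ctx, "mycontainer")
	if err != nil {
		log.Fatal(err)
	}
	task, err := cont.Task(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Try to signal the task, roughly what `docker stop` triggers first.
	if err := task.Kill(ctx, syscall.SIGTERM); err != nil {
		if !errdefs.IsNotFound(err) {
			log.Fatal(err)
		}
		// The process is already gone: fall back to deleting the task and
		// collecting whatever exit status containerd still holds, the same
		// idea as handleContainerExit in the patch above.
		status, derr := task.Delete(ctx)
		if derr != nil {
			log.Fatal(derr)
		}
		fmt.Printf("task already exited: code=%d at=%s\n",
			status.ExitCode(), status.ExitTime())
		return
	}
	fmt.Println("SIGTERM delivered; task was still running")
}
```

Inside dockerd the same reconciliation happens through daemon.containerd.DeleteTask in handleContainerExit; the sketch only mirrors that fallback from outside the daemon.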