docker: stop network pause container of lost alloc after node restart
This PR fixes a bug where the docker network pause container would not be
stopped and removed when a node is restarted, the alloc is moved to another
node, and the original node comes back up. See the issue below for the full
repro conditions.

Previously, in the DestroyNetwork Postrun hook we depended on the
NetworkIsolationSpec field not being nil, which is only the case if the
Client stays alive all the way from network creation to network teardown.
If the node is rebooted we lose that state and could no longer find the
pause container to remove. Now we find the pause container manually, by
scanning the running containers and matching on the associated alloc ID.

Fixes #17299
shoenig committed Jun 7, 2023
1 parent 84e7cf3 commit 295d5ce
Showing 6 changed files with 82 additions and 8 deletions.
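At its core, the recovery path added below is a label-filtered container listing: ask Docker for running containers that carry the Nomad alloc-ID label, then pick the one whose name has the pause-container prefix. Here is a minimal, self-contained sketch of that lookup, assuming the fsouza/go-dockerclient API the driver uses; the label key value and the alloc ID in main are assumptions for illustration, while the `nomad_init_` name prefix comes from the diff below.

```go
package main

import (
	"fmt"
	"strings"

	docker "github.com/fsouza/go-dockerclient"
)

// allocIDLabel stands in for the driver's dockerLabelAllocID constant
// (assumed here to be "com.hashicorp.nomad.alloc_id").
const allocIDLabel = "com.hashicorp.nomad.alloc_id"

// findPauseContainerID lists the running containers that carry the alloc-ID
// label and returns the ID of the pause container belonging to allocID, or
// "" if no such container exists.
func findPauseContainerID(client *docker.Client, allocID string) (string, error) {
	containers, err := client.ListContainers(docker.ListContainersOptions{
		All:     false, // running containers only
		Filters: map[string][]string{"label": {allocIDLabel}},
	})
	if err != nil {
		return "", err
	}

	for _, c := range containers {
		// pause containers are named nomad_init_<alloc id>; the Docker list
		// API reports names with a leading slash
		isPause := false
		for _, name := range c.Names {
			if strings.HasPrefix(name, "/nomad_init_") {
				isPause = true
				break
			}
		}
		if isPause && c.Labels[allocIDLabel] == allocID {
			return c.ID, nil
		}
	}
	return "", nil
}

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}

	// hypothetical alloc ID, purely for illustration
	id, err := findPauseContainerID(client, "5a3cfd22-0938-4728-8309-5ba84ce3f62c")
	fmt.Println(id, err)
}
```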
3 changes: 3 additions & 0 deletions .changelog/17455.txt
@@ -0,0 +1,3 @@
```release-note:bug
docker: Fixed a bug where the network pause container would not be removed after node restart
```
12 changes: 7 additions & 5 deletions client/allocrunner/network_hook.go
@@ -178,12 +178,14 @@ func (h *networkHook) Prerun() error {
}

func (h *networkHook) Postrun() error {
if h.spec == nil {
return nil
}

if err := h.networkConfigurator.Teardown(context.TODO(), h.alloc, h.spec); err != nil {
h.logger.Error("failed to cleanup network for allocation, resources may have leaked", "alloc", h.alloc.ID, "error", err)
// we need the spec for network teardown
if h.spec != nil {
if err := h.networkConfigurator.Teardown(context.TODO(), h.alloc, h.spec); err != nil {
h.logger.Error("failed to cleanup network for allocation, resources may have leaked", "alloc", h.alloc.ID, "error", err)
}
}

// issue the driver destroy regardless of whether we have a spec (e.g. to clean up the pause container)
return h.manager.DestroyNetwork(h.alloc.ID, h.spec)
}
3 changes: 3 additions & 0 deletions client/allocrunner/network_manager_linux.go
@@ -152,6 +152,9 @@ func (*defaultNetworkManager) CreateNetwork(allocID string, _ *drivers.NetworkCr
}

func (*defaultNetworkManager) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
if spec == nil {
return nil
}
return nsutil.UnmountNS(spec.Path)
}

36 changes: 35 additions & 1 deletion drivers/docker/driver.go
@@ -35,6 +35,7 @@ import (
"github.com/hashicorp/nomad/plugins/drivers"
pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
"github.com/ryanuber/go-glob"
"golang.org/x/exp/slices"
)

var (
@@ -760,6 +761,39 @@ func (d *Driver) containerBinds(task *drivers.TaskConfig, driverConfig *TaskConf
return binds, nil
}

// findPauseContainer scans the running containers and returns the ID of the
// pause container whose alloc ID label matches allocID, or the empty string
// if no such container exists.
func (d *Driver) findPauseContainer(allocID string) (string, error) {

_, dockerClient, err := d.dockerClients()
if err != nil {
return "", err
}

containers, listErr := dockerClient.ListContainers(docker.ListContainersOptions{
Context: d.ctx,
All: false, // running only
Filters: map[string][]string{
"label": {dockerLabelAllocID},
},
})
if listErr != nil {
d.logger.Error("failed to list pause containers for recovery", "error", listErr)
return "", listErr
}

for _, c := range containers {
if !slices.ContainsFunc(c.Names, func(s string) bool {
return strings.HasPrefix(s, "/nomad_init_")
}) {
continue
}
if c.Labels[dockerLabelAllocID] == allocID {
return c.ID, nil
}
}

return "", nil
}

// recoverPauseContainers gets called when we start up the plugin. On client
// restarts we need to rebuild the set of pause containers we are
// tracking. Basically just scan all containers and pull the ID from anything
@@ -779,7 +813,7 @@ func (d *Driver) recoverPauseContainers(ctx context.Context) {
},
})
if listErr != nil && listErr != ctx.Err() {
d.logger.Error("failed to list pause containers", "error", listErr)
d.logger.Error("failed to list pause containers for recovery", "error", listErr)
return
}

32 changes: 30 additions & 2 deletions drivers/docker/network.go
@@ -93,7 +93,30 @@ func (d *Driver) CreateNetwork(allocID string, createSpec *drivers.NetworkCreate
}

func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSpec) error {
id := spec.Labels[dockerNetSpecLabelKey]

var (
id string
err error
)

if spec != nil {
// if we have the spec we can just read the container id
id = spec.Labels[dockerNetSpecLabelKey]
} else {
// otherwise we need to scan all the containers and find the pause container
// associated with this allocation - this happens when the client is
// restarted since we do not persist the network spec
id, err = d.findPauseContainer(allocID)
}

if err != nil {
return err
}

if id == "" {
d.logger.Debug("failed to find pause container to cleanup", "alloc_id", allocID)
return nil
}

// no longer tracking this pause container; even if we fail here we should
// let the background reconciliation keep trying
@@ -104,11 +127,16 @@ func (d *Driver) DestroyNetwork(allocID string, spec *drivers.NetworkIsolationSp
return fmt.Errorf("failed to connect to docker daemon: %s", err)
}

timeout := uint(1) // this is the pause container, just kill it fast
if err := client.StopContainerWithContext(id, timeout, d.ctx); err != nil {
d.logger.Warn("failed to stop pause container", "id", id, "error", err)
}

if err := client.RemoveContainer(docker.RemoveContainerOptions{
Force: true,
ID: id,
}); err != nil {
return err
return fmt.Errorf("failed to remove pause container: %w", err)
}

if d.config.GC.Image {
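For context on the teardown ordering above: the pause container is stopped with a very short timeout and then force-removed, so a failed stop never blocks cleanup. Below is a minimal standalone sketch of that stop-then-force-remove pattern, again assuming the fsouza/go-dockerclient API; the 1-second timeout mirrors the diff, and the container ID in main is hypothetical.

```go
package main

import (
	"context"
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

// stopAndRemove stops a pause container with a short timeout and then
// force-removes it; a stop failure is logged but does not abort the removal.
func stopAndRemove(ctx context.Context, client *docker.Client, id string) error {
	// the pause container does no meaningful work on shutdown, so give it
	// only one second before Docker sends SIGKILL
	if err := client.StopContainerWithContext(id, 1, ctx); err != nil {
		fmt.Println("warn: failed to stop pause container:", err)
	}

	return client.RemoveContainer(docker.RemoveContainerOptions{
		ID:    id,
		Force: true,
	})
}

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}

	// hypothetical container ID, purely for illustration
	if err := stopAndRemove(context.Background(), client, "0123456789ab"); err != nil {
		fmt.Println("remove failed:", err)
	}
}
```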
4 changes: 4 additions & 0 deletions plugins/drivers/client.go
@@ -490,6 +490,10 @@ func (d *driverPluginClient) CreateNetwork(allocID string, _ *NetworkCreateReque
}

func (d *driverPluginClient) DestroyNetwork(allocID string, spec *NetworkIsolationSpec) error {
if spec == nil {
return nil
}

req := &proto.DestroyNetworkRequest{
AllocId: allocID,
IsolationSpec: NetworkIsolationSpecToProto(spec),
