update.go (forked from moby/moby)

package daemon

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/pkg/errors"
)

// ContainerUpdate updates the configuration of the container with the given
// name (or ID) and returns any warnings produced while validating the new
// settings.
func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) {
	var warnings []string

	c, err := daemon.GetContainer(name)
	if err != nil {
		return container.ContainerUpdateOKBody{Warnings: warnings}, err
	}

	warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true)
	if err != nil {
		return container.ContainerUpdateOKBody{Warnings: warnings}, validationError{err}
	}

	if err := daemon.update(name, hostConfig); err != nil {
		return container.ContainerUpdateOKBody{Warnings: warnings}, err
	}

	return container.ContainerUpdateOKBody{Warnings: warnings}, nil
}
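
// update applies the new hostConfig to the named container: it merges the
// settings into the container's stored HostConfig, checkpoints the result to
// the daemon's in-memory view, and, for a running container, forwards the
// resource changes to containerd.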
func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
	if hostConfig == nil {
		return nil
	}

	container, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	restoreConfig := false
	// Keep a copy of the current HostConfig so a failed update can be rolled
	// back by the deferred function below.
	backupHostConfig := *container.HostConfig
	defer func() {
		if restoreConfig {
			container.Lock()
			container.HostConfig = &backupHostConfig
			container.CheckpointTo(daemon.containersReplica)
			container.Unlock()
		}
	}()

	if container.RemovalInProgress || container.Dead {
		return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be updated"))
	}

	container.Lock()

	if err := container.UpdateContainer(hostConfig); err != nil {
		restoreConfig = true
		container.Unlock()
		return errCannotUpdate(container.ID, err)
	}

	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		restoreConfig = true
		container.Unlock()
		return errCannotUpdate(container.ID, err)
	}

	container.Unlock()

	// If the restart policy changed, the container monitor needs to pick up
	// the new policy.
	if hostConfig.RestartPolicy.Name != "" {
		container.UpdateMonitor(hostConfig.RestartPolicy)
	}

	// If the container is not running, updating the hostConfig struct is
	// enough; the new resources take effect the next time the container is
	// started. If the container is running (including paused), the changes
	// must also be applied to the live container via containerd.
	if container.IsRunning() && !container.IsRestarting() {
		if err := daemon.containerd.UpdateResources(context.Background(), container.ID, toContainerdResources(hostConfig.Resources)); err != nil {
			restoreConfig = true
			// TODO: it would be nice if containerd responded with better
			// errors here so we can classify this better.
			return errCannotUpdate(container.ID, systemError{err})
		}
	}

	daemon.LogContainerEvent(container, "update")
	return nil
}
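
// errCannotUpdate wraps err with a message naming the container that could
// not be updated.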
func errCannotUpdate(containerID string, err error) error {
	return errors.Wrap(err, "Cannot update container "+containerID)
}
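
For context, here is a minimal sketch of how this daemon path is typically
exercised from the Go client. It is a hedged example, not part of this file:
it assumes the github.com/docker/docker/client package, a reachable daemon
configured via the usual DOCKER_* environment variables, and the container
name "my-container" is hypothetical.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Raise the memory limits and switch the restart policy; server-side,
	// this request is handled by Daemon.ContainerUpdate above.
	body, err := cli.ContainerUpdate(context.Background(), "my-container", container.UpdateConfig{
		Resources: container.Resources{
			Memory:     512 * 1024 * 1024,  // 512 MiB
			MemorySwap: 1024 * 1024 * 1024, // 1 GiB (memory + swap)
		},
		RestartPolicy: container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("warnings:", body.Warnings)
}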