/
volumeowner.go
109 lines (89 loc) · 2.68 KB
/
volumeowner.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
package dockerutil
import (
"context"
"fmt"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"go.uber.org/zap"
)
// VolumeOwnerOptions contain the configuration for the SetVolumeOwner function.
type VolumeOwnerOptions struct {
// Log receives a warning if removal of the one-off volume-owner container fails.
Log *zap.Logger
// Client is the Docker client used to create, start, and wait on the one-off container.
Client *client.Client
// VolumeName is the Docker volume whose contents will have their owner and mode set;
// it is bind-mounted into the one-off container.
VolumeName string
// ImageRef is the image reference whose default user should own the volume.
// NOTE(review): not referenced by SetVolumeOwner in this file — confirm whether
// callers rely on it or it is resolved elsewhere.
ImageRef string
// TestName is stored under CleanupLabel on the one-off container so test
// cleanup can find and remove it.
TestName string
// UidGid is the owner passed directly to chown (e.g. "uid:gid" — TODO confirm format);
// if empty, GetRootUserString() is used instead.
UidGid string
}
// SetVolumeOwner configures the owner of a volume to match the default user in the supplied image reference.
//
// It runs a short-lived busybox container that bind-mounts the volume and
// executes chown/chmod inside it, then waits for that container to exit.
// Returns an error if the container cannot be created, started, or waited on,
// or if the chown/chmod command exits non-zero.
func SetVolumeOwner(ctx context.Context, opts VolumeOwnerOptions) error {
	owner := opts.UidGid
	if owner == "" {
		owner = GetRootUserString()
	}

	if err := ensureBusybox(ctx, opts.Client); err != nil {
		return err
	}

	const mountPath = "/mnt/dockervolume"

	// Start a one-off container to chmod and chown the volume.
	name := fmt.Sprintf("ibctest-volumeowner-%d-%s", time.Now().UnixNano(), RandLowerCaseLetterString(5))

	config := &container.Config{
		Image:      busyboxRef, // Using busybox image which has chown and chmod.
		Entrypoint: []string{"sh", "-c"},
		Cmd: []string{
			`chown "$2" "$1" && chmod 0700 "$1"`,
			"_", // Meaningless arg0 for sh -c with positional args.
			mountPath,
			owner,
		},
		// Root user so we have permissions to set ownership and mode.
		User:   GetRootUserString(),
		Labels: map[string]string{CleanupLabel: opts.TestName},
	}
	hostConfig := &container.HostConfig{
		Binds:      []string{opts.VolumeName + ":" + mountPath},
		AutoRemove: true,
	}

	// nil network config and platform: no networking necessary.
	created, err := opts.Client.ContainerCreate(ctx, config, hostConfig, nil, nil, name)
	if err != nil {
		return fmt.Errorf("creating container: %w", err)
	}

	waited := false
	defer func() {
		if waited {
			// No need to attempt removing the container if we successfully started and waited for it to complete.
			return
		}
		removeErr := opts.Client.ContainerRemove(ctx, created.ID, types.ContainerRemoveOptions{Force: true})
		if removeErr != nil {
			opts.Log.Warn("Failed to remove volume-owner container", zap.String("container_id", created.ID), zap.Error(removeErr))
		}
	}()

	if err := opts.Client.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
		return fmt.Errorf("starting volume-owner container: %w", err)
	}

	waitCh, errCh := opts.Client.ContainerWait(ctx, created.ID, container.WaitConditionNotRunning)
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-errCh:
		return err
	case res := <-waitCh:
		waited = true
		if res.Error != nil {
			return fmt.Errorf("waiting for volume-owner container: %s", res.Error.Message)
		}
		if res.StatusCode != 0 {
			return fmt.Errorf("configuring volume exited %d", res.StatusCode)
		}
	}

	return nil
}