diff --git a/docs/reference/buildx_create.md b/docs/reference/buildx_create.md
index 4b5abb54bca..1ff87d04088 100644
--- a/docs/reference/buildx_create.md
+++ b/docs/reference/buildx_create.md
@@ -84,6 +84,11 @@ Specifies the configuration file for the buildkitd daemon to use. The configurat
can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
+Note that if you create a `docker-container` builder and specify registry
+certificates in the `buildkitd.toml` configuration, the certificate files are
+copied into the container under `/etc/buildkit/certs` and the configuration is
+updated to reference the new paths.
+
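+For example, a configuration such as the following (the registry name
+`myregistry.io` and the source paths are illustrative):
+
+```toml
+[registry."myregistry.io"]
+  ca=["/etc/config/myca.pem"]
+  [[registry."myregistry.io".keypair]]
+    key="/etc/config/key.pem"
+    cert="/etc/config/cert.pem"
+```
+
+is rewritten inside the container so that `ca`, `key` and `cert` point at the
+copied files under `/etc/buildkit/certs/myregistry.io/`.
+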
### Set the builder driver to use (--driver)
```
diff --git a/driver/docker-container/driver.go b/driver/docker-container/driver.go
index eec5d482387..80f745ecd09 100644
--- a/driver/docker-container/driver.go
+++ b/driver/docker-container/driver.go
@@ -1,7 +1,6 @@
package docker
import (
- "archive/tar"
"bytes"
"context"
"io"
@@ -12,6 +11,7 @@ import (
"github.com/docker/buildx/driver"
"github.com/docker/buildx/driver/bkimage"
+ "github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/docker/api/types"
@@ -20,6 +20,8 @@ import (
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
dockerclient "github.com/docker/docker/client"
+ dockerarchive "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/tracing/detect"
@@ -28,12 +30,6 @@ import (
const (
volumeStateSuffix = "_state"
-
- // containerStateDir is the location where buildkitd inside the container
- // stores its state. The container driver creates a Linux container, so
- // this should match the location for Linux, as defined in:
- // https://github.com/moby/buildkit/blob/v0.9.0/util/appdefaults/appdefaults_unix.go#L11-L15
- containerBuildKitRootDir = "/var/lib/buildkit"
)
type Driver struct {
@@ -119,7 +115,7 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
{
Type: mount.TypeVolume,
Source: d.Name + volumeStateSuffix,
- Target: containerBuildKitRootDir,
+ Target: confutil.DefaultBuildKitStateDir,
},
},
}
@@ -139,11 +135,12 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
return err
}
if f := d.InitConfig.ConfigFile; f != "" {
- buf, err := readFileToTar(f)
+ configFiles, err := confutil.LoadConfigFiles(f)
if err != nil {
return err
}
- if err := d.DockerAPI.CopyToContainer(ctx, d.Name, "/", buf, dockertypes.CopyToContainerOptions{}); err != nil {
+ defer os.RemoveAll(configFiles)
+ if err := d.copyToContainer(ctx, configFiles, "/"); err != nil {
return err
}
}
@@ -205,6 +202,17 @@ func (d *Driver) copyLogs(ctx context.Context, l progress.SubLogger) error {
return rc.Close()
}
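+// copyToContainer tars up the contents of srcPath, chowning all entries to
+// root, and extracts the archive into the container's filesystem at dstDir.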
+func (d *Driver) copyToContainer(ctx context.Context, srcPath string, dstDir string) error {
+ srcArchive, err := dockerarchive.TarWithOptions(srcPath, &dockerarchive.TarOptions{
+ ChownOpts: &idtools.Identity{UID: 0, GID: 0},
+ })
+ if err != nil {
+ return err
+ }
+ defer srcArchive.Close()
+ return d.DockerAPI.CopyToContainer(ctx, d.Name, dstDir, srcArchive, dockertypes.CopyToContainerOptions{})
+}
+
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
execConfig := types.ExecConfig{
Cmd: cmd,
@@ -366,29 +374,6 @@ func (d *demux) Read(dt []byte) (int, error) {
return d.Reader.Read(dt)
}
-func readFileToTar(fn string) (*bytes.Buffer, error) {
- buf := bytes.NewBuffer(nil)
- tw := tar.NewWriter(buf)
- dt, err := ioutil.ReadFile(fn)
- if err != nil {
- return nil, err
- }
- if err := tw.WriteHeader(&tar.Header{
- Name: "/etc/buildkit/buildkitd.toml",
- Size: int64(len(dt)),
- Mode: 0644,
- }); err != nil {
- return nil, err
- }
- if _, err := tw.Write(dt); err != nil {
- return nil, err
- }
- if err := tw.Close(); err != nil {
- return nil, err
- }
- return buf, nil
-}
-
type logWriter struct {
logger progress.SubLogger
stream int
diff --git a/go.mod b/go.mod
index 7302159c088..304e0238de7 100644
--- a/go.mod
+++ b/go.mod
@@ -34,6 +34,7 @@ require (
github.com/moby/buildkit v0.9.1-0.20211019185819-8778943ac3da
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
+ github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.8.1
diff --git a/util/confutil/config.go b/util/confutil/config.go
new file mode 100644
index 00000000000..8aa607be5ea
--- /dev/null
+++ b/util/confutil/config.go
@@ -0,0 +1,25 @@
+package confutil
+
+import (
+ "os"
+
+ "github.com/pelletier/go-toml"
+ "github.com/pkg/errors"
+)
+
+// loadConfigTree loads a BuildKit TOML configuration file into a toml tree.
+// It returns a nil tree and no error if the file does not exist.
+func loadConfigTree(fp string) (*toml.Tree, error) {
+ f, err := os.Open(fp)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, nil
+ }
+ return nil, errors.Wrapf(err, "failed to load config from %s", fp)
+ }
+ defer f.Close()
+ t, err := toml.LoadReader(f)
+ if err != nil {
+ return t, errors.Wrap(err, "failed to parse config")
+ }
+ return t, nil
+}
diff --git a/util/confutil/container.go b/util/confutil/container.go
new file mode 100644
index 00000000000..9b9447e01e5
--- /dev/null
+++ b/util/confutil/container.go
@@ -0,0 +1,144 @@
+package confutil
+
+import (
+ "io"
+ "os"
+ "path"
+
+ "github.com/pelletier/go-toml"
+ "github.com/pkg/errors"
+)
+
+const (
+ // DefaultBuildKitStateDir and DefaultBuildKitConfigDir are the directories
+ // where buildkitd inside the container stores its state and configuration.
+ // Some drivers create a Linux container, so these should match the Linux
+ // defaults defined in:
+ // https://github.com/moby/buildkit/blob/v0.9.0/util/appdefaults/appdefaults_unix.go#L11-L15
+ DefaultBuildKitStateDir = "/var/lib/buildkit"
+ DefaultBuildKitConfigDir = "/etc/buildkit"
+)
+
+// LoadConfigFiles creates a temp directory with BuildKit config and
+// registry certificates ready to be copied to a container.
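+// The returned directory should be removed by the caller once it has been
+// copied to the container.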
+func LoadConfigFiles(bkconfig string) (string, error) {
+ if _, err := os.Stat(bkconfig); errors.Is(err, os.ErrNotExist) {
+ return "", errors.Wrapf(err, "buildkit configuration file not found: %s", bkconfig)
+ } else if err != nil {
+ return "", errors.Wrapf(err, "invalid buildkit configuration file: %s", bkconfig)
+ }
+
+ // Load config tree
+ btoml, err := loadConfigTree(bkconfig)
+ if err != nil {
+ return "", err
+ }
+
+ // Temp dir that will be copied to the container
+ tmpDir, err := os.MkdirTemp("", "buildkitd-config")
+ if err != nil {
+ return "", err
+ }
+
+ // Create BuildKit config folders
+ tmpBuildKitConfigDir := path.Join(tmpDir, DefaultBuildKitConfigDir)
+ tmpBuildKitCertsDir := path.Join(tmpBuildKitConfigDir, "certs")
+ if err := os.MkdirAll(tmpBuildKitCertsDir, 0700); err != nil {
+ return "", err
+ }
+
+ // Iterate through registry config to copy certs and update
+ // BuildKit config with the underlying certs' path in the container.
+ //
+ // The following BuildKit config:
+ //
+ // [registry."myregistry.io"]
+ // ca=["/etc/config/myca.pem"]
+ // [[registry."myregistry.io".keypair]]
+ // key="/etc/config/key.pem"
+ // cert="/etc/config/cert.pem"
+ //
+ // will be translated in the container as:
+ //
+ // [registry."myregistry.io"]
+ // ca=["/etc/buildkit/certs/myregistry.io/myca.pem"]
+ // [[registry."myregistry.io".keypair]]
+ // key="/etc/buildkit/certs/myregistry.io/key.pem"
+ // cert="/etc/buildkit/certs/myregistry.io/cert.pem"
+ if btoml.Has("registry") {
+ for regName := range btoml.GetArray("registry").(*toml.Tree).Values() {
+ regConf, ok := btoml.GetPath([]string{"registry", regName}).(*toml.Tree)
+ if !ok || regConf == nil {
+ continue
+ }
+ regCertsDir := path.Join(tmpBuildKitCertsDir, regName)
+ if err := os.Mkdir(regCertsDir, 0755); err != nil {
+ return "", err
+ }
+ if regConf.Has("ca") {
+ regCAs, ok := regConf.GetArray("ca").([]string)
+ if ok && len(regCAs) > 0 {
+ var cas []string
+ for _, ca := range regCAs {
+ cas = append(cas, path.Join(DefaultBuildKitConfigDir, "certs", regName, path.Base(ca)))
+ if err := copyfile(ca, path.Join(regCertsDir, path.Base(ca))); err != nil {
+ return "", err
+ }
+ }
+ regConf.Set("ca", cas)
+ }
+ }
+ if regConf.Has("keypair") {
+ regKeyPairs, ok := regConf.GetArray("keypair").([]*toml.Tree)
+ if !ok || len(regKeyPairs) == 0 {
+ continue
+ }
+ for _, kp := range regKeyPairs {
+ if kp == nil {
+ continue
+ }
+ key, _ := kp.Get("key").(string)
+ if len(key) > 0 {
+ kp.Set("key", path.Join(DefaultBuildKitConfigDir, "certs", regName, path.Base(key)))
+ if err := copyfile(key, path.Join(regCertsDir, path.Base(key))); err != nil {
+ return "", err
+ }
+ }
+ cert, _ := kp.Get("cert").(string)
+ if len(cert) > 0 {
+ kp.Set("cert", path.Join(DefaultBuildKitConfigDir, "certs", regName, path.Base(cert)))
+ if err := copyfile(cert, path.Join(regCertsDir, path.Base(cert))); err != nil {
+ return "", err
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Write the updated BuildKit config
+ bkfile, err := os.OpenFile(path.Join(tmpBuildKitConfigDir, "buildkitd.toml"), os.O_CREATE|os.O_WRONLY, 0600)
+ if err != nil {
+ return "", err
+ }
+ defer bkfile.Close()
+ if _, err := btoml.WriteTo(bkfile); err != nil {
+ return "", err
+ }
+
+ return tmpDir, nil
+}
+
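+// copyfile copies the file at src to dst, creating dst with 0600 permissions.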
+func copyfile(src string, dst string) error {
+ sf, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer sf.Close()
+ df, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0600)
+ if err != nil {
+ return err
+ }
+ defer df.Close()
+ _, err = io.Copy(df, sf)
+ return err
+}
diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go
new file mode 100644
index 00000000000..6656465efbc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go
@@ -0,0 +1,62 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package userns
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "sync"
+)
+
+var (
+ inUserNS bool
+ nsOnce sync.Once
+)
+
+// RunningInUserNS detects whether we are currently running in a user namespace.
+// Originally copied from github.com/lxc/lxd/shared/util.go
+func RunningInUserNS() bool {
+ nsOnce.Do(func() {
+ file, err := os.Open("/proc/self/uid_map")
+ if err != nil {
+ // This kernel-provided file only exists if user namespaces are supported
+ return
+ }
+ defer file.Close()
+
+ buf := bufio.NewReader(file)
+ l, _, err := buf.ReadLine()
+ if err != nil {
+ return
+ }
+
+ line := string(l)
+ var a, b, c int64
+ fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
+
+ /*
+ * We assume we are in the initial user namespace if we have a full
+ * range - 4294967295 uids starting at uid 0.
+ */
+ if a == 0 && b == 0 && c == 4294967295 {
+ return
+ }
+ inUserNS = true
+ })
+ return inUserNS
+}
diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go
new file mode 100644
index 00000000000..aab756fd2a6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go
@@ -0,0 +1,25 @@
+// +build !linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package userns
+
+// RunningInUserNS is a stub for non-Linux systems
+// Always returns false
+func RunningInUserNS() bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 00000000000..7307d9694f6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 00000000000..a8a23387326
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1334 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+ exec "golang.org/x/sys/execabs"
+)
+
+type (
+ // Compression is the state represents if compressed or not.
+ Compression int
+ // WhiteoutFormat is the format of whiteouts unpacked
+ WhiteoutFormat int
+
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *idtools.Identity
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ InUserNS bool
+ }
+)
+
+// Archiver implements the Archiver interface and allows the reuse of most utility functions of
+// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+type Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ IDMapping *idtools.IdentityMapping
+}
+
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
+func NewDefaultArchiver() *Archiver {
+ return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+ // Uncompressed represents the uncompressed.
+ Uncompressed Compression = iota
+ // Bzip2 is bzip2 compression algorithm.
+ Bzip2
+ // Gzip is gzip compression algorithm.
+ Gzip
+ // Xz is xz compression algorithm.
+ Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+const (
+ modeISDIR = 040000 // Directory
+ modeISFIFO = 010000 // FIFO
+ modeISREG = 0100000 // Regular file
+ modeISLNK = 0120000 // Symbolic link
+ modeISBLK = 060000 // Block special file
+ modeISCHR = 020000 // Character special file
+ modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ defer rdr.Close()
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if bytes.HasPrefix(source, m) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
+}
+
+func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
+ if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" {
+ noPigz, err := strconv.ParseBool(noPigzEnv)
+ if err != nil {
+ logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
+ }
+ if noPigz {
+ logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
+ return gzip.NewReader(buf)
+ }
+ }
+
+ unpigzPath, err := exec.LookPath("unpigz")
+ if err != nil {
+ logrus.Debugf("unpigz binary not found, falling back to go gzip library")
+ return gzip.NewReader(buf)
+ }
+
+ logrus.Debugf("Using %s to decompress", unpigzPath)
+
+ return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
+}
+
+func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(readBuf, func() error {
+ cancel()
+ return readBuf.Close()
+ })
+}
+
+// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ gzReader, err := gzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ ctx, cancel := context.WithCancel(context.Background())
+
+ xzReader, err := xzDecompress(ctx, buf)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return wrapReadCloser(readBufWrapper, cancel), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream compresses the dest with specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
+// modify the contents or header of an entry in the archive. If the file already
+// exists in the archive the TarModifierFunc will be called with the Header and
+// a reader which will return the files content. If the file does not exist both
+// header and content will be nil.
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
+
+// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
+// tar stream are modified if they match any of the keys in mods.
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
+ pipeReader, pipeWriter := io.Pipe()
+
+ go func() {
+ tarReader := tar.NewReader(inputTarStream)
+ tarWriter := tar.NewWriter(pipeWriter)
+ defer inputTarStream.Close()
+ defer tarWriter.Close()
+
+ modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
+ header, data, err := modifier(name, original, tarReader)
+ switch {
+ case err != nil:
+ return err
+ case header == nil:
+ return nil
+ }
+
+ if header.Name == "" {
+ header.Name = name
+ }
+ header.Size = int64(len(data))
+ if err := tarWriter.WriteHeader(header); err != nil {
+ return err
+ }
+ if len(data) != 0 {
+ if _, err := tarWriter.Write(data); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ var err error
+ var originalHeader *tar.Header
+ for {
+ originalHeader, err = tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+
+ modifier, ok := mods[originalHeader.Name]
+ if !ok {
+ // No modifiers for this file, copy the header and data
+ if err := tarWriter.WriteHeader(originalHeader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ continue
+ }
+ delete(mods, originalHeader.Name)
+
+ if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ // Apply the modifiers that haven't matched any files in the archive
+ for name, modifier := range mods {
+ if err := modify(name, nil, modifier, nil); err != nil {
+ pipeWriter.CloseWithError(err)
+ return
+ }
+ }
+
+ pipeWriter.Close()
+
+ }()
+ return pipeReader
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+// FileInfoHeader creates a populated Header from fi.
+// Compared to archive pkg this function fills in more information.
+// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
+// which have been deleted since Go 1.9 archive/tar.
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
+ hdr.Name = canonicalTarName(name, fi.IsDir())
+ if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
+ return nil, err
+ }
+ return hdr, nil
+}
+
+// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
+// https://github.com/golang/go/commit/66b5a2f
+func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
+ fm := fi.Mode()
+ switch {
+ case fm.IsRegular():
+ mode |= modeISREG
+ case fi.IsDir():
+ mode |= modeISDIR
+ case fm&os.ModeSymlink != 0:
+ mode |= modeISLNK
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ mode |= modeISCHR
+ } else {
+ mode |= modeISBLK
+ }
+ case fm&os.ModeNamedPipe != 0:
+ mode |= modeISFIFO
+ case fm&os.ModeSocket != 0:
+ mode |= modeISSOCK
+ }
+ return mode
+}
+
+// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
+// to a tar header
+func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+ const (
+ // Values based on linux/include/uapi/linux/capability.h
+ xattrCapsSz2 = 20
+ versionOffset = 3
+ vfsCapRevision2 = 2
+ vfsCapRevision3 = 3
+ )
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ length := len(capability)
+ if capability[versionOffset] == vfsCapRevision3 {
+ // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no
+ // sense outside the user namespace the archive is built in.
+ capability[versionOffset] = vfsCapRevision2
+ length = xattrCapsSz2
+ }
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability[:length])
+ }
+ return nil
+}
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ IdentityMapping *idtools.IdentityMapping
+ ChownOpts *idtools.Identity
+
+ // For packing and unpacking whiteout files in the
+ // non standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
+ return &tarAppender{
+ SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ IdentityMapping: idMapping,
+ ChownOpts: chownOpts,
+ }
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) string {
+ name = CanonicalTarNameForPath(name)
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ var link string
+ if fi.Mode()&os.ModeSymlink != 0 {
+ var err error
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := FileInfoHeader(name, fi, link)
+ if err != nil {
+ return err
+ }
+ if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hard linked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ inode, err := getInodeFromStat(fi.Sys())
+ if err != nil {
+ return err
+ }
+ // a link should have a name that it links too
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ // check whether the file is overlayfs whiteout
+ // if yes, skip re-mapping container ID mappings.
+ isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
+ fileIDPair, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
+ if err != nil {
+ return err
+ }
+ }
+
+ // explicitly override with ChownOpts
+ if ta.ChownOpts != nil {
+ hdr.Uid = ta.ChownOpts.UID
+ hdr.Gid = ta.ChownOpts.GID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+ if err != nil {
+ return err
+ }
+
+ // If a new whiteout file exists, write original hdr, then
+ // replace hdr with wo to be written after. Whiteouts should
+ // always be written after the original. Note the original
+ // hdr may have been updated to be a whiteout with returning
+ // a whiteout header
+ if wo != nil {
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+ }
+ hdr = wo
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ // We use system.OpenSequential to ensure we use sequential file
+ // access on Windows to avoid depleting the standby list.
+ // On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
+ // hdr.Mode is in linux format, which we can use for sycalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file. We use system.OpenFileSequential to use sequential
+ // file access to avoid depleting the standby list on Windows.
+ // On Linux, this equates to a regular os.OpenFile
+ file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar:
+ if inUserns { // cannot create devices in a userns
+ return nil
+ }
+ // Handle this is an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeFifo:
+ // Handle this is an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP || err == syscall.EPERM {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ // EPERM occurs if modifying xattrs is not allowed. This can
+ // happen when running in userns with restrictions (ChromeOS).
+ errors = append(errors, err.Error())
+ continue
+ }
+ return err
+ }
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := newTarAppender(
+ idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
+ compressWriter,
+ options.ChownOpts,
+ )
+ ta.WhiteoutConverter = whiteoutConverter
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ var (
+ parentMatched []bool
+ parentDirs []string
+ )
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ for len(parentDirs) != 0 {
+ lastParentDir := parentDirs[len(parentDirs)-1]
+ if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) {
+ break
+ }
+ parentDirs = parentDirs[:len(parentDirs)-1]
+ parentMatched = parentMatched[:len(parentMatched)-1]
+ }
+
+ if len(parentMatched) != 0 {
+ skip, err = pm.MatchesUsingParentResult(relFilePath, parentMatched[len(parentMatched)-1])
+ } else {
+ skip, err = pm.MatchesOrParentMatches(relFilePath)
+ }
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+
+ if f.IsDir() {
+ parentDirs = append(parentDirs, relFilePath)
+ parentMatched = append(parentMatched, skip)
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and its a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // Its not a dir then so we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !pm.Exclusions() {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range pm.Patterns() {
+ if !pat.Exclusion() {
+ continue
+ }
+ if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMapping.RootPair()
+ whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+ if err != nil {
+ return err
+ }
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // ignore XGlobalHeader early to avoid creating parent directories for them
+ if hdr.Typeflag == tar.TypeXGlobalHeader {
+ logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name)
+ continue
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exits we almost always just want to remove and replace it
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := remapIDs(idMapping, hdr); err != nil {
+ return err
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath untar a file from path to a destination, src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ options := &TarOptions{
+ UIDMaps: archiver.IDMapping.UIDs(),
+ GIDMaps: archiver.IDMapping.GIDs(),
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this Archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootIDs := archiver.IDMapping.RootPair()
+ // Create dst, copy src's content into it
+ if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+ return err
+ }
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := make(chan error, 1)
+
+ go func() {
+ defer close(errC)
+
+ errC <- func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ if err := remapIDs(archiver.IDMapping, hdr); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ }()
+ }()
+ defer func() {
+ if er := <-errC; err == nil && er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
+
+// IdentityMapping returns the IdentityMapping of the archiver.
+func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
+ return archiver.IDMapping
+}
+
+func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
+ ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
+ hdr.Uid, hdr.Gid = ids.UID, ids.GID
+ return err
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+
+ // Ensure the command has exited before we clean anything up
+ done := make(chan struct{})
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(done)
+ }()
+
+ return ioutils.NewReadCloserWrapper(pipeR, func() error {
+ // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
+ // cmd.Wait waits for any non-file stdout/stderr/stdin to close.
+ err := pipeR.Close()
+ <-done
+ return err
+ }), nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 00000000000..0a3cc1f92bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,100 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
+ if format == OverlayWhiteoutFormat {
+ if inUserNS {
+ return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns")
+ }
+ return overlayWhiteoutConverter{}, nil
+ }
+ return nil, nil
+}
+
+type overlayWhiteoutConverter struct {
+}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ dir, filename := filepath.Split(hdr.Name)
+ hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return nil, err
+ }
+ if len(opaque) == 1 && opaque[0] == 'y' {
+ if hdr.Xattrs != nil {
+ delete(hdr.Xattrs, "trusted.overlay.opaque")
+ }
+
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ wo = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return
+}
+
+func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+ if err != nil {
+ return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
+ }
+ // don't write the file itself
+ return false, err
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
+ return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 00000000000..2a3dc95398e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 00000000000..412cd5a3b0c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,115 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/containerd/containerd/pkg/userns"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) string {
+ return p // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ // Currently go does not fill in the major/minors
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
+ }
+ }
+
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if ok {
+ inode = s.Ino
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= unix.S_IFBLK
+ case tar.TypeChar:
+ mode |= unix.S_IFCHR
+ case tar.TypeFifo:
+ mode |= unix.S_IFIFO
+ }
+
+ err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+ if errors.Is(err, syscall.EPERM) && userns.RunningInUserNS() {
+ // In most cases, cannot create a device if running in user namespace
+ err = nil
+ }
+ return err
+}
+
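+// handleLChmod applies the header's mode with chmod. Symlinks are never
+// chmodded, and a hardlink is only chmodded when its target is not itself
+// a symlink.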
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 00000000000..7260174bfbb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,67 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) string {
+ return filepath.ToSlash(p)
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+ permPart := perm & os.ModePerm
+ noPermPart := perm &^ os.ModePerm
+ // Add the x bit: make everything +x from windows
+ permPart |= 0111
+ permPart &= 0755
+
+ return noPermPart | permPart
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
+ // do nothing. no notion of Rdev, Nlink in stat on Windows
+ return
+}
+
+func getInodeFromStat(stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Inode in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
+ // no notion of file ownership mapping yet on Windows
+ return idtools.Identity{UID: 0, GID: 0}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 00000000000..aedb91b0352
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,445 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change, it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be a modify, add, or delete.
+// This is used for layer diff.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar doesn't have sub-second mtime precision. The Go tar
+// writer (1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// The lack of sub-second precision is problematic when we apply changes via
+// tar files. We handle this by treating times as equal if they match exactly,
+// *or* if the second counts match and either a or b has exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a.Equal(b) ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
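+// sameFsTimeSpec is the syscall.Timespec analogue of sameFsTime.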
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
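+// aufsMetadataSkip reports whether path is an AUFS metadata entry
+// (matching "/.wh..wh.*"); such paths are skipped when walking for changes.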
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information for the given path within the tree rooted at info.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions
+ // also, we only recurse on the old dir if the new info is a directory
+ // otherwise any previous delete/change is considered recursive
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+			// back the mtime.
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+// Changes compares info against oldInfo and returns the resulting list of changes.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
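+				// a deletion is represented by an empty AUFS whiteout entry
+				// (".wh." + basename); change.Path is absolute within the
+				// layer, so strip the leading separator for the tar name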
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 00000000000..f8792b3d4e5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,286 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
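+			// same name in both trees: if the inode and device also match,
+			// it is the same file, so the entry (and any subtree under it)
+			// is pruned as unchanged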
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of unix.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
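+		// read the NUL-terminated name directly out of the dirent buffer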
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 00000000000..ba744741cd0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 00000000000..06217b7161e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,43 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size or modification time for dirs, it's not a good
+ // measure of change. See https://github.com/moby/moby/issues/9874
+ // for a description of the issue with modification time, and
+ // https://github.com/moby/moby/pull/11422 for the change.
+ // (Note that in the Windows implementation of this function,
+ // modification time IS taken as a change). See
+ // https://github.com/moby/moby/pull/37982 for more information.
+ (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 00000000000..9906685e4b0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,34 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Note there is slight difference between the Linux and Windows
+ // implementations here. Due to https://github.com/moby/moby/issues/9874,
+ // and the fix at https://github.com/moby/moby/pull/11422, Linux does not
+ // consider a change to the directory time as a change. Windows on NTFS
+ // does. See https://github.com/moby/moby/pull/37982 for more information.
+
+ if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
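+	// Windows file info carries no inode number; return the zero value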
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 00000000000..8a4cee908d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,487 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// Errors used or returned by the functions in this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// cleaned path already ends in the separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
+ originalPath = strings.Replace(originalPath, "/", string(sep), -1)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath, sep) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(sep)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
+ cleanedPath += string(sep)
+ }
+
+ return cleanedPath
+}
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string, sep byte) bool {
+ return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string, sep byte) bool {
+ return len(path) > 0 && path[len(path)-1] == sep
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(os.PathSeparator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+ opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+ return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+ filter := []string{sourceBase}
+ return &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ }
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symlink;
+	// we will use the target file instead of the symlink if
+	// followLink is set
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+		// link itself. This is notably different from CopyInfoSourcePath, which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Stat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path, os.PathSeparator):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ // srcContent tar stream, as served by TarWithOptions(), is
+ // definitely in PAX format, but tar.Next() mistakenly guesses it
+ // as USTAR, which creates a problem: if the newBase is >100
+ // characters long, WriteHeader() returns an error like
+ // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+ //
+ // To fix, set the format to PAX here. See docker/for-linux issue #484.
+ hdr.Format = tar.FormatPAX
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433
+			// and https://cure53.de/pentest-report_opa.pdf, which recommends
+			// replacing io.Copy with io.CopyN. The latter allows specifying the
+ // maximum number of bytes that should be read. By properly defining
+ // the limit, it can be assured that a GZip compression bomb cannot
+ // easily cause a Denial-of-Service.
+ // After reviewing with @tonistiigi and @cpuguy83, this should not
+ // affect us, because here we do not read into memory, hence should
+ // not be vulnerable to this code consuming memory.
+ //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec)
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path to be copied, based on parameters
+// such as whether to follow symlinks. If followLink is true, resolvedPath is the
+// link target of any symlink file; otherwise only symlinks in the directory
+// portion are resolved, and a trailing symlink file is returned unresolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+		// if not following symlinks, then resolve the symlink of the parent dir
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and
+// returns the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath will have been cleaned (no trailing path separator or "."),
+	// so we can manually re-append them as needed
+ var rebaseName string
+ if specifiesCurrentDir(path) &&
+ !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path, os.PathSeparator) &&
+ !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 00000000000..3958364f5ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 00000000000..a878d1bac42
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 00000000000..27897e6ab73
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,260 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+ "github.com/sirupsen/logrus"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+		// Note: as these operations are platform specific, so must the slash be.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+		// Note: as these operations are platform specific, so must the slash be.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
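+				// an opaque directory hides everything below it in lower
+				// layers: remove anything under dir that this layer has not
+				// itself unpacked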
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+			// If the path exists, we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
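+			// point the pooled buffered reader at this entry's data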
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := remapIDs(idMapping, srcHdr); err != nil {
+ return 0, err
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// applyLayerHandler does the bulk of ApplyLayer's work, optionally skipping DecompressStream.
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ if runtime.GOOS != "windows" {
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask)
+ }
+
+ if decompress {
+ decompLayer, err := DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompLayer.Close()
+ layer = decompLayer
+ }
+ return UnpackLayer(dest, layer, options)
+}
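
For orientation, a minimal sketch of the entry points above in use; the tarball path and destination directory are hypothetical, and unpacking device nodes or foreign ownership generally requires root:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Hypothetical layer tarball; ApplyLayer handles both compressed and
	// uncompressed streams and returns the unpacked size in bytes.
	layer, err := os.Open("layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("unpacked %d bytes\n", size)
}
```
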
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 00000000000..797143ee84d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = (1 << 30) - 2
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
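
The `(1 << 30) - 2` literal is Linux's UTIME_OMIT sentinel: a zero time in a header becomes "leave this timestamp alone" when the Timespec reaches utimensat(2). A Linux-only sketch of the same mapping, using the named constant from golang.org/x/sys/unix:

```go
package main

import (
	"fmt"
	"syscall"
	"time"

	"golang.org/x/sys/unix"
)

// sameMapping mirrors the unexported timeToTimespec above: a zero time
// becomes UTIME_OMIT so utimensat(2) skips that timestamp entirely.
func sameMapping(t time.Time) syscall.Timespec {
	if t.IsZero() {
		return syscall.Timespec{Sec: 0, Nsec: unix.UTIME_OMIT}
	}
	return syscall.NsecToTimespec(t.UnixNano())
}

func main() {
	fmt.Println(unix.UTIME_OMIT == (1<<30)-2) // true
	fmt.Println(sameMapping(time.Time{}))     // {0 1073741822}
}
```
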
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 00000000000..f58bf227fd3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 00000000000..4c072a87ee5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means the file is a whiteout. If this prefix is
+// followed by a filename, that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means the directory has been made opaque: readdir
+// calls on this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 00000000000..85435694cff
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with empty
+// content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+//  * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
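
A sketch of Generate in use, reading back the produced tar to show the trailing unpaired name yielding an empty file:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Odd number of arguments: the trailing name gets empty content.
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}
```
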
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 00000000000..25a57b231e0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,241 @@
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+type subIDRange struct {
+ Start int
+ Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int { return len(e) }
+func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+ subuidFileName = "/etc/subuid"
+ subgidFileName = "/etc/subgid"
+)
+
+// MkdirAllAndChown creates a directory (including any missing parents) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership and permissions.
+func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
+ return mkdirAs(path, mode, owner, true, true)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership and permissions.
+// Note that unlike os.Mkdir(), this function does not return IsExist error
+// in case path already exists.
+func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
+ return mkdirAs(path, mode, owner, false, true)
+}
+
+// MkdirAllAndChownNew creates a directory (including any missing parents) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership or permissions will be performed
+func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
+ return mkdirAs(path, mode, owner, true, false)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ uid, err := toHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ return uid, gid, nil
+}
+
+// toContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// Identity is either a UID and GID pair or a SID (but not both)
+type Identity struct {
+ UID int
+ GID int
+ SID string
+}
+
+// IdentityMapping contains mappings of UIDs and GIDs.
+type IdentityMapping struct {
+ uids []IDMap
+ gids []IDMap
+}
+
+// NewIDMappingsFromMaps creates a new mapping from two slices
+// Deprecated: this is a temporary shim while transitioning to IDMapping
+func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping {
+ return &IdentityMapping{uids: uids, gids: gids}
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i *IdentityMapping) RootPair() Identity {
+ uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
+ return Identity{UID: uid, GID: gid}
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) {
+ var err error
+ target := i.RootPair()
+
+ if pair.UID != target.UID {
+ target.UID, err = toHost(pair.UID, i.uids)
+ if err != nil {
+ return target, err
+ }
+ }
+
+ if pair.GID != target.GID {
+ target.GID, err = toHost(pair.GID, i.gids)
+ }
+ return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) {
+ uid, err := toContainer(pair.UID, i.uids)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toContainer(pair.GID, i.gids)
+ return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IdentityMapping) Empty() bool {
+ return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs return the UID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IdentityMapping) UIDs() []IDMap {
+ return i.uids
+}
+
+// GIDs return the GID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IdentityMapping) GIDs() []IDMap {
+ return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+ idMap := []IDMap{}
+
+ containerID := 0
+ for _, idrange := range subidRanges {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: idrange.Start,
+ Size: idrange.Length,
+ })
+ containerID = containerID + idrange.Length
+ }
+ return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+ var rangeList ranges
+
+ subidFile, err := os.Open(path)
+ if err != nil {
+ return rangeList, err
+ }
+ defer subidFile.Close()
+
+ s := bufio.NewScanner(subidFile)
+ for s.Scan() {
+ text := strings.TrimSpace(s.Text())
+ if text == "" || strings.HasPrefix(text, "#") {
+ continue
+ }
+ parts := strings.Split(text, ":")
+ if len(parts) != 3 {
+ return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ }
+ if parts[0] == username || username == "ALL" {
+ startid, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ length, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ rangeList = append(rangeList, subIDRange{startid, length})
+ }
+ }
+
+ return rangeList, s.Err()
+}
+
+// CurrentIdentity returns the identity of the current process
+func CurrentIdentity() Identity {
+ // Use the real (not effective) GID, matching the real UID.
+ return Identity{UID: os.Getuid(), GID: os.Getgid()}
+}
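
A worked sketch of the exported mapping API; the single range (container 0-65535 onto host 100000-165535) is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// One remapping entry: container IDs 0-65535 map to host 100000-165535.
	m := idtools.NewIDMappingsFromMaps(
		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
		[]idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	)

	fmt.Println(m.RootPair()) // {100000 100000 }

	// Container uid/gid 1000 lands at host 101000 under this range.
	host, err := m.ToHost(idtools.Identity{UID: 1000, GID: 1000})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host.UID, host.GID) // 101000 101000
}
```
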
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 00000000000..e7d25ee4713
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,295 @@
+// +build !windows
+
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+ "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/pkg/errors"
+)
+
+var (
+ entOnce sync.Once
+ getentCmd string
+)
+
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If chownExisting is false, we won't
+ // chown the full directory path if it exists
+
+ var paths []string
+
+ stat, err := system.Stat(path)
+ if err == nil {
+ if !stat.IsDir() {
+ return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+ }
+ if !chownExisting {
+ return nil
+ }
+
+ // short-circuit--we were called with an existing directory and chown was requested
+ return setPermissions(path, mode, owner.UID, owner.GID, stat)
+ }
+
+ if os.IsNotExist(err) {
+ paths = []string{path}
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err := system.MkdirAll(path, mode); err != nil {
+ return err
+ }
+ } else {
+ if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+func CanAccess(path string, pair Identity) bool {
+ statInfo, err := system.Stat(path)
+ if err != nil {
+ return false
+ }
+ fileMode := os.FileMode(statInfo.Mode())
+ permBits := fileMode.Perm()
+ return accessible(statInfo.UID() == uint32(pair.UID),
+ statInfo.GID() == uint32(pair.GID), permBits)
+}
+
+func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
+ if isOwner && (perms&0100 == 0100) {
+ return true
+ }
+ if isGroup && (perms&0010 == 0010) {
+ return true
+ }
+ if perms&0001 == 0001 {
+ return true
+ }
+ return false
+}
+
+// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUser(name string) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUser(name)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ usr, err = getentUser(name)
+ if err != nil {
+ return user.User{}, err
+ }
+ return usr, nil
+}
+
+// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUID(uid int) (user.User, error) {
+ // first try a local system files lookup using existing capabilities
+ usr, err := user.LookupUid(uid)
+ if err == nil {
+ return usr, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured passwd dbs
+ return getentUser(strconv.Itoa(uid))
+}
+
+func getentUser(name string) (user.User, error) {
+ reader, err := callGetent("passwd", name)
+ if err != nil {
+ return user.User{}, err
+ }
+ users, err := user.ParsePasswd(reader)
+ if err != nil {
+ return user.User{}, err
+ }
+ if len(users) == 0 {
+ return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name)
+ }
+ return users[0], nil
+}
+
+// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGroup(name string) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGroup(name)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(name)
+}
+
+// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGID(gid int) (user.Group, error) {
+ // first try a local system files lookup using existing capabilities
+ group, err := user.LookupGid(gid)
+ if err == nil {
+ return group, nil
+ }
+ // local files lookup failed; attempt to call `getent` to query configured group dbs
+ return getentGroup(strconv.Itoa(gid))
+}
+
+func getentGroup(name string) (user.Group, error) {
+ reader, err := callGetent("group", name)
+ if err != nil {
+ return user.Group{}, err
+ }
+ groups, err := user.ParseGroup(reader)
+ if err != nil {
+ return user.Group{}, err
+ }
+ if len(groups) == 0 {
+ return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name)
+ }
+ return groups[0], nil
+}
+
+func callGetent(database, key string) (io.Reader, error) {
+ entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
+ // if no `getent` command on host, can't do anything else
+ if getentCmd == "" {
+ return nil, fmt.Errorf("unable to find getent command")
+ }
+ out, err := execCmd(getentCmd, database, key)
+ if err != nil {
+ exitCode, errC := system.GetExitCode(err)
+ if errC != nil {
+ return nil, err
+ }
+ switch exitCode {
+ case 1:
+ return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
+ case 2:
+ return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database)
+ case 3:
+ return nil, fmt.Errorf("getent database doesn't support enumeration")
+ default:
+ return nil, err
+ }
+
+ }
+ return bytes.NewReader(out), nil
+}
+
+// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
+// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
+// dir is on an NFS share, so don't call chown unless we absolutely must.
+// Likewise for setting permissions.
+func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error {
+ if stat == nil {
+ var err error
+ stat, err = system.Stat(p)
+ if err != nil {
+ return err
+ }
+ }
+ if os.FileMode(stat.Mode()).Perm() != mode.Perm() {
+ if err := os.Chmod(p, mode.Perm()); err != nil {
+ return err
+ }
+ }
+ if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+ return nil
+ }
+ return os.Chown(p, uid, gid)
+}
+
+// NewIdentityMapping takes a requested username and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIdentityMapping(name string) (*IdentityMapping, error) {
+ usr, err := LookupUser(name)
+ if err != nil {
+ return nil, fmt.Errorf("Could not get user for username %s: %v", name, err)
+ }
+
+ subuidRanges, err := lookupSubUIDRanges(usr)
+ if err != nil {
+ return nil, err
+ }
+ subgidRanges, err := lookupSubGIDRanges(usr)
+ if err != nil {
+ return nil, err
+ }
+
+ return &IdentityMapping{
+ uids: subuidRanges,
+ gids: subgidRanges,
+ }, nil
+}
+
+func lookupSubUIDRanges(usr user.User) ([]IDMap, error) {
+ rangeList, err := parseSubuid(strconv.Itoa(usr.Uid))
+ if err != nil {
+ return nil, err
+ }
+ if len(rangeList) == 0 {
+ rangeList, err = parseSubuid(usr.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(rangeList) == 0 {
+ return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name)
+ }
+ return createIDMap(rangeList), nil
+}
+
+func lookupSubGIDRanges(usr user.User) ([]IDMap, error) {
+ rangeList, err := parseSubgid(strconv.Itoa(usr.Uid))
+ if err != nil {
+ return nil, err
+ }
+ if len(rangeList) == 0 {
+ rangeList, err = parseSubgid(usr.Name)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(rangeList) == 0 {
+ return nil, errors.Errorf("no subgid ranges found for user %q", usr.Name)
+ }
+ return createIDMap(rangeList), nil
+}
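
A hedged sketch of the Unix directory helpers; the path and IDs are placeholders, and the chown step will fail without sufficient privileges:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	owner := idtools.Identity{UID: 100000, GID: 100000}

	// Creates /tmp/demo/a/b, chowning every component it had to create;
	// a pre-existing leaf is re-chowned because of the *AndChown variant.
	if err := idtools.MkdirAllAndChown("/tmp/demo/a/b", 0700, owner); err != nil {
		log.Fatal(err)
	}

	// The *AndChownNew variant leaves pre-existing components untouched.
	if err := idtools.MkdirAllAndChownNew("/tmp/demo/a/c", 0700, owner); err != nil {
		log.Fatal(err)
	}
}
```
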
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 00000000000..0f5aadd4961
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,34 @@
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+const (
+ SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
+)
+
+const (
+ ContainerAdministratorSidString = "S-1-5-93-2-1"
+ ContainerUserSidString = "S-1-5-93-2-2"
+)
+
+// mkdirAs is currently a wrapper around MkdirAll; since permissions aren't
+// set through this path, the identity isn't used. Ownership is handled
+// elsewhere, but could be supported here in the future.
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
+ if err := system.MkdirAll(path, mode); err != nil {
+ return err
+ }
+ return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+// Windows does not require/support this function, so always return true
+func CanAccess(path string, identity Identity) bool {
+ return true
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 00000000000..bf7ae0564ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,164 @@
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group
+// useradd -r -s /bin/false
+
+var (
+ once sync.Once
+ userCommand string
+ idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+)
+
+const (
+ // default length for a UID/GID subordinate range
+ defaultRangeLen = 65536
+ defaultRangeStart = 100000
+)
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ if err := addUser(name); err != nil {
+ return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ }
+
+ // Query the system for the created uid and gid pair
+ out, err := execCmd("id", name)
+ if err != nil {
+ return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ }
+ matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+ if len(matches) != 3 {
+ return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ }
+ uid, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ }
+ gid, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ }
+
+ // Now we need to create the subuid/subgid ranges for our new user/group (system users
+ // do not get auto-created ranges in subuid/subgid)
+
+ if err := createSubordinateRanges(name); err != nil {
+ return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ }
+ return uid, gid, nil
+}
+
+func addUser(name string) error {
+ once.Do(func() {
+ // set up which commands are used for adding users/groups dependent on distro
+ if _, err := resolveBinary("adduser"); err == nil {
+ userCommand = "adduser"
+ } else if _, err := resolveBinary("useradd"); err == nil {
+ userCommand = "useradd"
+ }
+ })
+ var args []string
+ switch userCommand {
+ case "adduser":
+ args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name}
+ case "useradd":
+ args = []string{"-r", "-s", "/bin/false", name}
+ default:
+ return fmt.Errorf("cannot add user; no useradd/adduser binary found")
+ }
+
+ if out, err := execCmd(userCommand, args...); err != nil {
+ return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out))
+ }
+ return nil
+}
+
+func createSubordinateRanges(name string) error {
+
+ // first, we should verify that ranges weren't automatically created
+ // by the distro tooling
+ ranges, err := parseSubuid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no UID ranges; let's create one
+ startID, err := findNextUIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subuid range: %v", err)
+ }
+ out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
+ if err != nil {
+ return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+
+ ranges, err = parseSubgid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no GID ranges; let's create one
+ startID, err := findNextGIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subgid range: %v", err)
+ }
+ out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
+ if err != nil {
+ return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+ return nil
+}
+
+func findNextUIDRange() (int, error) {
+ ranges, err := parseSubuid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+ ranges, err := parseSubgid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+ startID := defaultRangeStart
+ for _, arange := range rangeList {
+ if wouldOverlap(arange, startID) {
+ startID = arange.Start + arange.Length
+ }
+ }
+ return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+ low := ID
+ high := ID + defaultRangeLen
+ if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+ (high <= arange.Start+arange.Length && high >= arange.Start) {
+ return true
+ }
+ return false
+}
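
The allocation scan is easiest to follow with concrete numbers. A self-contained restatement of findNextRangeStart and wouldOverlap with made-up ranges (real callers read them, sorted, from /etc/subuid):

```go
package main

import "fmt"

type subIDRange struct{ Start, Length int }

const defaultRangeLen = 65536

// wouldOverlap restates the check above: would a range of defaultRangeLen
// starting at id collide with the existing range r?
func wouldOverlap(r subIDRange, id int) bool {
	low, high := id, id+defaultRangeLen
	return (low >= r.Start && low <= r.Start+r.Length) ||
		(high <= r.Start+r.Length && high >= r.Start)
}

// nextStart restates findNextRangeStart: starting at 100000, bump the
// candidate past every sorted range it would overlap.
func nextStart(sorted []subIDRange) int {
	start := 100000
	for _, r := range sorted {
		if wouldOverlap(r, start) {
			start = r.Start + r.Length
		}
	}
	return start
}

func main() {
	// Two users already hold 100000-165535 and 165536-231071,
	// so the next free range begins at 231072.
	fmt.Println(nextStart([]subIDRange{
		{Start: 100000, Length: 65536},
		{Start: 165536, Length: 65536},
	})) // 231072
}
```
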
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 00000000000..e7c4d63118c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import "fmt"
+
+// AddNamespaceRangesUser would create a user/group pair to hold subordinate
+// ID ranges; it is unsupported on this OS and always returns an error.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
new file mode 100644
index 00000000000..1e2d4a7a75a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
@@ -0,0 +1,31 @@
+// +build !windows
+
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+)
+
+func resolveBinary(binname string) (string, error) {
+ binaryPath, err := exec.LookPath(binname)
+ if err != nil {
+ return "", err
+ }
+ resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+ if err != nil {
+ return "", err
+ }
+ // only return no error if the final resolved binary basename
+ // matches what was searched for
+ if filepath.Base(resolvedPath) == binname {
+ return resolvedPath, nil
+ }
+ return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+func execCmd(cmd string, arg ...string) ([]byte, error) {
+ execCmd := exec.Command(cmd, arg...)
+ return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 00000000000..3792c67a9e4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,137 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools // import "github.com/docker/docker/pkg/pools"
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+const buffer32K = 32 * 1024
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+ buffer32KPool = newBufferPoolWithSize(buffer32K)
+)
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ return &BufioReaderPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+type bufferPool struct {
+ pool sync.Pool
+}
+
+func newBufferPoolWithSize(size int) *bufferPool {
+ return &bufferPool{
+ pool: sync.Pool{
+ New: func() interface{} { s := make([]byte, size); return &s },
+ },
+ }
+}
+
+func (bp *bufferPool) Get() *[]byte {
+ return bp.pool.Get().(*[]byte)
+}
+
+func (bp *bufferPool) Put(b *[]byte) {
+ bp.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := buffer32KPool.Get()
+ written, err = io.CopyBuffer(dst, src, *buf)
+ buffer32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a pool of bufio.Writers backed by sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ return &BufioWriterPool{
+ pool: sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ },
+ }
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
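
A quick sketch of the pooled copy helper; any reader/writer pair works:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	// pools.Copy reuses a shared 32K buffer instead of allocating one per
	// call, which matters on hot archive/stream paths.
	n, err := pools.Copy(os.Stdout, strings.NewReader("hello from the pool\n"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "copied %d bytes\n", n)
}
```
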
diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE
new file mode 100644
index 00000000000..27448585ad4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE
new file mode 100644
index 00000000000..5c97abce4b9
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/NOTICE
@@ -0,0 +1,17 @@
+runc
+
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (http://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see http://www.bis.doc.gov
+
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
new file mode 100644
index 00000000000..967717a1b1c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -0,0 +1,156 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package user
+
+import (
+ "io"
+ "os"
+ "strconv"
+
+ "golang.org/x/sys/unix"
+)
+
+// Unix-specific path to the passwd and group formatted files.
+const (
+ unixPasswdPath = "/etc/passwd"
+ unixGroupPath = "/etc/group"
+)
+
+// LookupUser looks up a user by their username in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUser returns an error.
+func LookupUser(username string) (User, error) {
+ return lookupUserFunc(func(u User) bool {
+ return u.Name == username
+ })
+}
+
+// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
+// be found (or there is no /etc/passwd file on the filesystem), then LookupUid
+// returns an error.
+func LookupUid(uid int) (User, error) {
+ return lookupUserFunc(func(u User) bool {
+ return u.Uid == uid
+ })
+}
+
+func lookupUserFunc(filter func(u User) bool) (User, error) {
+ // Get operating system-specific passwd reader-closer.
+ passwd, err := GetPasswd()
+ if err != nil {
+ return User{}, err
+ }
+ defer passwd.Close()
+
+ // Get the users.
+ users, err := ParsePasswdFilter(passwd, filter)
+ if err != nil {
+ return User{}, err
+ }
+
+ // No user entries found.
+ if len(users) == 0 {
+ return User{}, ErrNoPasswdEntries
+ }
+
+ // Assume the first entry is the "correct" one.
+ return users[0], nil
+}
+
+// LookupGroup looks up a group by its name in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGroup
+// returns an error.
+func LookupGroup(groupname string) (Group, error) {
+ return lookupGroupFunc(func(g Group) bool {
+ return g.Name == groupname
+ })
+}
+
+// LookupGid looks up a group by its group id in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGid
+// returns an error.
+func LookupGid(gid int) (Group, error) {
+ return lookupGroupFunc(func(g Group) bool {
+ return g.Gid == gid
+ })
+}
+
+func lookupGroupFunc(filter func(g Group) bool) (Group, error) {
+ // Get operating system-specific group reader-closer.
+ group, err := GetGroup()
+ if err != nil {
+ return Group{}, err
+ }
+ defer group.Close()
+
+ // Get the groups.
+ groups, err := ParseGroupFilter(group, filter)
+ if err != nil {
+ return Group{}, err
+ }
+
+ // No group entries found.
+ if len(groups) == 0 {
+ return Group{}, ErrNoGroupEntries
+ }
+
+ // Assume the first entry is the "correct" one.
+ return groups[0], nil
+}
+
+func GetPasswdPath() (string, error) {
+ return unixPasswdPath, nil
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return os.Open(unixPasswdPath)
+}
+
+func GetGroupPath() (string, error) {
+ return unixGroupPath, nil
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return os.Open(unixGroupPath)
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+ return LookupUid(unix.Getuid())
+}
+
+// CurrentGroup looks up the current user's group by their primary group id's
+// entry in /etc/group. If the group cannot be found (or there is no
+// /etc/group file on the filesystem), then CurrentGroup returns an error.
+func CurrentGroup() (Group, error) {
+ return LookupGid(unix.Getgid())
+}
+
+func currentUserSubIDs(fileName string) ([]SubID, error) {
+ u, err := CurrentUser()
+ if err != nil {
+ return nil, err
+ }
+ filter := func(entry SubID) bool {
+ return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
+ }
+ return ParseSubIDFileFilter(fileName, filter)
+}
+
+func CurrentUserSubUIDs() ([]SubID, error) {
+ return currentUserSubIDs("/etc/subuid")
+}
+
+func CurrentUserSubGIDs() ([]SubID, error) {
+ return currentUserSubIDs("/etc/subgid")
+}
+
+func CurrentProcessUIDMap() ([]IDMap, error) {
+ return ParseIDMapFile("/proc/self/uid_map")
+}
+
+func CurrentProcessGIDMap() ([]IDMap, error) {
+ return ParseIDMapFile("/proc/self/gid_map")
+}
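
A small sketch of the lookup helpers on a Unix host with a readable /etc/passwd:

```go
package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	me, err := user.CurrentUser()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("uid=%d name=%s home=%s\n", me.Uid, me.Name, me.Home)

	root, err := user.LookupUid(0)
	if err != nil {
		log.Fatal(err) // e.g. ErrNoPasswdEntries on a minimal image
	}
	fmt.Println("uid 0 is", root.Name)
}
```
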
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
new file mode 100644
index 00000000000..cc7a106be5d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
@@ -0,0 +1,604 @@
+package user
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ minID = 0
+ maxID = 1<<31 - 1 // for 32-bit systems compatibility
+)
+
+var (
+ // ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd.
+ ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
+ // ErrNoGroupEntries is returned if no matching entries were found in /etc/group.
+ ErrNoGroupEntries = errors.New("no matching entries in group file")
+ // ErrRange is returned if a UID or GID is outside of the valid range.
+ ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID)
+)
+
+type User struct {
+ Name string
+ Pass string
+ Uid int
+ Gid int
+ Gecos string
+ Home string
+ Shell string
+}
+
+type Group struct {
+ Name string
+ Pass string
+ Gid int
+ List []string
+}
+
+// SubID represents an entry in /etc/sub{u,g}id
+type SubID struct {
+ Name string
+ SubID int64
+ Count int64
+}
+
+// IDMap represents an entry in /proc/PID/{u,g}id_map
+type IDMap struct {
+ ID int64
+ ParentID int64
+ Count int64
+}
+
+func parseLine(line []byte, v ...interface{}) {
+ parseParts(bytes.Split(line, []byte(":")), v...)
+}
+
+func parseParts(parts [][]byte, v ...interface{}) {
+ if len(parts) == 0 {
+ return
+ }
+
+ for i, p := range parts {
+ // Ignore cases where we don't have enough fields to populate the arguments.
+ // Some configuration files like to misbehave.
+ if len(v) <= i {
+ break
+ }
+
+ // Use the type of the argument to figure out how to parse it, scanf() style.
+ // This is legit.
+ switch e := v[i].(type) {
+ case *string:
+ *e = string(p)
+ case *int:
+ // "numbers", with conversion errors ignored because of some misbehaving configuration files.
+ *e, _ = strconv.Atoi(string(p))
+ case *int64:
+ *e, _ = strconv.ParseInt(string(p), 10, 64)
+ case *[]string:
+ // Comma-separated lists.
+ if len(p) != 0 {
+ *e = strings.Split(string(p), ",")
+ } else {
+ *e = []string{}
+ }
+ default:
+ // Someone goof'd when writing code using this function. Scream so they can hear us.
+ panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
+ }
+ }
+}
+
+func ParsePasswdFile(path string) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswd(passwd)
+}
+
+func ParsePasswd(passwd io.Reader) ([]User, error) {
+ return ParsePasswdFilter(passwd, nil)
+}
+
+func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswdFilter(passwd, filter)
+}
+
+func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for passwd-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []User{}
+ )
+
+ for s.Scan() {
+ line := bytes.TrimSpace(s.Bytes())
+ if len(line) == 0 {
+ continue
+ }
+
+ // see: man 5 passwd
+ // name:password:UID:GID:GECOS:directory:shell
+ // Name:Pass:Uid:Gid:Gecos:Home:Shell
+ // root:x:0:0:root:/root:/bin/bash
+ // adm:x:3:4:adm:/var/adm:/bin/false
+ p := User{}
+ parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+func ParseGroupFile(path string) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer group.Close()
+ return ParseGroup(group)
+}
+
+func ParseGroup(group io.Reader) ([]Group, error) {
+ return ParseGroupFilter(group, nil)
+}
+
+func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroupFilter(group, filter)
+}
+
+func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for group-formatted data")
+ }
+ rd := bufio.NewReader(r)
+ out := []Group{}
+
+ // Read the file line-by-line.
+ for {
+ var (
+ isPrefix bool
+ wholeLine []byte
+ err error
+ )
+
+ // Read the next line. We do so in chunks (as much as the reader's
+ // buffer can hold), check on each step whether the whole line has
+ // been read, and store the final result in wholeLine.
+ for {
+ var line []byte
+ line, isPrefix, err = rd.ReadLine()
+
+ if err != nil {
+ // We should return no error if EOF is reached
+ // without a match.
+ if err == io.EOF { //nolint:errorlint // comparison with io.EOF is legit, https://github.com/polyfloyd/go-errorlint/pull/12
+ err = nil
+ }
+ return out, err
+ }
+
+ // Simple common case: line is short enough to fit in a
+ // single reader's buffer.
+ if !isPrefix && len(wholeLine) == 0 {
+ wholeLine = line
+ break
+ }
+
+ wholeLine = append(wholeLine, line...)
+
+ // Check if we read the whole line already.
+ if !isPrefix {
+ break
+ }
+ }
+
+ // There's no spec for /etc/passwd or /etc/group, but we try to follow
+ // the same rules as the glibc parser, which allows comments and blank
+ // space at the beginning of a line.
+ wholeLine = bytes.TrimSpace(wholeLine)
+ if len(wholeLine) == 0 || wholeLine[0] == '#' {
+ continue
+ }
+
+ // see: man 5 group
+ // group_name:password:GID:user_list
+ // Name:Pass:Gid:List
+ // root:x:0:root
+ // adm:x:4:root,adm,daemon
+ p := Group{}
+ parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+}
+
+type ExecUser struct {
+ Uid int
+ Gid int
+ Sgids []int
+ Home string
+}
+
+// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
+// given file paths and uses that data as the arguments to GetExecUser. If the
+// files cannot be opened for any reason, the error is ignored and a nil
+// io.Reader is passed instead.
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
+ var passwd, group io.Reader
+
+ if passwdFile, err := os.Open(passwdPath); err == nil {
+ passwd = passwdFile
+ defer passwdFile.Close()
+ }
+
+ if groupFile, err := os.Open(groupPath); err == nil {
+ group = groupFile
+ defer groupFile.Close()
+ }
+
+ return GetExecUser(userSpec, defaults, passwd, group)
+}
+
+// GetExecUser parses a user specification string (using the passwd and group
+// readers as sources for /etc/passwd and /etc/group data, respectively). In
+// the case of blank fields or missing data from the sources, the values in
+// defaults are used.
+//
+// GetExecUser will return an error if a user or group literal could not be
+// found in any entry in passwd and group respectively.
+//
+// Examples of valid user specifications are:
+// * ""
+// * "user"
+// * "uid"
+// * "user:group"
+// * "uid:gid
+// * "user:gid"
+// * "uid:group"
+//
+// Note that if you specify a numeric user or group ID, it will not be
+// evaluated as a username (only the metadata will be filled in). So attempting
+// to parse a user with user.Name = "1337" will produce a user with a UID of
+// 1337.
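+//
+// For example (an illustrative sketch): with nil passwd and group readers,
+// GetExecUser("1000:1000", nil, nil, nil) yields an ExecUser with Uid=1000 and
+// Gid=1000, since numeric IDs with no matching entries are accepted as long as
+// they fall within the valid ID range.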
+func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
+ if defaults == nil {
+ defaults = new(ExecUser)
+ }
+
+ // Copy over defaults.
+ user := &ExecUser{
+ Uid: defaults.Uid,
+ Gid: defaults.Gid,
+ Sgids: defaults.Sgids,
+ Home: defaults.Home,
+ }
+
+ // Sgids slice *cannot* be nil.
+ if user.Sgids == nil {
+ user.Sgids = []int{}
+ }
+
+	// Allow userSpec to have either "user" syntax or "user:group" syntax.
+ var userArg, groupArg string
+ parseLine([]byte(userSpec), &userArg, &groupArg)
+
+ // Convert userArg and groupArg to be numeric, so we don't have to execute
+ // Atoi *twice* for each iteration over lines.
+ uidArg, uidErr := strconv.Atoi(userArg)
+ gidArg, gidErr := strconv.Atoi(groupArg)
+
+ // Find the matching user.
+ users, err := ParsePasswdFilter(passwd, func(u User) bool {
+ if userArg == "" {
+ // Default to current state of the user.
+ return u.Uid == user.Uid
+ }
+
+ if uidErr == nil {
+ // If the userArg is numeric, always treat it as a UID.
+ return uidArg == u.Uid
+ }
+
+ return u.Name == userArg
+ })
+
+ // If we can't find the user, we have to bail.
+ if err != nil && passwd != nil {
+ if userArg == "" {
+ userArg = strconv.Itoa(user.Uid)
+ }
+ return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
+ }
+
+ var matchedUserName string
+ if len(users) > 0 {
+ // First match wins, even if there's more than one matching entry.
+ matchedUserName = users[0].Name
+ user.Uid = users[0].Uid
+ user.Gid = users[0].Gid
+ user.Home = users[0].Home
+ } else if userArg != "" {
+ // If we can't find a user with the given username, the only other valid
+ // option is if it's a numeric username with no associated entry in passwd.
+
+ if uidErr != nil {
+ // Not numeric.
+ return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
+ }
+ user.Uid = uidArg
+
+ // Must be inside valid uid range.
+ if user.Uid < minID || user.Uid > maxID {
+ return nil, ErrRange
+ }
+
+ // Okay, so it's numeric. We can just roll with this.
+ }
+
+ // On to the groups. If we matched a username, we need to do this because of
+ // the supplementary group IDs.
+ if groupArg != "" || matchedUserName != "" {
+ groups, err := ParseGroupFilter(group, func(g Group) bool {
+ // If the group argument isn't explicit, we'll just search for it.
+ if groupArg == "" {
+ // Check if user is a member of this group.
+ for _, u := range g.List {
+ if u == matchedUserName {
+ return true
+ }
+ }
+ return false
+ }
+
+ if gidErr == nil {
+ // If the groupArg is numeric, always treat it as a GID.
+ return gidArg == g.Gid
+ }
+
+ return g.Name == groupArg
+ })
+ if err != nil && group != nil {
+ return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
+ }
+
+ // Only start modifying user.Gid if it is in explicit form.
+ if groupArg != "" {
+ if len(groups) > 0 {
+ // First match wins, even if there's more than one matching entry.
+ user.Gid = groups[0].Gid
+ } else {
+ // If we can't find a group with the given name, the only other valid
+ // option is if it's a numeric group name with no associated entry in group.
+
+ if gidErr != nil {
+ // Not numeric.
+ return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
+ }
+ user.Gid = gidArg
+
+ // Must be inside valid gid range.
+ if user.Gid < minID || user.Gid > maxID {
+ return nil, ErrRange
+ }
+
+ // Okay, so it's numeric. We can just roll with this.
+ }
+ } else if len(groups) > 0 {
+ // Supplementary group ids only make sense if in the implicit form.
+ user.Sgids = make([]int, len(groups))
+ for i, group := range groups {
+ user.Sgids[i] = group.Gid
+ }
+ }
+ }
+
+ return user, nil
+}
+
+// GetAdditionalGroups looks up a list of groups by name or group id
+// against the given /etc/group formatted data. If a group name cannot
+// be found, an error will be returned. If a group id cannot be found,
+// or the given group data is nil, the id will be returned as-is
+// provided it is in the legal range.
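+//
+// For example (illustrative): GetAdditionalGroups([]string{"1001"}, nil)
+// returns []int{1001}, because a numeric group ID is passed through as-is when
+// no group data is available.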
+func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
+ groups := []Group{}
+ if group != nil {
+ var err error
+ groups, err = ParseGroupFilter(group, func(g Group) bool {
+ for _, ag := range additionalGroups {
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+ }
+ }
+
+ gidMap := make(map[int]struct{})
+ for _, ag := range additionalGroups {
+ var found bool
+ for _, g := range groups {
+ // if we found a matched group either by name or gid, take the
+ // first matched as correct
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ if _, ok := gidMap[g.Gid]; !ok {
+ gidMap[g.Gid] = struct{}{}
+ found = true
+ break
+ }
+ }
+ }
+ // we asked for a group but didn't find it. let's check to see
+ // if we wanted a numeric group
+ if !found {
+ gid, err := strconv.ParseInt(ag, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find group %s", ag)
+ }
+ // Ensure gid is inside gid range.
+ if gid < minID || gid > maxID {
+ return nil, ErrRange
+ }
+ gidMap[int(gid)] = struct{}{}
+ }
+ }
+ gids := []int{}
+ for gid := range gidMap {
+ gids = append(gids, gid)
+ }
+ return gids, nil
+}
+
+// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
+// that opens the groupPath given and gives it as an argument to
+// GetAdditionalGroups.
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
+ var group io.Reader
+
+ if groupFile, err := os.Open(groupPath); err == nil {
+ group = groupFile
+ defer groupFile.Close()
+ }
+ return GetAdditionalGroups(additionalGroups, group)
+}
+
+func ParseSubIDFile(path string) ([]SubID, error) {
+ subid, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer subid.Close()
+ return ParseSubID(subid)
+}
+
+func ParseSubID(subid io.Reader) ([]SubID, error) {
+ return ParseSubIDFilter(subid, nil)
+}
+
+func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
+ subid, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer subid.Close()
+ return ParseSubIDFilter(subid, filter)
+}
+
+func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for subid-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []SubID{}
+ )
+
+ for s.Scan() {
+ line := bytes.TrimSpace(s.Bytes())
+ if len(line) == 0 {
+ continue
+ }
+
+ // see: man 5 subuid
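+		// Each line is "name:start:count", e.g. "alice:100000:65536", which
+		// grants alice the subordinate ID range [100000, 165536).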
+ p := SubID{}
+ parseLine(line, &p.Name, &p.SubID, &p.Count)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+func ParseIDMapFile(path string) ([]IDMap, error) {
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ return ParseIDMap(r)
+}
+
+func ParseIDMap(r io.Reader) ([]IDMap, error) {
+ return ParseIDMapFilter(r, nil)
+}
+
+func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ return ParseIDMapFilter(r, filter)
+}
+
+func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for idmap-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []IDMap{}
+ )
+
+ for s.Scan() {
+ line := bytes.TrimSpace(s.Bytes())
+ if len(line) == 0 {
+ continue
+ }
+
+ // see: man 7 user_namespaces
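+		// Each line is whitespace-separated "ID ParentID Count", as in
+		// /proc/<pid>/uid_map; e.g. "0 100000 65536" maps IDs 0-65535 inside
+		// the namespace to 100000-165535 in the parent.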
+ p := IDMap{}
+ parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go
new file mode 100644
index 00000000000..8c9bb5df39c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go
@@ -0,0 +1,42 @@
+// +build gofuzz
+
+package user
+
+import (
+ "io"
+ "strings"
+)
+
+func IsDivisibleBy(n int, divisibleBy int) bool {
+	return (n % divisibleBy) == 0
+}
+
+func FuzzUser(data []byte) int {
+ if len(data) == 0 {
+ return -1
+ }
+	if !IsDivisibleBy(len(data), 5) {
+ return -1
+ }
+
+ var divided [][]byte
+
+ chunkSize := len(data) / 5
+
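+	// Split the input into five equal chunks, one per parser input exercised
+	// below (passwd data, group data, a group name, more passwd data, and a
+	// user spec).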
+ for i := 0; i < len(data); i += chunkSize {
+ end := i + chunkSize
+
+ divided = append(divided, data[i:end])
+ }
+
+ _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil)
+
+ var passwd, group io.Reader
+
+ group = strings.NewReader(string(divided[1]))
+ _, _ = GetAdditionalGroups([]string{string(divided[2])}, group)
+
+ passwd = strings.NewReader(string(divided[3]))
+ _, _ = GetExecUser(string(divided[4]), nil, passwd, group)
+ return 1
+}
diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore
new file mode 100644
index 00000000000..7b5883475df
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/.dockerignore
@@ -0,0 +1,2 @@
+cmd/tomll/tomll
+cmd/tomljson/tomljson
diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore
new file mode 100644
index 00000000000..e6ba63a5c5c
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/.gitignore
@@ -0,0 +1,5 @@
+test_program/test_program_bin
+fuzz/
+cmd/tomll/tomll
+cmd/tomljson/tomljson
+cmd/tomltestgen/tomltestgen
diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
new file mode 100644
index 00000000000..98b9893d378
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
@@ -0,0 +1,132 @@
+## Contributing
+
+Thank you for your interest in go-toml! We appreciate you considering
+contributing to go-toml!
+
+The main goal of the project is to provide an easy-to-use TOML
+implementation for Go that gets the job done and gets out of your way –
+dealing with TOML is probably not the central piece of your project.
+
+As the single maintainer of go-toml, time is scarce. All help, big or
+small, is more than welcomed!
+
+### Ask questions
+
+Any question you may have, somebody else might have it too. Always feel
+free to ask them on the [issues tracker][issues-tracker]. We will try to
+answer them as clearly and quickly as possible, time permitting.
+
+Asking questions also helps us identify areas where the documentation needs
+improvement, or new features that weren't envisioned before. Sometimes, a
+seemingly innocent question leads to the fix of a bug. Don't hesitate and
+ask away!
+
+### Improve the documentation
+
+The best way to share your knowledge and experience with go-toml is to
+improve the documentation. Fix a typo, clarify an interface, add an
+example, anything goes!
+
+The documentation is present in the [README][readme] and throughout the
+source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a
+change to the documentation, create a pull request with your proposed
+changes. For simple changes like these, the easiest way is probably the
+"Fork this project and edit the file" button on GitHub, displayed at the
+top right of the file. Unless it's a trivial change (for example a
+typo), provide a little bit of context in your pull request description or
+commit message.
+
+### Report a bug
+
+Found a bug! Sorry to hear that :(. Help us and others track it down and
+fix it by reporting it. [File a new bug report][bug-report] on the [issues
+tracker][issues-tracker]. The template should provide enough guidance on
+what to include. When in doubt: add more details! Reducing ambiguity and
+providing more information decreases back-and-forth and saves everyone
+time.
+
+### Code changes
+
+Want to contribute a patch? Very happy to hear that!
+
+First, some high-level rules:
+
+* A short proposal with some POC code is better than a lengthy piece of
+ text with no code. Code speaks louder than words.
+* No backward-incompatible patch will be accepted unless discussed.
+ Sometimes it's hard, and Go's lack of versioning by default does not
+ help, but we try not to break people's programs unless we absolutely have
+ to.
+* If you are writing a new feature or extending an existing one, make sure
+ to write some documentation.
+* Bug fixes need to be accompanied with regression tests.
+* New code needs to be tested.
+* Your commit messages need to explain why the change is needed, even if
+ already included in the PR description.
+
+It does sound like a lot, but those best practices are here to save time
+overall and continuously improve the quality of the project, which is
+something everyone benefits from.
+
+#### Get started
+
+The fairly standard code contribution process looks like this:
+
+1. [Fork the project][fork].
+2. Make your changes, commit on any branch you like.
+3. [Open up a pull request][pull-request].
+4. Review; we may ask for changes.
+5. Merge. You're in!
+
+Feel free to ask for help! You can create draft pull requests to gather
+some early feedback!
+
+#### Run the tests
+
+You can run tests for go-toml using Go's test tool: `go test ./...`.
+When you create a pull request, all tests are run on Linux on a few Go
+versions (Travis CI), and on Windows using the latest Go version
+(AppVeyor).
+
+#### Style
+
+Try to look around and follow the same format and structure as the rest of
+the code. We enforce using `go fmt` on the whole code base.
+
+---
+
+### Maintainers-only
+
+#### Merge pull request
+
+Checklist:
+
+* Passing CI.
+* Does not introduce backward-incompatible changes (unless discussed).
+* Has relevant doc changes.
+* Has relevant unit tests.
+
+1. Merge using "squash and merge".
+2. Make sure to edit the commit message to keep all the useful information
+ nice and clean.
+3. Make sure the commit title is clear and contains the PR number (#123).
+
+#### New release
+
+1. Go to [releases][releases]. Click on "X commits to master since this
+ release".
+2. Make note of all the changes. Look for backward incompatible changes,
+ new features, and bug fixes.
+3. Pick the new version using the above and semver.
+4. Create a [new release][new-release].
+5. Follow the same format as [1.1.0][release-110].
+
+[issues-tracker]: https://github.com/pelletier/go-toml/issues
+[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
+[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
+[readme]: ./README.md
+[fork]: https://help.github.com/articles/fork-a-repo
+[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
+[releases]: https://github.com/pelletier/go-toml/releases
+[new-release]: https://github.com/pelletier/go-toml/releases/new
+[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0
diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile
new file mode 100644
index 00000000000..fffdb016668
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/Dockerfile
@@ -0,0 +1,11 @@
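+# Multi-stage build: compile the go-toml command-line tools with CGO disabled,
+# then copy only the static binaries into a minimal scratch image.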
+FROM golang:1.12-alpine3.9 as builder
+WORKDIR /go/src/github.com/pelletier/go-toml
+COPY . .
+ENV CGO_ENABLED=0
+ENV GOOS=linux
+RUN go install ./...
+
+FROM scratch
+COPY --from=builder /go/bin/tomll /usr/bin/tomll
+COPY --from=builder /go/bin/tomljson /usr/bin/tomljson
+COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE
new file mode 100644
index 00000000000..f414553c21a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/LICENSE
@@ -0,0 +1,247 @@
+The bulk of github.com/pelletier/go-toml is distributed under the MIT license
+(see below), with the exception of localtime.go and localtime_test.go.
+Those two files have been copied over from Google's civil library at revision
+ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache
+2.0 license (see below).
+
+
+github.com/pelletier/go-toml:
+
+
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+localtime.go, localtime_test.go:
+
+Originals:
+ https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
+ https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go
+Changes:
+ * Renamed files from civil* to localtime*.
+ * Package changed from civil to toml.
+ * 'Local' prefix added to all structs.
+License:
+ https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile
new file mode 100644
index 00000000000..9e4503aea65
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/Makefile
@@ -0,0 +1,29 @@
+export CGO_ENABLED=0
+go := go
+go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1)
+go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2)
+
+out.tools := tomll tomljson jsontoml
+out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz)
+sources := $(wildcard **/*.go)
+
+
+.PHONY:
+tools: $(out.tools)
+
+$(out.tools): $(sources)
+ GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@
+
+.PHONY:
+dist: $(out.dist)
+
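+# Static pattern rule: each <tool>_<GOOS>_<GOARCH>.tar.xz archive is built from
+# the matching <tool> binary (with .exe appended when targeting Windows).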
+$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: %
+ if [ "$(go.goos)" = "windows" ]; then \
+ tar -cJf $@ $^.exe; \
+ else \
+ tar -cJf $@ $^; \
+ fi
+
+.PHONY:
+clean:
+ rm -rf $(out.tools) $(out.dist)
diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000000..041cdc4a2f1
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,5 @@
+**Issue:** add link to pelletier/go-toml issue here
+
+Explanation of what this pull request does.
+
+More detailed description of the decisions being made and the reasons why (if the patch is non-trivial).
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
new file mode 100644
index 00000000000..6c061712bb1
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -0,0 +1,176 @@
+# go-toml
+
+Go library for the [TOML](https://toml.io/) format.
+
+This library supports TOML version
+[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3).
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml)
+[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
+[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master)
+[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
+
+
+## Development status
+
+**ℹ️ Consider go-toml v2!**
+
+The next version of go-toml is in [active development][v2-dev], and
+[nearing completion][v2-map].
+
+Though technically in beta, v2 is already better tested, [fixes bugs][v1-bugs],
+and is [much faster][v2-bench]. If you only need to read and write TOML
+documents (the majority of cases), those features are implemented and the API
+is unlikely to change.
+
+The remaining features (Document structure editing and tooling) will be added
+shortly. While pull-requests are welcome on v1, no active development is
+expected on it. When v2.0.0 is released, v1 will be deprecated.
+
+👉 [go-toml v2][v2]
+
+[v2]: https://github.com/pelletier/go-toml/tree/v2
+[v2-map]: https://github.com/pelletier/go-toml/discussions/506
+[v2-dev]: https://github.com/pelletier/go-toml/tree/v2
+[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed
+[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks
+
+## Features
+
+Go-toml provides the following features for using data parsed from TOML documents:
+
+* Load TOML documents from files and string data
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
+* Line & column position data for all parsed elements
+* [Query support similar to JSON-Path](query/)
+* Syntax errors contain line and column numbers
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml"
+```
+
+## Usage example
+
+Read a TOML document:
+
+```go
+config, _ := toml.Load(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
+
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password := postgresConfig.Get("password").(string)
+```
+
+Or use Unmarshal:
+
+```go
+type Postgres struct {
+ User string
+ Password string
+}
+type Config struct {
+ Postgres Postgres
+}
+
+doc := []byte(`
+[Postgres]
+User = "pelletier"
+Password = "mypassword"`)
+
+config := Config{}
+toml.Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
+```
+
+Or use a query:
+
+```go
+// use a query to gather elements without walking the tree
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
+for ii, item := range results.Values() {
+ fmt.Printf("Query result %d: %v\n", ii, item)
+}
+```
+
+## Documentation
+
+The documentation and additional examples are available at
+[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml).
+
+## Tools
+
+Go-toml provides three handy command line tools:
+
+* `tomll`: Reads TOML files and lints them.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/tomll
+ tomll --help
+ ```
+* `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/tomljson
+ tomljson --help
+ ```
+
+* `jsontoml`: Reads a JSON file and outputs a TOML representation.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/jsontoml
+ jsontoml --help
+ ```
+
+### Docker image
+
+Those tools are also available as a Docker image from
+[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to
+use `tomljson`:
+
+```
+docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml
+```
+
+Only master (`latest`) and tagged versions are published to dockerhub. You
+can build your own image as usual:
+
+```
+docker build -t go-toml .
+```
+
+## Contribute
+
+Feel free to report bugs and patches using GitHub's pull requests system on
+[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be
+much appreciated!
+
+### Run tests
+
+`go test ./...`
+
+### Fuzzing
+
+The script `./fuzz.sh` is available to
+run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
+
+## Versioning
+
+Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
+of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
+this document. The last two major versions of Go are supported
+(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE).
diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
new file mode 100644
index 00000000000..4af198b4d97
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
@@ -0,0 +1,188 @@
+trigger:
+- master
+
+stages:
+- stage: run_checks
+ displayName: "Check"
+ dependsOn: []
+ jobs:
+ - job: fmt
+ displayName: "fmt"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.16"
+ inputs:
+ version: "1.16"
+ - task: Go@0
+ displayName: "go fmt ./..."
+ inputs:
+ command: 'custom'
+ customCommand: 'fmt'
+ arguments: './...'
+ - job: coverage
+ displayName: "coverage"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.16"
+ inputs:
+ version: "1.16"
+ - task: Go@0
+ displayName: "Generate coverage"
+ inputs:
+ command: 'test'
+ arguments: "-race -coverprofile=coverage.txt -covermode=atomic"
+ - task: Bash@3
+ inputs:
+ targetType: 'inline'
+ script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}'
+ env:
+ CODECOV_TOKEN: $(CODECOV_TOKEN)
+ - job: benchmark
+ displayName: "benchmark"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.16"
+ inputs:
+ version: "1.16"
+ - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
+ - task: Bash@3
+ inputs:
+ filePath: './benchmark.sh'
+ arguments: "master $(Build.Repository.Uri)"
+
+ - job: go_unit_tests
+ displayName: "unit tests"
+ strategy:
+ matrix:
+ linux 1.16:
+ goVersion: '1.16'
+ imageName: 'ubuntu-latest'
+ mac 1.16:
+ goVersion: '1.16'
+ imageName: 'macOS-latest'
+ windows 1.16:
+ goVersion: '1.16'
+ imageName: 'windows-latest'
+ linux 1.15:
+ goVersion: '1.15'
+ imageName: 'ubuntu-latest'
+ mac 1.15:
+ goVersion: '1.15'
+ imageName: 'macOS-latest'
+ windows 1.15:
+ goVersion: '1.15'
+ imageName: 'windows-latest'
+ pool:
+ vmImage: $(imageName)
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go $(goVersion)"
+ inputs:
+ version: $(goVersion)
+ - task: Go@0
+ displayName: "go test ./..."
+ inputs:
+ command: 'test'
+ arguments: './...'
+- stage: build_binaries
+ displayName: "Build binaries"
+ dependsOn: run_checks
+ jobs:
+ - job: build_binary
+ displayName: "Build binary"
+ strategy:
+ matrix:
+ linux_amd64:
+ GOOS: linux
+ GOARCH: amd64
+ darwin_amd64:
+ GOOS: darwin
+ GOARCH: amd64
+ windows_amd64:
+ GOOS: windows
+ GOARCH: amd64
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go"
+ inputs:
+ version: 1.16
+ - task: Bash@3
+ inputs:
+ targetType: inline
+ script: "make dist"
+ env:
+ go.goos: $(GOOS)
+ go.goarch: $(GOARCH)
+ - task: CopyFiles@2
+ inputs:
+ sourceFolder: '$(Build.SourcesDirectory)'
+ contents: '*.tar.xz'
+ TargetFolder: '$(Build.ArtifactStagingDirectory)'
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: '$(Build.ArtifactStagingDirectory)'
+ artifactName: binaries
+- stage: build_binaries_manifest
+ displayName: "Build binaries manifest"
+ dependsOn: build_binaries
+ jobs:
+ - job: build_manifest
+ displayName: "Build binaries manifest"
+ steps:
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ buildType: 'current'
+ downloadType: 'single'
+ artifactName: 'binaries'
+ downloadPath: '$(Build.SourcesDirectory)'
+ - task: Bash@3
+ inputs:
+ targetType: inline
+ script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt"
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: '$(Build.ArtifactStagingDirectory)'
+ artifactName: manifest
+
+- stage: build_docker_image
+ displayName: "Build Docker image"
+ dependsOn: run_checks
+ jobs:
+ - job: build
+ displayName: "Build"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: Docker@2
+ inputs:
+ command: 'build'
+ Dockerfile: 'Dockerfile'
+ buildContext: '.'
+ addPipelineData: false
+
+- stage: publish_docker_image
+ displayName: "Publish Docker image"
+ dependsOn: build_docker_image
+ condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
+ jobs:
+ - job: publish
+ displayName: "Publish"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: Docker@2
+ inputs:
+ containerRegistry: 'DockerHub'
+ repository: 'pelletier/go-toml'
+ command: 'buildAndPush'
+ Dockerfile: 'Dockerfile'
+ buildContext: '.'
+ tags: 'latest'
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh
new file mode 100644
index 00000000000..a69d3040fa2
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+set -ex
+
+reference_ref=${1:-master}
+reference_git=${2:-.}
+
+if ! hash benchstat 2>/dev/null; then
+ echo "Installing benchstat"
+ go get golang.org/x/perf/cmd/benchstat
+fi
+
+tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
+ref_tempdir="${tempdir}/ref"
+ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
+local_benchmark="`pwd`/benchmark-local.txt"
+
+echo "=== ${reference_ref} (${ref_tempdir})"
+git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
+pushd ${ref_tempdir} >/dev/null
+git checkout ${reference_ref} >/dev/null 2>/dev/null
+go test -bench=. -benchmem | tee ${ref_benchmark}
+cd benchmark
+go test -bench=. -benchmem | tee -a ${ref_benchmark}
+popd >/dev/null
+
+echo ""
+echo "=== local"
+go test -bench=. -benchmem | tee ${local_benchmark}
+cd benchmark
+go test -bench=. -benchmem | tee -a ${local_benchmark}
+
+echo ""
+echo "=== diff"
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
new file mode 100644
index 00000000000..a1406a32b38
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -0,0 +1,23 @@
+// Package toml is a TOML parser and manipulation library.
+//
+// This version supports the specification as described in
+// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md
+//
+// Marshaling
+//
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
+//
+// TOML document as a tree
+//
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
+//
+// JSONPath-like queries
+//
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
+//
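+// A minimal usage sketch (illustrative; error handling elided):
+//
+//   var cfg struct{ Title string }
+//   _ = toml.Unmarshal([]byte(`Title = "example"`), &cfg)
+//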
+package toml
diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml
new file mode 100644
index 00000000000..780d9c68f2d
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml
@@ -0,0 +1,30 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml
new file mode 100644
index 00000000000..f45bf88b8f6
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example.toml
@@ -0,0 +1,30 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go
new file mode 100644
index 00000000000..14570c8d357
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.go
@@ -0,0 +1,31 @@
+// +build gofuzz
+
+package toml
+
+func Fuzz(data []byte) int {
+ tree, err := LoadBytes(data)
+ if err != nil {
+ if tree != nil {
+ panic("tree must be nil if there is an error")
+ }
+ return 0
+ }
+
+ str, err := tree.ToTomlString()
+ if err != nil {
+ if str != "" {
+ panic(`str must be "" if there is an error`)
+ }
+ panic(err)
+ }
+
+ tree, err = Load(str)
+ if err != nil {
+ if tree != nil {
+ panic("tree must be nil if there is an error")
+ }
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh
new file mode 100644
index 00000000000..3204b4c4463
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.sh
@@ -0,0 +1,15 @@
+#! /bin/sh
+set -eu
+
+go get github.com/dvyukov/go-fuzz/go-fuzz
+go get github.com/dvyukov/go-fuzz/go-fuzz-build
+
+if [ ! -e toml-fuzz.zip ]; then
+ go-fuzz-build github.com/pelletier/go-toml
+fi
+
+rm -fr fuzz
+mkdir -p fuzz/corpus
+cp *.toml fuzz/corpus
+
+go-fuzz -bin=toml-fuzz.zip -workdir=fuzz
diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod
new file mode 100644
index 00000000000..7d29a0a664b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/go.mod
@@ -0,0 +1,3 @@
+module github.com/pelletier/go-toml
+
+go 1.12
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
new file mode 100644
index 00000000000..e091500b246
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -0,0 +1,112 @@
+// Parsing of keys, handling both bare and quoted keys.
+
+package toml
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Convert the bare key group string to an array.
+// The input supports double-quoted and single-quoted keys, but escape
+// sequences are not supported; lexers must unescape them beforehand.
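+//
+// For example (illustrative): parseKey(`a."b".c`) returns ["a", "b", "c"].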
+func parseKey(key string) ([]string, error) {
+ runes := []rune(key)
+ var groups []string
+
+ if len(key) == 0 {
+ return nil, errors.New("empty key")
+ }
+
+ idx := 0
+ for idx < len(runes) {
+ for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
+ // skip leading whitespace
+ }
+ if idx >= len(runes) {
+ break
+ }
+ r := runes[idx]
+ if isValidBareChar(r) {
+ // parse bare key
+ startIdx := idx
+ endIdx := -1
+ idx++
+ for idx < len(runes) {
+ r = runes[idx]
+ if isValidBareChar(r) {
+ idx++
+ } else if r == '.' {
+ endIdx = idx
+ break
+ } else if isSpace(r) {
+ endIdx = idx
+ for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
+ // skip trailing whitespace
+ }
+ if idx < len(runes) && runes[idx] != '.' {
+ return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx])
+ }
+ break
+ } else {
+ return nil, fmt.Errorf("invalid bare key character: %c", r)
+ }
+ }
+ if endIdx == -1 {
+ endIdx = idx
+ }
+ groups = append(groups, string(runes[startIdx:endIdx]))
+ } else if r == '\'' {
+ // parse single quoted key
+ idx++
+ startIdx := idx
+ for {
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unclosed single-quoted key")
+ }
+ r = runes[idx]
+ if r == '\'' {
+ groups = append(groups, string(runes[startIdx:idx]))
+ idx++
+ break
+ }
+ idx++
+ }
+ } else if r == '"' {
+ // parse double quoted key
+ idx++
+ startIdx := idx
+ for {
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unclosed double-quoted key")
+ }
+ r = runes[idx]
+ if r == '"' {
+ groups = append(groups, string(runes[startIdx:idx]))
+ idx++
+ break
+ }
+ idx++
+ }
+ } else if r == '.' {
+ idx++
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unexpected end of key")
+ }
+ r = runes[idx]
+ if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' {
+ return nil, fmt.Errorf("expecting key part after dot")
+ }
+ } else {
+ return nil, fmt.Errorf("invalid key character: %c", r)
+ }
+ }
+ if len(groups) == 0 {
+ return nil, fmt.Errorf("empty key")
+ }
+ return groups, nil
+}
+
+func isValidBareChar(r rune) bool {
+ return isAlphanumeric(r) || r == '-' || isDigit(r)
+}
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
new file mode 100644
index 00000000000..313908e3e02
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -0,0 +1,1031 @@
+// TOML lexer.
+//
+// Written using the principles developed by Rob Pike in
+// http://www.youtube.com/watch?v=HxaD_trXwRE
+
+package toml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Define state functions
+type tomlLexStateFn func() tomlLexStateFn
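+
+// Each state function lexes one construct and returns the next state function
+// to run, or nil when done; the lexer is driven by repeatedly invoking the
+// current state, e.g. (sketch): for state := fn; state != nil; { state = state() }.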
+
+// Define lexer
+type tomlLexer struct {
+ inputIdx int
+ input []rune // Textual source
+ currentTokenStart int
+ currentTokenStop int
+ tokens []token
+ brackets []rune
+ line int
+ col int
+ endbufferLine int
+ endbufferCol int
+}
+
+// Basic read operations on input
+
+func (l *tomlLexer) read() rune {
+ r := l.peek()
+ if r == '\n' {
+ l.endbufferLine++
+ l.endbufferCol = 1
+ } else {
+ l.endbufferCol++
+ }
+ l.inputIdx++
+ return r
+}
+
+func (l *tomlLexer) next() rune {
+ r := l.read()
+
+ if r != eof {
+ l.currentTokenStop++
+ }
+ return r
+}
+
+func (l *tomlLexer) ignore() {
+ l.currentTokenStart = l.currentTokenStop
+ l.line = l.endbufferLine
+ l.col = l.endbufferCol
+}
+
+func (l *tomlLexer) skip() {
+ l.next()
+ l.ignore()
+}
+
+func (l *tomlLexer) fastForward(n int) {
+ for i := 0; i < n; i++ {
+ l.next()
+ }
+}
+
+func (l *tomlLexer) emitWithValue(t tokenType, value string) {
+ l.tokens = append(l.tokens, token{
+ Position: Position{l.line, l.col},
+ typ: t,
+ val: value,
+ })
+ l.ignore()
+}
+
+func (l *tomlLexer) emit(t tokenType) {
+ l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
+}
+
+func (l *tomlLexer) peek() rune {
+ if l.inputIdx >= len(l.input) {
+ return eof
+ }
+ return l.input[l.inputIdx]
+}
+
+func (l *tomlLexer) peekString(size int) string {
+ maxIdx := len(l.input)
+ upperIdx := l.inputIdx + size // FIXME: potential overflow
+ if upperIdx > maxIdx {
+ upperIdx = maxIdx
+ }
+ return string(l.input[l.inputIdx:upperIdx])
+}
+
+func (l *tomlLexer) follow(next string) bool {
+ return next == l.peekString(len(next))
+}
+
+// Error management
+
+func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
+ l.tokens = append(l.tokens, token{
+ Position: Position{l.line, l.col},
+ typ: tokenError,
+ val: fmt.Sprintf(format, args...),
+ })
+ return nil
+}
+
+// State functions
+
+func (l *tomlLexer) lexVoid() tomlLexStateFn {
+ for {
+ next := l.peek()
+ switch next {
+ case '}': // after '{'
+ return l.lexRightCurlyBrace
+ case '[':
+ return l.lexTableKey
+ case '#':
+ return l.lexComment(l.lexVoid)
+ case '=':
+ return l.lexEqual
+ case '\r':
+ fallthrough
+ case '\n':
+ l.skip()
+ continue
+ }
+
+ if isSpace(next) {
+ l.skip()
+ }
+
+ if isKeyStartChar(next) {
+ return l.lexKey
+ }
+
+ if next == eof {
+ l.next()
+ break
+ }
+ }
+
+ l.emit(tokenEOF)
+ return nil
+}
+
+func (l *tomlLexer) lexRvalue() tomlLexStateFn {
+ for {
+ next := l.peek()
+ switch next {
+ case '.':
+ return l.errorf("cannot start float with a dot")
+ case '=':
+ return l.lexEqual
+ case '[':
+ return l.lexLeftBracket
+ case ']':
+ return l.lexRightBracket
+ case '{':
+ return l.lexLeftCurlyBrace
+ case '}':
+ return l.lexRightCurlyBrace
+ case '#':
+ return l.lexComment(l.lexRvalue)
+ case '"':
+ return l.lexString
+ case '\'':
+ return l.lexLiteralString
+ case ',':
+ return l.lexComma
+ case '\r':
+ fallthrough
+ case '\n':
+ l.skip()
+ if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' {
+ return l.lexRvalue
+ }
+ return l.lexVoid
+ }
+
+ if l.follow("true") {
+ return l.lexTrue
+ }
+
+ if l.follow("false") {
+ return l.lexFalse
+ }
+
+ if l.follow("inf") {
+ return l.lexInf
+ }
+
+ if l.follow("nan") {
+ return l.lexNan
+ }
+
+ if isSpace(next) {
+ l.skip()
+ continue
+ }
+
+ if next == eof {
+ l.next()
+ break
+ }
+
+ if next == '+' || next == '-' {
+ return l.lexNumber
+ }
+
+ if isDigit(next) {
+ return l.lexDateTimeOrNumber
+ }
+
+ return l.errorf("no value can start with %c", next)
+ }
+
+ l.emit(tokenEOF)
+ return nil
+}
+
+func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn {
+ // Could be either a date/time, or a digit.
+ // The options for date/times are:
+ // YYYY-... => date or date-time
+ // HH:... => time
+ // Anything else should be a number.
+
+ lookAhead := l.peekString(5)
+ if len(lookAhead) < 3 {
+ return l.lexNumber()
+ }
+
+ for idx, r := range lookAhead {
+ if !isDigit(r) {
+ if idx == 2 && r == ':' {
+ return l.lexDateTimeOrTime()
+ }
+ if idx == 4 && r == '-' {
+ return l.lexDateTimeOrTime()
+ }
+ return l.lexNumber()
+ }
+ }
+ return l.lexNumber()
+}
+
+func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
+ l.next()
+ l.emit(tokenLeftCurlyBrace)
+ l.brackets = append(l.brackets, '{')
+ return l.lexVoid
+}
+
+func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
+ l.next()
+ l.emit(tokenRightCurlyBrace)
+ if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' {
+ return l.errorf("cannot have '}' here")
+ }
+ l.brackets = l.brackets[:len(l.brackets)-1]
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn {
+ // Example matches:
+ // 1979-05-27T07:32:00Z
+ // 1979-05-27T00:32:00-07:00
+ // 1979-05-27T00:32:00.999999-07:00
+ // 1979-05-27 07:32:00Z
+ // 1979-05-27 00:32:00-07:00
+ // 1979-05-27 00:32:00.999999-07:00
+ // 1979-05-27T07:32:00
+ // 1979-05-27T00:32:00.999999
+ // 1979-05-27 07:32:00
+ // 1979-05-27 00:32:00.999999
+ // 1979-05-27
+ // 07:32:00
+ // 00:32:00.999999
+
+ // we already know those two are digits
+ l.next()
+ l.next()
+
+ // Got 2 digits. At that point it could be either a time or a date(-time).
+
+ r := l.next()
+ if r == ':' {
+ return l.lexTime()
+ }
+
+ return l.lexDateTime()
+}
+
+func (l *tomlLexer) lexDateTime() tomlLexStateFn {
+ // This state accepts an offset date-time, a local date-time, or a local date.
+ //
+ // v--- cursor
+ // 1979-05-27T07:32:00Z
+ // 1979-05-27T00:32:00-07:00
+ // 1979-05-27T00:32:00.999999-07:00
+ // 1979-05-27 07:32:00Z
+ // 1979-05-27 00:32:00-07:00
+ // 1979-05-27 00:32:00.999999-07:00
+ // 1979-05-27T07:32:00
+ // 1979-05-27T00:32:00.999999
+ // 1979-05-27 07:32:00
+ // 1979-05-27 00:32:00.999999
+ // 1979-05-27
+
+ // date
+
+ // already checked by lexRvalue
+ l.next() // digit
+ l.next() // -
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid month digit in date: %c", r)
+ }
+ }
+
+ r := l.next()
+ if r != '-' {
+ return l.errorf("expected - to separate month of a date, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid day digit in date: %c", r)
+ }
+ }
+
+ l.emit(tokenLocalDate)
+
+ r = l.peek()
+
+	if r == eof {
+		return l.lexRvalue
+	}
+
+ if r != ' ' && r != 'T' {
+ return l.errorf("incorrect date/time separation character: %c", r)
+ }
+
+ if r == ' ' {
+ lookAhead := l.peekString(3)[1:]
+ if len(lookAhead) < 2 {
+ return l.lexRvalue
+ }
+ for _, r := range lookAhead {
+ if !isDigit(r) {
+ return l.lexRvalue
+ }
+ }
+ }
+
+ l.skip() // skip the T or ' '
+
+ // time
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid hour digit in time: %c", r)
+ }
+ }
+
+ r = l.next()
+ if r != ':' {
+ return l.errorf("time hour/minute separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid minute digit in time: %c", r)
+ }
+ }
+
+ r = l.next()
+ if r != ':' {
+ return l.errorf("time minute/second separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid second digit in time: %c", r)
+ }
+ }
+
+ r = l.peek()
+ if r == '.' {
+ l.next()
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("expected at least one digit in time's fraction, not %c", r)
+ }
+
+ for {
+ r := l.peek()
+ if !isDigit(r) {
+ break
+ }
+ l.next()
+ }
+ }
+
+ l.emit(tokenLocalTime)
+
+	return l.lexTimeOffset
+}
+
+func (l *tomlLexer) lexTimeOffset() tomlLexStateFn {
+ // potential offset
+
+ // Z
+ // -07:00
+ // +07:00
+ // nothing
+
+ r := l.peek()
+
+ if r == 'Z' {
+ l.next()
+ l.emit(tokenTimeOffset)
+ } else if r == '+' || r == '-' {
+ l.next()
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid hour digit in time offset: %c", r)
+ }
+ }
+
+ r = l.next()
+ if r != ':' {
+ return l.errorf("time offset hour/minute separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid minute digit in time offset: %c", r)
+ }
+ }
+
+ l.emit(tokenTimeOffset)
+ }
+
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTime() tomlLexStateFn {
+ // v--- cursor
+ // 07:32:00
+ // 00:32:00.999999
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid minute digit in time: %c", r)
+ }
+ }
+
+ r := l.next()
+ if r != ':' {
+ return l.errorf("time minute/second separator should be :, not %c", r)
+ }
+
+ for i := 0; i < 2; i++ {
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("invalid second digit in time: %c", r)
+ }
+ }
+
+ r = l.peek()
+ if r == '.' {
+ l.next()
+ r := l.next()
+ if !isDigit(r) {
+ return l.errorf("expected at least one digit in time's fraction, not %c", r)
+ }
+
+ for {
+ r := l.peek()
+ if !isDigit(r) {
+ break
+ }
+ l.next()
+ }
+ }
+
+ l.emit(tokenLocalTime)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTrue() tomlLexStateFn {
+ l.fastForward(4)
+ l.emit(tokenTrue)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexFalse() tomlLexStateFn {
+ l.fastForward(5)
+ l.emit(tokenFalse)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexInf() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenInf)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexNan() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenNan)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexEqual() tomlLexStateFn {
+ l.next()
+ l.emit(tokenEqual)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexComma() tomlLexStateFn {
+ l.next()
+ l.emit(tokenComma)
+ if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' {
+ return l.lexVoid
+ }
+ return l.lexRvalue
+}
+
+// Parses the key and emits its value without escape sequences.
+// Bare keys, basic string keys, and literal string keys are supported.
+func (l *tomlLexer) lexKey() tomlLexStateFn {
+ var sb strings.Builder
+
+ for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
+ if r == '"' {
+ l.next()
+ str, err := l.lexStringAsString(`"`, false, true)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ sb.WriteString("\"")
+ sb.WriteString(str)
+ sb.WriteString("\"")
+ l.next()
+ continue
+ } else if r == '\'' {
+ l.next()
+ str, err := l.lexLiteralStringAsString(`'`, false)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ sb.WriteString("'")
+ sb.WriteString(str)
+ sb.WriteString("'")
+ l.next()
+ continue
+ } else if r == '\n' {
+ return l.errorf("keys cannot contain new lines")
+ } else if isSpace(r) {
+ var str strings.Builder
+ str.WriteString(" ")
+
+ // skip trailing whitespace
+ l.next()
+ for r = l.peek(); isSpace(r); r = l.peek() {
+ str.WriteRune(r)
+ l.next()
+ }
+ // break loop if not a dot
+ if r != '.' {
+ break
+ }
+ str.WriteString(".")
+ // skip trailing whitespace after dot
+ l.next()
+ for r = l.peek(); isSpace(r); r = l.peek() {
+ str.WriteRune(r)
+ l.next()
+ }
+ sb.WriteString(str.String())
+ continue
+ } else if r == '.' {
+ // skip
+ } else if !isValidBareChar(r) {
+ return l.errorf("keys cannot contain %c character", r)
+ }
+ sb.WriteRune(r)
+ l.next()
+ }
+ l.emitWithValue(tokenKey, sb.String())
+ return l.lexVoid
+}
+
+func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
+ return func() tomlLexStateFn {
+ for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
+ if next == '\r' && l.follow("\r\n") {
+ break
+ }
+ l.next()
+ }
+ l.ignore()
+ return previousState
+ }
+}
+
+func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
+ l.next()
+ l.emit(tokenLeftBracket)
+ l.brackets = append(l.brackets, '[')
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
+ var sb strings.Builder
+
+ if discardLeadingNewLine {
+ if l.follow("\r\n") {
+ l.skip()
+ l.skip()
+ } else if l.peek() == '\n' {
+ l.skip()
+ }
+ }
+
+ // find end of string
+ for {
+ if l.follow(terminator) {
+ return sb.String(), nil
+ }
+
+ next := l.peek()
+ if next == eof {
+ break
+ }
+ sb.WriteRune(l.next())
+ }
+
+ return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
+ l.skip()
+
+ // handle special case for triple-quote
+ terminator := "'"
+ discardLeadingNewLine := false
+ if l.follow("''") {
+ l.skip()
+ l.skip()
+ terminator = "'''"
+ discardLeadingNewLine = true
+ }
+
+ str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ l.emitWithValue(tokenString, str)
+ l.fastForward(len(terminator))
+ l.ignore()
+ return l.lexRvalue
+}
+
+// Lex a string and return the results as a string.
+// Terminator is the substring indicating the end of the token.
+// The resulting string does not include the terminator.
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
+ var sb strings.Builder
+
+ if discardLeadingNewLine {
+ if l.follow("\r\n") {
+ l.skip()
+ l.skip()
+ } else if l.peek() == '\n' {
+ l.skip()
+ }
+ }
+
+ for {
+ if l.follow(terminator) {
+ return sb.String(), nil
+ }
+
+ if l.follow("\\") {
+ l.next()
+ switch l.peek() {
+ case '\r':
+ fallthrough
+ case '\n':
+ fallthrough
+ case '\t':
+ fallthrough
+ case ' ':
+ // skip all whitespace chars following backslash
+ for strings.ContainsRune("\r\n\t ", l.peek()) {
+ l.next()
+ }
+ case '"':
+ sb.WriteString("\"")
+ l.next()
+ case 'n':
+ sb.WriteString("\n")
+ l.next()
+ case 'b':
+ sb.WriteString("\b")
+ l.next()
+ case 'f':
+ sb.WriteString("\f")
+ l.next()
+ case '/':
+ sb.WriteString("/")
+ l.next()
+ case 't':
+ sb.WriteString("\t")
+ l.next()
+ case 'r':
+ sb.WriteString("\r")
+ l.next()
+ case '\\':
+ sb.WriteString("\\")
+ l.next()
+ case 'u':
+ l.next()
+ var code strings.Builder
+ for i := 0; i < 4; i++ {
+ c := l.peek()
+ if !isHexDigit(c) {
+ return "", errors.New("unfinished unicode escape")
+ }
+ l.next()
+ code.WriteRune(c)
+ }
+ intcode, err := strconv.ParseInt(code.String(), 16, 32)
+ if err != nil {
+ return "", errors.New("invalid unicode escape: \\u" + code.String())
+ }
+ sb.WriteRune(rune(intcode))
+ case 'U':
+ l.next()
+ var code strings.Builder
+ for i := 0; i < 8; i++ {
+ c := l.peek()
+ if !isHexDigit(c) {
+ return "", errors.New("unfinished unicode escape")
+ }
+ l.next()
+ code.WriteRune(c)
+ }
+ intcode, err := strconv.ParseInt(code.String(), 16, 64)
+ if err != nil {
+ return "", errors.New("invalid unicode escape: \\U" + code.String())
+ }
+ sb.WriteRune(rune(intcode))
+ default:
+ return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
+ }
+ } else {
+ r := l.peek()
+
+ if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) {
+ return "", fmt.Errorf("unescaped control character %U", r)
+ }
+ l.next()
+ sb.WriteRune(r)
+ }
+
+ if l.peek() == eof {
+ break
+ }
+ }
+
+ return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexString() tomlLexStateFn {
+ l.skip()
+
+ // handle special case for triple-quote
+ terminator := `"`
+ discardLeadingNewLine := false
+ acceptNewLines := false
+ if l.follow(`""`) {
+ l.skip()
+ l.skip()
+ terminator = `"""`
+ discardLeadingNewLine = true
+ acceptNewLines = true
+ }
+
+ str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ l.emitWithValue(tokenString, str)
+ l.fastForward(len(terminator))
+ l.ignore()
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTableKey() tomlLexStateFn {
+ l.next()
+
+ if l.peek() == '[' {
+ // token '[[' signifies an array of tables
+ l.next()
+ l.emit(tokenDoubleLeftBracket)
+ return l.lexInsideTableArrayKey
+ }
+ // vanilla table key
+ l.emit(tokenLeftBracket)
+ return l.lexInsideTableKey
+}
+
+// Parses the key until "]]"; only bare keys are supported.
+func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
+ for r := l.peek(); r != eof; r = l.peek() {
+ switch r {
+ case ']':
+ if l.currentTokenStop > l.currentTokenStart {
+ l.emit(tokenKeyGroupArray)
+ }
+ l.next()
+ if l.peek() != ']' {
+ break
+ }
+ l.next()
+ l.emit(tokenDoubleRightBracket)
+ return l.lexVoid
+ case '[':
+			return l.errorf("table array key cannot contain '['")
+ default:
+ l.next()
+ }
+ }
+ return l.errorf("unclosed table array key")
+}
+
+// Parses the key until "]"; only bare keys are supported.
+func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
+ for r := l.peek(); r != eof; r = l.peek() {
+ switch r {
+ case ']':
+ if l.currentTokenStop > l.currentTokenStart {
+ l.emit(tokenKeyGroup)
+ }
+ l.next()
+ l.emit(tokenRightBracket)
+ return l.lexVoid
+ case '[':
+			return l.errorf("table key cannot contain '['")
+ default:
+ l.next()
+ }
+ }
+ return l.errorf("unclosed table key")
+}
+
+func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
+ l.next()
+ l.emit(tokenRightBracket)
+ if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' {
+ return l.errorf("cannot have ']' here")
+ }
+ l.brackets = l.brackets[:len(l.brackets)-1]
+ return l.lexRvalue
+}
+
+type validRuneFn func(r rune) bool
+
+func isValidHexRune(r rune) bool {
+ return r >= 'a' && r <= 'f' ||
+ r >= 'A' && r <= 'F' ||
+ r >= '0' && r <= '9' ||
+ r == '_'
+}
+
+func isValidOctalRune(r rune) bool {
+ return r >= '0' && r <= '7' || r == '_'
+}
+
+func isValidBinaryRune(r rune) bool {
+ return r == '0' || r == '1' || r == '_'
+}
+
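+// lexNumber scans an integer or a float: an optional sign, then either a
+// 0x/0o/0b prefixed integer, inf/nan, or decimal digits with optional '_'
+// separators, a fraction, and an exponent.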
+func (l *tomlLexer) lexNumber() tomlLexStateFn {
+ r := l.peek()
+
+ if r == '0' {
+ follow := l.peekString(2)
+ if len(follow) == 2 {
+ var isValidRune validRuneFn
+ switch follow[1] {
+ case 'x':
+ isValidRune = isValidHexRune
+ case 'o':
+ isValidRune = isValidOctalRune
+ case 'b':
+ isValidRune = isValidBinaryRune
+ default:
+ if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' {
+ return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1]))
+ }
+ }
+
+ if isValidRune != nil {
+ l.next()
+ l.next()
+ digitSeen := false
+ for {
+ next := l.peek()
+ if !isValidRune(next) {
+ break
+ }
+ digitSeen = true
+ l.next()
+ }
+
+ if !digitSeen {
+ return l.errorf("number needs at least one digit")
+ }
+
+ l.emit(tokenInteger)
+
+ return l.lexRvalue
+ }
+ }
+ }
+
+ if r == '+' || r == '-' {
+ l.next()
+ if l.follow("inf") {
+ return l.lexInf
+ }
+ if l.follow("nan") {
+ return l.lexNan
+ }
+ }
+
+ pointSeen := false
+ expSeen := false
+ digitSeen := false
+ for {
+ next := l.peek()
+ if next == '.' {
+ if pointSeen {
+ return l.errorf("cannot have two dots in one float")
+ }
+ l.next()
+ if !isDigit(l.peek()) {
+ return l.errorf("float cannot end with a dot")
+ }
+ pointSeen = true
+ } else if next == 'e' || next == 'E' {
+ expSeen = true
+ l.next()
+ r := l.peek()
+ if r == '+' || r == '-' {
+ l.next()
+ }
+ } else if isDigit(next) {
+ digitSeen = true
+ l.next()
+ } else if next == '_' {
+ l.next()
+ } else {
+ break
+ }
+ if pointSeen && !digitSeen {
+ return l.errorf("cannot start float with a dot")
+ }
+ }
+
+ if !digitSeen {
+ return l.errorf("no digit in that number")
+ }
+ if pointSeen || expSeen {
+ l.emit(tokenFloat)
+ } else {
+ l.emit(tokenInteger)
+ }
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) run() {
+ for state := l.lexVoid; state != nil; {
+ state = state()
+ }
+}
+
+// Entry point
+func lexToml(inputBytes []byte) []token {
+ runes := bytes.Runes(inputBytes)
+ l := &tomlLexer{
+ input: runes,
+ tokens: make([]token, 0, 256),
+ line: 1,
+ col: 1,
+ endbufferLine: 1,
+ endbufferCol: 1,
+ }
+ l.run()
+ return l.tokens
+}
diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go
new file mode 100644
index 00000000000..9dfe4b9e6cb
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/localtime.go
@@ -0,0 +1,287 @@
+// Implementation of TOML's local date/time.
+//
+// Copied over from Google's civil to avoid pulling all the Google dependencies.
+// Originals:
+// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
+// Changes:
+// * Renamed files from civil* to localtime*.
+// * Package changed from civil to toml.
+// * 'Local' prefix added to all structs.
+//
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// These types implement civil time, a time-zone-independent
+// representation of time that follows the rules of the proleptic
+// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
+// minutes.
+//
+// Because they lack location information, these types do not represent unique
+// moments or intervals of time. Use time.Time for that purpose.
+package toml
+
+import (
+ "fmt"
+ "time"
+)
+
+// A LocalDate represents a date (year, month, day).
+//
+// This type does not include location information, and therefore does not
+// describe a unique 24-hour timespan.
+type LocalDate struct {
+ Year int // Year (e.g., 2014).
+ Month time.Month // Month of the year (January = 1, ...).
+ Day int // Day of the month, starting at 1.
+}
+
+// LocalDateOf returns the LocalDate in which a time occurs in that time's location.
+func LocalDateOf(t time.Time) LocalDate {
+ var d LocalDate
+ d.Year, d.Month, d.Day = t.Date()
+ return d
+}
+
+// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents.
+func ParseLocalDate(s string) (LocalDate, error) {
+ t, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ return LocalDate{}, err
+ }
+ return LocalDateOf(t), nil
+}
+
+// String returns the date in RFC3339 full-date format.
+func (d LocalDate) String() string {
+ return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
+}
+
+// IsValid reports whether the date is valid.
+func (d LocalDate) IsValid() bool {
+ return LocalDateOf(d.In(time.UTC)) == d
+}
+
+// In returns the time corresponding to time 00:00:00 of the date in the location.
+//
+// In is always consistent with time.Date, even when time.Date returns a time
+// on a different day. For example, if loc is America/Indiana/Vincennes, then both
+//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
+// and
+//     LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
+// return 23:00:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (d LocalDate) In(loc *time.Location) time.Time {
+ return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
+}
+
+// AddDays returns the date that is n days in the future.
+// n can also be negative to go into the past.
+func (d LocalDate) AddDays(n int) LocalDate {
+ return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
+}
+
+// DaysSince returns the signed number of days between the date and s, not including the end day.
+// This is the inverse operation to AddDays.
+func (d LocalDate) DaysSince(s LocalDate) (days int) {
+ // We convert to Unix time so we do not have to worry about leap seconds:
+ // Unix time increases by exactly 86400 seconds per day.
+ deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
+ return int(deltaUnix / 86400)
+}
+
+// Before reports whether d1 occurs before d2.
+func (d1 LocalDate) Before(d2 LocalDate) bool {
+ if d1.Year != d2.Year {
+ return d1.Year < d2.Year
+ }
+ if d1.Month != d2.Month {
+ return d1.Month < d2.Month
+ }
+ return d1.Day < d2.Day
+}
+
+// After reports whether d1 occurs after d2.
+func (d1 LocalDate) After(d2 LocalDate) bool {
+ return d2.Before(d1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of d.String().
+func (d LocalDate) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The date is expected to be a string in a format accepted by ParseLocalDate.
+func (d *LocalDate) UnmarshalText(data []byte) error {
+ var err error
+ *d, err = ParseLocalDate(string(data))
+ return err
+}
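+
+// An illustrative sketch of the LocalDate API (values are hypothetical):
+//
+//   d, _ := ParseLocalDate("1979-05-27")
+//   d.String()                // "1979-05-27"
+//   d.AddDays(1).DaysSince(d) // 1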
+
+// A LocalTime represents a time with nanosecond precision.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+//
+// This type exists to represent the TIME type in storage-based APIs like BigQuery.
+// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type.
+type LocalTime struct {
+ Hour int // The hour of the day in 24-hour format; range [0-23]
+ Minute int // The minute of the hour; range [0-59]
+ Second int // The second of the minute; range [0-59]
+ Nanosecond int // The nanosecond of the second; range [0-999999999]
+}
+
+// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
+// in that time's location. It ignores the date.
+func LocalTimeOf(t time.Time) LocalTime {
+ var tm LocalTime
+ tm.Hour, tm.Minute, tm.Second = t.Clock()
+ tm.Nanosecond = t.Nanosecond()
+ return tm
+}
+
+// ParseLocalTime parses a string and returns the time value it represents.
+// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
+// the HH:MM:SS part of the string, an optional fractional part may appear,
+// consisting of a decimal point followed by one to nine decimal digits.
+// (RFC3339 admits only one digit after the decimal point).
+func ParseLocalTime(s string) (LocalTime, error) {
+ t, err := time.Parse("15:04:05.999999999", s)
+ if err != nil {
+ return LocalTime{}, err
+ }
+ return LocalTimeOf(t), nil
+}
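+
+// For illustration (hypothetical value): ParseLocalTime("07:32:00.999999")
+// returns LocalTime{Hour: 7, Minute: 32, Second: 0, Nanosecond: 999999000}.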
+
+// String returns the time in the format described in ParseLocalTime. If Nanosecond
+// is zero, no fractional part will be generated. Otherwise, the result will
+// end with a fractional part consisting of a decimal point and nine digits.
+func (t LocalTime) String() string {
+ s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
+ if t.Nanosecond == 0 {
+ return s
+ }
+ return s + fmt.Sprintf(".%09d", t.Nanosecond)
+}
+
+// IsValid reports whether the time is valid.
+func (t LocalTime) IsValid() bool {
+ // Construct a non-zero time.
+ tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
+ return LocalTimeOf(tm) == t
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of t.String().
+func (t LocalTime) MarshalText() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be a string in a format accepted by ParseLocalTime.
+func (t *LocalTime) UnmarshalText(data []byte) error {
+ var err error
+ *t, err = ParseLocalTime(string(data))
+ return err
+}
+
+// A LocalDateTime represents a date and time.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+type LocalDateTime struct {
+ Date LocalDate
+ Time LocalTime
+}
+
+// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and DaysSince.
+
+// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
+func LocalDateTimeOf(t time.Time) LocalDateTime {
+ return LocalDateTime{
+ Date: LocalDateOf(t),
+ Time: LocalTimeOf(t),
+ }
+}
+
+// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
+// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
+// the time offset but includes an optional fractional time, as described in
+// ParseLocalTime. Informally, the accepted format is
+// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
+// where the 'T' may be a lower-case 't'.
+func ParseLocalDateTime(s string) (LocalDateTime, error) {
+ t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
+ if err != nil {
+ t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
+ if err != nil {
+ return LocalDateTime{}, err
+ }
+ }
+ return LocalDateTimeOf(t), nil
+}
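+
+// For illustration: both of the following parse to the same LocalDateTime,
+// since a lower-case 't' separator is also accepted:
+//
+//   ParseLocalDateTime("1979-05-27T07:32:00")
+//   ParseLocalDateTime("1979-05-27t07:32:00")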
+
+// String returns the datetime in the format described in ParseLocalDateTime.
+func (dt LocalDateTime) String() string {
+ return dt.Date.String() + "T" + dt.Time.String()
+}
+
+// IsValid reports whether the datetime is valid.
+func (dt LocalDateTime) IsValid() bool {
+ return dt.Date.IsValid() && dt.Time.IsValid()
+}
+
+// In returns the time corresponding to the LocalDateTime in the given location.
+//
+// If the time is missing or ambiguous at the location, In returns the same
+// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
+// both
+//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
+// and
+//     LocalDateTime{
+//         Date: LocalDate{Year: 1955, Month: time.May, Day: 1},
+//         Time: LocalTime{Minute: 30}}.In(loc)
+// return 23:30:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (dt LocalDateTime) In(loc *time.Location) time.Time {
+ return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
+}
+
+// Before reports whether dt1 occurs before dt2.
+func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
+ return dt1.In(time.UTC).Before(dt2.In(time.UTC))
+}
+
+// After reports whether dt1 occurs after dt2.
+func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
+ return dt2.Before(dt1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of dt.String().
+func (dt LocalDateTime) MarshalText() ([]byte, error) {
+ return []byte(dt.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime.
+func (dt *LocalDateTime) UnmarshalText(data []byte) error {
+ var err error
+ *dt, err = ParseLocalDateTime(string(data))
+ return err
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 00000000000..3443c35452a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,1308 @@
+package toml
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ tagFieldName = "toml"
+ tagFieldComment = "comment"
+ tagCommented = "commented"
+ tagMultiline = "multiline"
+ tagLiteral = "literal"
+ tagDefault = "default"
+)
+
+type tomlOpts struct {
+ name string
+ nameFromTag bool
+ comment string
+ commented bool
+ multiline bool
+ literal bool
+ include bool
+ omitempty bool
+ defaultValue string
+}
+
+type encOpts struct {
+ quoteMapKeys bool
+ arraysOneElementPerLine bool
+}
+
+var encOptsDefaults = encOpts{
+ quoteMapKeys: false,
+}
+
+type annotation struct {
+ tag string
+ comment string
+ commented string
+ multiline string
+ literal string
+ defaultValue string
+}
+
+var annotationDefault = annotation{
+ tag: tagFieldName,
+ comment: tagFieldComment,
+ commented: tagCommented,
+ multiline: tagMultiline,
+ literal: tagLiteral,
+ defaultValue: tagDefault,
+}
+
+type MarshalOrder int
+
+// Orders in which the Encoder can write the fields to the output stream.
+const (
+ // Sort fields alphabetically.
+ OrderAlphabetical MarshalOrder = iota + 1
+ // Preserve the order the fields are encountered. For example, the order of fields in
+ // a struct.
+ OrderPreserve
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem()
+var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+var localDateType = reflect.TypeOf(LocalDate{})
+var localTimeType = reflect.TypeOf(LocalTime{})
+var localDateTimeType = reflect.TypeOf(LocalDateTime{})
+var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
+
+// Check if the given marshal type maps to a Tree primitive
+func isPrimitive(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isPrimitive(mtype.Elem())
+ case reflect.Bool:
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Struct:
+ return isTimeType(mtype)
+ default:
+ return false
+ }
+}
+
+func isTimeType(mtype reflect.Type) bool {
+ return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType
+}
+
+// Check if the given marshal type maps to a Tree slice or array
+func isTreeSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTreeSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isTree(mtype.Elem())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a slice or array of a custom marshaler type
+func isCustomMarshalerSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isCustomMarshalerSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a slice or array of a text marshaler type
+func isTextMarshalerSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTextMarshalerSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a non-Tree slice or array
+func isOtherSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isOtherSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return !isTreeSequence(mtype)
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a Tree
+func isTree(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTree(mtype.Elem())
+ case reflect.Map:
+ return true
+ case reflect.Struct:
+ return !isPrimitive(mtype)
+ default:
+ return false
+ }
+}
+
+func isCustomMarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(marshalerType)
+}
+
+func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
+ return mval.Interface().(Marshaler).MarshalTOML()
+}
+
+func isTextMarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(textMarshalerType) && !isTimeType(mtype)
+}
+
+func callTextMarshaler(mval reflect.Value) ([]byte, error) {
+ return mval.Interface().(encoding.TextMarshaler).MarshalText()
+}
+
+func isCustomUnmarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(unmarshalerType)
+}
+
+func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error {
+ return mval.Interface().(Unmarshaler).UnmarshalTOML(tval)
+}
+
+func isTextUnmarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(textUnmarshalerType)
+}
+
+func callTextUnmarshaler(mval reflect.Value, text []byte) error {
+ return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text)
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+ MarshalTOML() ([]byte, error)
+}
+
+// Unmarshaler is the interface implemented by types that
+// can unmarshal a TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+/*
+Marshal returns the TOML encoding of v. Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+The following struct annotations are supported:
+
+ toml:"Field" Overrides the field's name to output.
+ omitempty When set, empty values and groups are not emitted.
+ comment:"comment" Emits a # comment on the same line. This supports new lines.
+ commented:"true" Emits the value as commented.
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+ *Tree (*)struct, (*)map[string]interface{}
+ []*Tree (*)[](*)struct, (*)[](*)map[string]interface{}
+ []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
+ interface{} (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+ uint64 uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+ float64 float32, float64, pointers to same
+ string string, pointers to same
+ bool bool, pointers to same
+  time.Time  time.Time{}, pointers to same
+
+For additional flexibility, use the Encoder API.
+*/
+func Marshal(v interface{}) ([]byte, error) {
+ return NewEncoder(nil).marshal(v)
+}
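+
+// A minimal usage sketch for the annotations above (the struct and values are
+// hypothetical, not part of the library):
+//
+//   type Server struct {
+//       Host string `toml:"host" comment:"server hostname"`
+//       Port *int   `toml:"port"` // pointer, so implicitly omitempty
+//   }
+//   b, _ := Marshal(Server{Host: "example.com"})
+//   // string(b) is roughly:
+//   //   # server hostname
+//   //   host = "example.com"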
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+ w io.Writer
+ encOpts
+ annotation
+ line int
+ col int
+ order MarshalOrder
+ promoteAnon bool
+ compactComments bool
+ indentation string
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ encOpts: encOptsDefaults,
+ annotation: annotationDefault,
+ line: 0,
+ col: 1,
+ order: OrderAlphabetical,
+ indentation: " ",
+ }
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+ b, err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+ if _, err := e.w.Write(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// QuoteMapKeys sets up the encoder to emit quoted TOML keys
+// for maps with string-typed keys.
+//
+// This relieves the character limitations on map keys.
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+ e.quoteMapKeys = v
+ return e
+}
+
+// ArraysWithOneElementPerLine sets up the encoder to write arrays with more
+// than one element across multiple lines, one element per line.
+//
+// For example:
+//
+// A = [1,2,3]
+//
+// Becomes
+//
+// A = [
+// 1,
+// 2,
+// 3,
+// ]
+func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
+ e.arraysOneElementPerLine = v
+ return e
+}
+
+// Order sets the order in which fields are written to the output stream.
+func (e *Encoder) Order(ord MarshalOrder) *Encoder {
+ e.order = ord
+ return e
+}
+
+// Indentation sets the indentation string used when marshalling.
+func (e *Encoder) Indentation(indent string) *Encoder {
+ e.indentation = indent
+ return e
+}
+
+// SetTagName allows changing default tag "toml"
+func (e *Encoder) SetTagName(v string) *Encoder {
+ e.tag = v
+ return e
+}
+
+// SetTagComment allows changing default tag "comment"
+func (e *Encoder) SetTagComment(v string) *Encoder {
+ e.comment = v
+ return e
+}
+
+// SetTagCommented allows changing default tag "commented"
+func (e *Encoder) SetTagCommented(v string) *Encoder {
+ e.commented = v
+ return e
+}
+
+// SetTagMultiline allows changing default tag "multiline"
+func (e *Encoder) SetTagMultiline(v string) *Encoder {
+ e.multiline = v
+ return e
+}
+
+// PromoteAnonymous changes how anonymous struct fields are marshaled.
+// Usually, they are marshaled as if the inner exported fields were fields in
+// the outer struct. However, if an anonymous struct field is given a name in
+// its TOML tag, it is treated like a regular struct field with that name,
+// rather than being anonymous.
+//
+// In case anonymous promotion is enabled, all anonymous structs are promoted
+// and treated like regular struct fields.
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder {
+ e.promoteAnon = promote
+ return e
+}
+
+// CompactComments removes the new line before each comment in the tree.
+func (e *Encoder) CompactComments(cc bool) *Encoder {
+ e.compactComments = cc
+ return e
+}
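+
+// The option setters above all return the *Encoder, so they chain. A sketch
+// (buf and cfg are hypothetical):
+//
+//   err := NewEncoder(&buf).
+//       Order(OrderPreserve).
+//       Indentation("\t").
+//       ArraysWithOneElementPerLine(true).
+//       Encode(cfg)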
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+ // Check if indentation is valid
+ for _, char := range e.indentation {
+ if !isSpace(char) {
+			return []byte{}, fmt.Errorf("invalid indentation: must only contain space or tab characters")
+ }
+ }
+
+ mtype := reflect.TypeOf(v)
+ if mtype == nil {
+ return []byte{}, errors.New("nil cannot be marshaled to TOML")
+ }
+
+ switch mtype.Kind() {
+ case reflect.Struct, reflect.Map:
+ case reflect.Ptr:
+ if mtype.Elem().Kind() != reflect.Struct {
+ return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML")
+ }
+ if reflect.ValueOf(v).IsNil() {
+ return []byte{}, errors.New("nil pointer cannot be marshaled to TOML")
+ }
+ default:
+ return []byte{}, errors.New("Only a struct or map can be marshaled to TOML")
+ }
+
+ sval := reflect.ValueOf(v)
+ if isCustomMarshaler(mtype) {
+ return callCustomMarshaler(sval)
+ }
+ if isTextMarshaler(mtype) {
+ return callTextMarshaler(sval)
+ }
+ t, err := e.valueToTree(mtype, sval)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ var buf bytes.Buffer
+ _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false)
+
+ return buf.Bytes(), err
+}
+
+// Create next tree with a position based on Encoder.line
+func (e *Encoder) nextTree() *Tree {
+ return newTreeWithPosition(Position{Line: e.line, Col: 1})
+}
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return e.valueToTree(mtype.Elem(), mval.Elem())
+ }
+ tval := e.nextTree()
+ switch mtype.Kind() {
+ case reflect.Struct:
+ switch mval.Interface().(type) {
+ case Tree:
+ reflect.ValueOf(tval).Elem().Set(mval)
+ default:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef, mvalf := mtype.Field(i), mval.Field(i)
+ opts := tomlOptions(mtypef, e.annotation)
+ if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) {
+ val, err := e.valueToToml(mtypef.Type, mvalf)
+ if err != nil {
+ return nil, err
+ }
+ if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon {
+ e.appendTree(tval, tree)
+ } else {
+ val = e.wrapTomlValue(val, tval)
+ tval.SetPathWithOptions([]string{opts.name}, SetOptions{
+ Comment: opts.comment,
+ Commented: opts.commented,
+ Multiline: opts.multiline,
+ Literal: opts.literal,
+ }, val)
+ }
+ }
+ }
+ }
+ case reflect.Map:
+ keys := mval.MapKeys()
+ if e.order == OrderPreserve && len(keys) > 0 {
+			// Sorting []reflect.Value is not straightforward.
+			//
+			// OrderPreserve only guarantees deterministic results when map keys
+			// are strings.
+ typ := keys[0].Type()
+ kind := keys[0].Kind()
+ if kind == reflect.String {
+ ikeys := make([]string, len(keys))
+ for i := range keys {
+ ikeys[i] = keys[i].Interface().(string)
+ }
+ sort.Strings(ikeys)
+ for i := range ikeys {
+ keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ)
+ }
+ }
+ }
+ for _, key := range keys {
+ mvalf := mval.MapIndex(key)
+ if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() {
+ continue
+ }
+ val, err := e.valueToToml(mtype.Elem(), mvalf)
+ if err != nil {
+ return nil, err
+ }
+ val = e.wrapTomlValue(val, tval)
+ if e.quoteMapKeys {
+ keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine)
+ if err != nil {
+ return nil, err
+ }
+ tval.SetPath([]string{keyStr}, val)
+ } else {
+ tval.SetPath([]string{key.String()}, val)
+ }
+ }
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of Toml trees
+func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+ tval := make([]*Tree, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := e.valueToTree(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of toml values
+func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ tval := make([]interface{}, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := e.valueToToml(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal value to toml value
+func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ if mtype.Kind() == reflect.Ptr {
+ switch {
+ case isCustomMarshaler(mtype):
+ return callCustomMarshaler(mval)
+ case isTextMarshaler(mtype):
+ b, err := callTextMarshaler(mval)
+ return string(b), err
+ default:
+ return e.valueToToml(mtype.Elem(), mval.Elem())
+ }
+ }
+ if mtype.Kind() == reflect.Interface {
+ return e.valueToToml(mval.Elem().Type(), mval.Elem())
+ }
+ switch {
+ case isCustomMarshaler(mtype):
+ return callCustomMarshaler(mval)
+ case isTextMarshaler(mtype):
+ b, err := callTextMarshaler(mval)
+ return string(b), err
+ case isTree(mtype):
+ return e.valueToTree(mtype, mval)
+ case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype):
+ return e.valueToOtherSlice(mtype, mval)
+ case isTreeSequence(mtype):
+ return e.valueToTreeSlice(mtype, mval)
+ default:
+ switch mtype.Kind() {
+ case reflect.Bool:
+ return mval.Bool(), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) {
+ return fmt.Sprint(mval), nil
+ }
+ return mval.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return mval.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return mval.Float(), nil
+ case reflect.String:
+ return mval.String(), nil
+ case reflect.Struct:
+ return mval.Interface(), nil
+ default:
+ return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+ }
+ }
+}
+
+func (e *Encoder) appendTree(t, o *Tree) error {
+ for key, value := range o.values {
+ if _, ok := t.values[key]; ok {
+ continue
+ }
+ if tomlValue, ok := value.(*tomlValue); ok {
+ tomlValue.position.Col = t.position.Col
+ }
+ t.values[key] = value
+ }
+ return nil
+}
+
+// Create a toml value with the current line number as the position line
+func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} {
+ _, isTree := val.(*Tree)
+ _, isTreeS := val.([]*Tree)
+ if isTree || isTreeS {
+ e.line++
+ return val
+ }
+
+ ret := &tomlValue{
+ value: val,
+ position: Position{
+ e.line,
+ parent.position.Col,
+ },
+ }
+ e.line++
+ return ret
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+ d := Decoder{tval: t, tagName: tagFieldName}
+ return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of Tree.
+// See Marshal() documentation for types mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+ var buf bytes.Buffer
+ _, err := t.WriteTo(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+// toml:"Field" Overrides the field's name to map to.
+// default:"foo" Provides a default value.
+//
+// For default values, only fields of the following types are supported:
+// * string
+// * bool
+// * int
+// * int64
+// * float64
+//
+// See Marshal() documentation for types mapping table.
+func Unmarshal(data []byte, v interface{}) error {
+ t, err := LoadReader(bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ return t.Unmarshal(v)
+}
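+
+// A sketch of the default annotation in use (the struct and input are
+// hypothetical):
+//
+//   type Config struct {
+//       Name    string `toml:"name"`
+//       Retries int    `toml:"retries" default:"3"`
+//   }
+//   var c Config
+//   _ = Unmarshal([]byte(`name = "example"`), &c)
+//   // c.Name == "example"; c.Retries == 3 (default applied: key absent)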
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+ r io.Reader
+ tval *Tree
+ encOpts
+ tagName string
+ strict bool
+ visitor visitorState
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ r: r,
+ encOpts: encOptsDefaults,
+ tagName: tagFieldName,
+ }
+}
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it in the value pointed at by v.
+//
+// See the documentation for Marshal for details.
+func (d *Decoder) Decode(v interface{}) error {
+ var err error
+ d.tval, err = LoadReader(d.r)
+ if err != nil {
+ return err
+ }
+ return d.unmarshal(v)
+}
+
+// SetTagName allows changing default tag "toml"
+func (d *Decoder) SetTagName(v string) *Decoder {
+ d.tagName = v
+ return d
+}
+
+// Strict allows changing to strict decoding. Any fields that are found in the
+// input data and do not have a corresponding struct member cause an error.
+func (d *Decoder) Strict(strict bool) *Decoder {
+ d.strict = strict
+ return d
+}
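+
+// With Strict(true), input keys that have no corresponding struct field make
+// Decode return an error naming the undecoded keys. A sketch (hypothetical
+// struct and input):
+//
+//   var c struct{ Known string }
+//   err := NewDecoder(strings.NewReader("unknown = 1")).Strict(true).Decode(&c)
+//   // err != nil: key "unknown" was never decoded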
+
+func (d *Decoder) unmarshal(v interface{}) error {
+ mtype := reflect.TypeOf(v)
+ if mtype == nil {
+ return errors.New("nil cannot be unmarshaled from TOML")
+ }
+ if mtype.Kind() != reflect.Ptr {
+ return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
+ }
+
+ elem := mtype.Elem()
+
+ switch elem.Kind() {
+ case reflect.Struct, reflect.Map:
+ case reflect.Interface:
+ elem = mapStringInterfaceType
+ default:
+ return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
+ }
+
+ if reflect.ValueOf(v).IsNil() {
+ return errors.New("nil pointer cannot be unmarshaled from TOML")
+ }
+
+ vv := reflect.ValueOf(v).Elem()
+
+ if d.strict {
+ d.visitor = newVisitorState(d.tval)
+ }
+
+ sval, err := d.valueFromTree(elem, d.tval, &vv)
+ if err != nil {
+ return err
+ }
+ if err := d.visitor.validate(); err != nil {
+ return err
+ }
+ reflect.ValueOf(v).Elem().Set(sval)
+ return nil
+}
+
+// Convert toml tree to marshal struct or map, using marshal type. When mval1
+// is non-nil, merge fields into the given value instead of allocating a new one.
+func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return d.unwrapPointer(mtype, tval, mval1)
+ }
+
+ // Check if pointer to value implements the Unmarshaler interface.
+ if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) {
+ d.visitor.visitAll()
+
+ if tval == nil {
+ return mvalPtr.Elem(), nil
+ }
+
+ if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
+ var mval reflect.Value
+ switch mtype.Kind() {
+ case reflect.Struct:
+ if mval1 != nil {
+ mval = *mval1
+ } else {
+ mval = reflect.New(mtype).Elem()
+ }
+
+ switch mval.Interface().(type) {
+ case Tree:
+ mval.Set(reflect.ValueOf(tval).Elem())
+ default:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef := mtype.Field(i)
+ an := annotation{tag: d.tagName}
+ opts := tomlOptions(mtypef, an)
+ if !opts.include {
+ continue
+ }
+ baseKey := opts.name
+ keysToTry := []string{
+ baseKey,
+ strings.ToLower(baseKey),
+ strings.ToTitle(baseKey),
+ strings.ToLower(string(baseKey[0])) + baseKey[1:],
+ }
+
+ found := false
+ if tval != nil {
+ for _, key := range keysToTry {
+ exists := tval.HasPath([]string{key})
+ if !exists {
+ continue
+ }
+
+ d.visitor.push(key)
+ val := tval.GetPath([]string{key})
+ fval := mval.Field(i)
+ mvalf, err := d.valueFromToml(mtypef.Type, val, &fval)
+ if err != nil {
+ return mval, formatError(err, tval.GetPositionPath([]string{key}))
+ }
+ mval.Field(i).Set(mvalf)
+ found = true
+ d.visitor.pop()
+ break
+ }
+ }
+
+ if !found && opts.defaultValue != "" {
+ mvalf := mval.Field(i)
+ var val interface{}
+ var err error
+ switch mvalf.Kind() {
+ case reflect.String:
+ val = opts.defaultValue
+ case reflect.Bool:
+ val, err = strconv.ParseBool(opts.defaultValue)
+ case reflect.Uint:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 0)
+ case reflect.Uint8:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 8)
+ case reflect.Uint16:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 16)
+ case reflect.Uint32:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 32)
+ case reflect.Uint64:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 64)
+ case reflect.Int:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 0)
+ case reflect.Int8:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 8)
+ case reflect.Int16:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 16)
+ case reflect.Int32:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 32)
+ case reflect.Int64:
+ // Check if the provided number has a non-numeric extension.
+ var hasExtension bool
+ if len(opts.defaultValue) > 0 {
+ lastChar := opts.defaultValue[len(opts.defaultValue)-1]
+ if lastChar < '0' || lastChar > '9' {
+ hasExtension = true
+ }
+ }
+ // If the value is a time.Duration with extension, parse as duration.
+ // If the value is an int64 or a time.Duration without extension, parse as number.
+ if hasExtension && mvalf.Type().String() == "time.Duration" {
+ val, err = time.ParseDuration(opts.defaultValue)
+ } else {
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 64)
+ }
+ case reflect.Float32:
+ val, err = strconv.ParseFloat(opts.defaultValue, 32)
+ case reflect.Float64:
+ val, err = strconv.ParseFloat(opts.defaultValue, 64)
+ default:
+ return mvalf, fmt.Errorf("unsupported field type for default option")
+ }
+
+ if err != nil {
+ return mvalf, err
+ }
+ mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type()))
+ }
+
+ // save the old behavior above and try to check structs
+ if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct {
+ tmpTval := tval
+ if !mtypef.Anonymous {
+ tmpTval = nil
+ }
+ fval := mval.Field(i)
+ v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval)
+ if err != nil {
+ return v, err
+ }
+ mval.Field(i).Set(v)
+ }
+ }
+ }
+ case reflect.Map:
+ mval = reflect.MakeMap(mtype)
+ for _, key := range tval.Keys() {
+ d.visitor.push(key)
+ // TODO: path splits key
+ val := tval.GetPath([]string{key})
+ mvalf, err := d.valueFromToml(mtype.Elem(), val, nil)
+ if err != nil {
+ return mval, formatError(err, tval.GetPositionPath([]string{key}))
+ }
+ mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf)
+ d.visitor.pop()
+ }
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal struct/map slice, using marshal type
+func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
+ mval, err := makeSliceOrArray(mtype, len(tval))
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < len(tval); i++ {
+ d.visitor.push(strconv.Itoa(i))
+ val, err := d.valueFromTree(mtype.Elem(), tval[i], nil)
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ d.visitor.pop()
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+ mval, err := makeSliceOrArray(mtype, len(tval))
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < len(tval); i++ {
+ val, err := d.valueFromToml(mtype.Elem(), tval[i], nil)
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+ val := reflect.ValueOf(tval)
+ length := val.Len()
+
+ mval, err := makeSliceOrArray(mtype, length)
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < length; i++ {
+ val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil)
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Create a new slice or a new array with specified length
+func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) {
+ var mval reflect.Value
+ switch mtype.Kind() {
+ case reflect.Slice:
+ mval = reflect.MakeSlice(mtype, tLength, tLength)
+ case reflect.Array:
+ mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem()
+ if tLength > mtype.Len() {
+ return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len())
+ }
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type. When mval1 is non-nil
+// and the given type is a struct value, merge fields into it.
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return d.unwrapPointer(mtype, tval, mval1)
+ }
+
+ switch t := tval.(type) {
+ case *Tree:
+ var mval11 *reflect.Value
+ if mtype.Kind() == reflect.Struct {
+ mval11 = mval1
+ }
+
+ if isTree(mtype) {
+ return d.valueFromTree(mtype, t, mval11)
+ }
+
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil)
+ } else {
+ return d.valueFromToml(mval1.Elem().Type(), t, nil)
+ }
+ }
+
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
+ case []*Tree:
+ if isTreeSequence(mtype) {
+ return d.valueFromTreeSlice(mtype, t)
+ }
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t)
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
+ case []interface{}:
+ d.visitor.visit()
+ if isOtherSequence(mtype) {
+ return d.valueFromOtherSlice(mtype, t)
+ }
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t)
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
+ default:
+ d.visitor.visit()
+ mvalPtr := reflect.New(mtype)
+
+ // Check if pointer to value implements the Unmarshaler interface.
+ if isCustomUnmarshaler(mvalPtr.Type()) {
+ if err := callCustomUnmarshaler(mvalPtr, tval); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
+ // Check if pointer to value implements the encoding.TextUnmarshaler.
+ if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) {
+ if err := d.unmarshalText(tval, mvalPtr); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
+ switch mtype.Kind() {
+ case reflect.Bool, reflect.Struct:
+ val := reflect.ValueOf(tval)
+
+ switch val.Type() {
+ case localDateType:
+ localDate := val.Interface().(LocalDate)
+ switch mtype {
+ case timeType:
+ return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil
+ }
+ case localDateTimeType:
+ localDateTime := val.Interface().(LocalDateTime)
+ switch mtype {
+ case timeType:
+ return reflect.ValueOf(time.Date(
+ localDateTime.Date.Year,
+ localDateTime.Date.Month,
+ localDateTime.Date.Day,
+ localDateTime.Time.Hour,
+ localDateTime.Time.Minute,
+ localDateTime.Time.Second,
+ localDateTime.Time.Nanosecond,
+ time.Local)), nil
+ }
+ }
+
+			// If this check passes when mtype is reflect.Struct, tval is a
+			// time value (time.Time or one of the Local* types).
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.String:
+ val := reflect.ValueOf(tval)
+ // stupidly, int64 is convertible to string. So special case this.
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val := reflect.ValueOf(tval)
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String {
+ d, err := time.ParseDuration(val.String())
+ if err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err)
+ }
+ return reflect.ValueOf(d), nil
+ }
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+
+ if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Float32, reflect.Float64:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+ }
+ if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+ }
+
+ return val.Convert(mtype), nil
+ case reflect.Interface:
+ if mval1 == nil || mval1.IsNil() {
+ return reflect.ValueOf(tval), nil
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
+ case reflect.Slice, reflect.Array:
+ if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) {
+ return d.valueFromOtherSliceI(mtype, t)
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
+ default:
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
+ }
+ }
+}
+
+func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
+ var melem *reflect.Value
+
+ if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) {
+ elem := mval1.Elem()
+ melem = &elem
+ }
+
+ val, err := d.valueFromToml(mtype.Elem(), tval, melem)
+ if err != nil {
+ return reflect.ValueOf(nil), err
+ }
+ mval := reflect.New(mtype.Elem())
+ mval.Elem().Set(val)
+ return mval, nil
+}
+
+func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error {
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, tval)
+ return callTextUnmarshaler(mval, buf.Bytes())
+}
+
+func tomlOptions(vf reflect.StructField, an annotation) tomlOpts {
+ tag := vf.Tag.Get(an.tag)
+ parse := strings.Split(tag, ",")
+ var comment string
+ if c := vf.Tag.Get(an.comment); c != "" {
+ comment = c
+ }
+ commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented))
+ multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline))
+ literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal))
+ defaultValue := vf.Tag.Get(tagDefault)
+ result := tomlOpts{
+ name: vf.Name,
+ nameFromTag: false,
+ comment: comment,
+ commented: commented,
+ multiline: multiline,
+ literal: literal,
+ include: true,
+ omitempty: false,
+ defaultValue: defaultValue,
+ }
+ if parse[0] != "" {
+ if parse[0] == "-" && len(parse) == 1 {
+ result.include = false
+ } else {
+ result.name = strings.Trim(parse[0], " ")
+ result.nameFromTag = true
+ }
+ }
+ if vf.PkgPath != "" {
+ result.include = false
+ }
+ if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
+ result.omitempty = true
+ }
+ if vf.Type.Kind() == reflect.Ptr {
+ result.omitempty = true
+ }
+ return result
+}
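+
+// For example, with the default annotation a struct field declared as
+//
+//	Name string `toml:"name,omitempty" comment:"the user name"`
+//
+// yields tomlOpts{name: "name", nameFromTag: true, comment: "the user name",
+// include: true, omitempty: true}; a tag of `toml:"-"` excludes the field.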
+
+func isZero(val reflect.Value) bool {
+ switch val.Type().Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map:
+ return val.Len() == 0
+ default:
+ return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
+ }
+}
+
+func formatError(err error, pos Position) error {
+ if err.Error()[0] == '(' { // Error already contains position information
+ return err
+ }
+ return fmt.Errorf("%s: %s", pos, err)
+}
+
+// visitorState keeps track of which keys were unmarshaled.
+type visitorState struct {
+ tree *Tree
+ path []string
+ keys map[string]struct{}
+ active bool
+}
+
+func newVisitorState(tree *Tree) visitorState {
+ path, result := []string{}, map[string]struct{}{}
+ insertKeys(path, result, tree)
+ return visitorState{
+ tree: tree,
+ path: path[:0],
+ keys: result,
+ active: true,
+ }
+}
+
+func (s *visitorState) push(key string) {
+ if s.active {
+ s.path = append(s.path, key)
+ }
+}
+
+func (s *visitorState) pop() {
+ if s.active {
+ s.path = s.path[:len(s.path)-1]
+ }
+}
+
+func (s *visitorState) visit() {
+ if s.active {
+ delete(s.keys, strings.Join(s.path, "."))
+ }
+}
+
+func (s *visitorState) visitAll() {
+ if s.active {
+ for k := range s.keys {
+ if strings.HasPrefix(k, strings.Join(s.path, ".")) {
+ delete(s.keys, k)
+ }
+ }
+ }
+}
+
+func (s *visitorState) validate() error {
+ if !s.active {
+ return nil
+ }
+ undecoded := make([]string, 0, len(s.keys))
+ for key := range s.keys {
+ undecoded = append(undecoded, key)
+ }
+ sort.Strings(undecoded)
+ if len(undecoded) > 0 {
+ return fmt.Errorf("undecoded keys: %q", undecoded)
+ }
+ return nil
+}
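+
+// For example, strictly decoding `a = 1` and `b = 2` into a struct that only
+// declares field A leaves "b" behind in s.keys, so validate returns the error
+// `undecoded keys: ["b"]`.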
+
+func insertKeys(path []string, m map[string]struct{}, tree *Tree) {
+ for k, v := range tree.values {
+ switch node := v.(type) {
+ case []*Tree:
+ for i, item := range node {
+ insertKeys(append(path, k, strconv.Itoa(i)), m, item)
+ }
+ case *Tree:
+ insertKeys(append(path, k), m, node)
+ case *tomlValue:
+ m[strings.Join(append(path, k), ".")] = struct{}{}
+ }
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
new file mode 100644
index 00000000000..792b72ed721
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
@@ -0,0 +1,39 @@
+title = "TOML Marshal Testing"
+
+[basic_lists]
+ floats = [12.3,45.6,78.9]
+ bools = [true,false,true]
+ dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+ ints = [8001,8001,8002]
+ uints = [5002,5003]
+ strings = ["One","Two","Three"]
+
+[[subdocptrs]]
+ name = "Second"
+
+[basic_map]
+ one = "one"
+ two = "two"
+
+[subdoc]
+
+ [subdoc.second]
+ name = "Second"
+
+ [subdoc.first]
+ name = "First"
+
+[basic]
+ uint = 5001
+ bool = true
+ float = 123.4
+ float64 = 123.456782132399
+ int = 5000
+ string = "Bite me"
+ date = 1979-05-27T07:32:00Z
+
+[[subdoclist]]
+ name = "List.First"
+
+[[subdoclist]]
+ name = "List.Second"
diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml
new file mode 100644
index 00000000000..ba5e110bf04
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml
@@ -0,0 +1,39 @@
+title = "TOML Marshal Testing"
+
+[basic]
+ bool = true
+ date = 1979-05-27T07:32:00Z
+ float = 123.4
+ float64 = 123.456782132399
+ int = 5000
+ string = "Bite me"
+ uint = 5001
+
+[basic_lists]
+ bools = [true,false,true]
+ dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+ floats = [12.3,45.6,78.9]
+ ints = [8001,8001,8002]
+ strings = ["One","Two","Three"]
+ uints = [5002,5003]
+
+[basic_map]
+ one = "one"
+ two = "two"
+
+[subdoc]
+
+ [subdoc.first]
+ name = "First"
+
+ [subdoc.second]
+ name = "Second"
+
+[[subdoclist]]
+ name = "List.First"
+
+[[subdoclist]]
+ name = "List.Second"
+
+[[subdocptrs]]
+ name = "Second"
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
new file mode 100644
index 00000000000..f5e1a44fb4d
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -0,0 +1,508 @@
+// TOML Parser.
+
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type tomlParser struct {
+ flowIdx int
+ flow []token
+ tree *Tree
+ currentTable []string
+ seenTableKeys []string
+}
+
+type tomlParserStateFn func() tomlParserStateFn
+
+// raiseError formats an error message from a token and panics with it.
+func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
+ panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
+}
+
+func (p *tomlParser) run() {
+ for state := p.parseStart; state != nil; {
+ state = state()
+ }
+}
+
+func (p *tomlParser) peek() *token {
+ if p.flowIdx >= len(p.flow) {
+ return nil
+ }
+ return &p.flow[p.flowIdx]
+}
+
+func (p *tomlParser) assume(typ tokenType) {
+ tok := p.getToken()
+ if tok == nil {
+ p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
+ }
+ if tok.typ != typ {
+ p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
+ }
+}
+
+func (p *tomlParser) getToken() *token {
+ tok := p.peek()
+ if tok == nil {
+ return nil
+ }
+ p.flowIdx++
+ return tok
+}
+
+func (p *tomlParser) parseStart() tomlParserStateFn {
+ tok := p.peek()
+
+ // end of stream, parsing is finished
+ if tok == nil {
+ return nil
+ }
+
+ switch tok.typ {
+ case tokenDoubleLeftBracket:
+ return p.parseGroupArray
+ case tokenLeftBracket:
+ return p.parseGroup
+ case tokenKey:
+ return p.parseAssign
+ case tokenEOF:
+ return nil
+ case tokenError:
+ p.raiseError(tok, "parsing error: %s", tok.String())
+ default:
+ p.raiseError(tok, "unexpected token %s", tok.typ)
+ }
+ return nil
+}
+
+func (p *tomlParser) parseGroupArray() tomlParserStateFn {
+ startToken := p.getToken() // discard the [[
+ key := p.getToken()
+ if key.typ != tokenKeyGroupArray {
+ p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
+ }
+
+ // get or create table array element at the indicated part in the path
+ keys, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid table array key: %s", err)
+ }
+ p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
+ destTree := p.tree.GetPath(keys)
+ var array []*Tree
+ if destTree == nil {
+ array = make([]*Tree, 0)
+ } else if target, ok := destTree.([]*Tree); ok && target != nil {
+ array = target
+ } else {
+ p.raiseError(key, "key %s is already assigned and not of type table array", key)
+ }
+ p.currentTable = keys
+
+ // add a new tree to the end of the table array
+ newTree := newTree()
+ newTree.position = startToken.Position
+ array = append(array, newTree)
+ p.tree.SetPath(p.currentTable, array)
+
+ // remove all keys that were children of this table array
+ prefix := key.val + "."
+ found := false
+ for ii := 0; ii < len(p.seenTableKeys); {
+ tableKey := p.seenTableKeys[ii]
+ if strings.HasPrefix(tableKey, prefix) {
+ p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
+ } else {
+ found = (tableKey == key.val)
+ ii++
+ }
+ }
+
+ // keep this key name from use by other kinds of assignments
+ if !found {
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
+ }
+
+ // move to next parser state
+ p.assume(tokenDoubleRightBracket)
+ return p.parseStart
+}
+
+func (p *tomlParser) parseGroup() tomlParserStateFn {
+ startToken := p.getToken() // discard the [
+ key := p.getToken()
+ if key.typ != tokenKeyGroup {
+ p.raiseError(key, "unexpected token %s, was expecting a table key", key)
+ }
+ for _, item := range p.seenTableKeys {
+ if item == key.val {
+ p.raiseError(key, "duplicated tables")
+ }
+ }
+
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
+ keys, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid table array key: %s", err)
+ }
+ if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
+ p.raiseError(key, "%s", err)
+ }
+ destTree := p.tree.GetPath(keys)
+ if target, ok := destTree.(*Tree); ok && target != nil && target.inline {
+ p.raiseError(key, "could not re-define exist inline table or its sub-table : %s",
+ strings.Join(keys, "."))
+ }
+ p.assume(tokenRightBracket)
+ p.currentTable = keys
+ return p.parseStart
+}
+
+func (p *tomlParser) parseAssign() tomlParserStateFn {
+ key := p.getToken()
+ p.assume(tokenEqual)
+
+ parsedKey, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid key: %s", err.Error())
+ }
+
+ value := p.parseRvalue()
+ var tableKey []string
+ if len(p.currentTable) > 0 {
+ tableKey = p.currentTable
+ } else {
+ tableKey = []string{}
+ }
+
+ prefixKey := parsedKey[0 : len(parsedKey)-1]
+ tableKey = append(tableKey, prefixKey...)
+
+ // find the table to assign, looking out for arrays of tables
+ var targetNode *Tree
+ switch node := p.tree.GetPath(tableKey).(type) {
+ case []*Tree:
+ targetNode = node[len(node)-1]
+ case *Tree:
+ targetNode = node
+ case nil:
+ // create intermediate
+ if err := p.tree.createSubTree(tableKey, key.Position); err != nil {
+ p.raiseError(key, "could not create intermediate group: %s", err)
+ }
+ targetNode = p.tree.GetPath(tableKey).(*Tree)
+ default:
+ p.raiseError(key, "Unknown table type for path: %s",
+ strings.Join(tableKey, "."))
+ }
+
+ if targetNode.inline {
+ p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s",
+ strings.Join(tableKey, "."))
+ }
+
+ // assign value to the found table
+ keyVal := parsedKey[len(parsedKey)-1]
+ localKey := []string{keyVal}
+ finalKey := append(tableKey, keyVal)
+ if targetNode.GetPath(localKey) != nil {
+ p.raiseError(key, "The following key was defined twice: %s",
+ strings.Join(finalKey, "."))
+ }
+ var toInsert interface{}
+
+ switch value.(type) {
+ case *Tree, []*Tree:
+ toInsert = value
+ default:
+ toInsert = &tomlValue{value: value, position: key.Position}
+ }
+ targetNode.values[keyVal] = toInsert
+ return p.parseStart
+}
+
+var errInvalidUnderscore = errors.New("invalid use of _ in number")
+
+func numberContainsInvalidUnderscore(value string) error {
+ // For large numbers, you may use underscores between digits to enhance
+ // readability. Each underscore must be surrounded by at least one digit on
+ // each side.
+
+ hasBefore := false
+ for idx, r := range value {
+ if r == '_' {
+ if !hasBefore || idx+1 >= len(value) {
+ // no digit before the underscore, or the underscore ends the number
+ return errInvalidUnderscore
+ }
+ }
+ hasBefore = isDigit(r)
+ }
+ return nil
+}
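+
+// For example, "1_000" is accepted, while "_1000", "1000_" and "1__000" all
+// trip errInvalidUnderscore.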
+
+var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number")
+
+func hexNumberContainsInvalidUnderscore(value string) error {
+ hasBefore := false
+ for idx, r := range value {
+ if r == '_' {
+ if !hasBefore || idx+1 >= len(value) {
+ // no digit before the underscore, or the underscore ends the number
+ return errInvalidUnderscoreHex
+ }
+ }
+ hasBefore = isHexDigit(r)
+ }
+ return nil
+}
+
+func cleanupNumberToken(value string) string {
+ cleanedVal := strings.Replace(value, "_", "", -1)
+ return cleanedVal
+}
+
+func (p *tomlParser) parseRvalue() interface{} {
+ tok := p.getToken()
+ if tok == nil || tok.typ == tokenEOF {
+ p.raiseError(tok, "expecting a value")
+ }
+
+ switch tok.typ {
+ case tokenString:
+ return tok.val
+ case tokenTrue:
+ return true
+ case tokenFalse:
+ return false
+ case tokenInf:
+ if tok.val[0] == '-' {
+ return math.Inf(-1)
+ }
+ return math.Inf(1)
+ case tokenNan:
+ return math.NaN()
+ case tokenInteger:
+ cleanedVal := cleanupNumberToken(tok.val)
+ var err error
+ var val int64
+ if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
+ switch cleanedVal[1] {
+ case 'x':
+ err = hexNumberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
+ case 'o':
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
+ case 'b':
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
+ default:
+ panic("invalid base") // the lexer should catch this first
+ }
+ } else {
+ err = numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ val, err = strconv.ParseInt(cleanedVal, 10, 64)
+ }
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenFloat:
+ err := numberContainsInvalidUnderscore(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ cleanedVal := cleanupNumberToken(tok.val)
+ val, err := strconv.ParseFloat(cleanedVal, 64)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLocalTime:
+ val, err := ParseLocalTime(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLocalDate:
+ // a local date may be followed by:
+ // * nothing: this is a local date
+ // * a local time: this is a local date-time
+
+ next := p.peek()
+ if next == nil || next.typ != tokenLocalTime {
+ val, err := ParseLocalDate(tok.val)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ }
+
+ localDate := tok
+ localTime := p.getToken()
+
+ next = p.peek()
+ if next == nil || next.typ != tokenTimeOffset {
+ v := localDate.val + "T" + localTime.val
+ val, err := ParseLocalDateTime(v)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ }
+
+ offset := p.getToken()
+
+ layout := time.RFC3339Nano
+ v := localDate.val + "T" + localTime.val + offset.val
+ val, err := time.ParseInLocation(layout, v, time.UTC)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLeftBracket:
+ return p.parseArray()
+ case tokenLeftCurlyBrace:
+ return p.parseInlineTable()
+ case tokenEqual:
+ p.raiseError(tok, "cannot have multiple equals for the same key")
+ case tokenError:
+ p.raiseError(tok, "%s", tok)
+ default:
+ panic(fmt.Errorf("unhandled token: %v", tok))
+ }
+
+ return nil
+}
+
+func tokenIsComma(t *token) bool {
+ return t != nil && t.typ == tokenComma
+}
+
+func (p *tomlParser) parseInlineTable() *Tree {
+ tree := newTree()
+ var previous *token
+Loop:
+ for {
+ follow := p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated inline table")
+ }
+ switch follow.typ {
+ case tokenRightCurlyBrace:
+ p.getToken()
+ break Loop
+ case tokenKey, tokenInteger, tokenString:
+ if !tokenIsComma(previous) && previous != nil {
+ p.raiseError(follow, "comma expected between fields in inline table")
+ }
+ key := p.getToken()
+ p.assume(tokenEqual)
+
+ parsedKey, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid key: %s", err)
+ }
+
+ value := p.parseRvalue()
+ tree.SetPath(parsedKey, value)
+ case tokenComma:
+ if tokenIsComma(previous) {
+ p.raiseError(follow, "need field between two commas in inline table")
+ }
+ p.getToken()
+ default:
+ p.raiseError(follow, "unexpected token type in inline table: %s", follow.String())
+ }
+ previous = follow
+ }
+ if tokenIsComma(previous) {
+ p.raiseError(previous, "trailing comma at the end of inline table")
+ }
+ tree.inline = true
+ return tree
+}
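+
+// For example, `point = { x = 1, y = 2 }` yields a *Tree with keys "x" and
+// "y"; the inline flag set above prevents later [point] sections or dotted
+// assignments from extending it.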
+
+func (p *tomlParser) parseArray() interface{} {
+ var array []interface{}
+ arrayType := reflect.TypeOf(newTree())
+ for {
+ follow := p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated array")
+ }
+ if follow.typ == tokenRightBracket {
+ p.getToken()
+ break
+ }
+ val := p.parseRvalue()
+ if reflect.TypeOf(val) != arrayType {
+ arrayType = nil
+ }
+ array = append(array, val)
+ follow = p.peek()
+ if follow == nil || follow.typ == tokenEOF {
+ p.raiseError(follow, "unterminated array")
+ }
+ if follow.typ != tokenRightBracket && follow.typ != tokenComma {
+ p.raiseError(follow, "missing comma")
+ }
+ if follow.typ == tokenComma {
+ p.getToken()
+ }
+ }
+
+ // if the array is a mixed-type array or its length is 0,
+ // don't convert it to a table array
+ if len(array) <= 0 {
+ arrayType = nil
+ }
+ // An array of Trees is actually an array of inline
+ // tables, which is a shorthand for a table array. If the
+ // array was not converted from []interface{} to []*Tree,
+ // the two notations would not be equivalent.
+ if arrayType == reflect.TypeOf(newTree()) {
+ tomlArray := make([]*Tree, len(array))
+ for i, v := range array {
+ tomlArray[i] = v.(*Tree)
+ }
+ return tomlArray
+ }
+ return array
+}
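+
+// For example, `a = [1, 2]` parses to []interface{}{int64(1), int64(2)},
+// while `a = [{x = 1}, {x = 2}]` is homogeneous in *Tree and is therefore
+// converted to []*Tree, the same shape produced by an [[a]] table array.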
+
+func parseToml(flow []token) *Tree {
+ result := newTree()
+ result.position = Position{1, 1}
+ parser := &tomlParser{
+ flowIdx: 0,
+ flow: flow,
+ tree: result,
+ currentTable: make([]string, 0),
+ seenTableKeys: make([]string, 0),
+ }
+ parser.run()
+ return result
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 00000000000..c17bff87baa
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+ "fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number
+// and column number, respectively. Values of zero or less cause Invalid()
+// to return true.
+type Position struct {
+ Line int // line within the document
+ Col int // column within the line
+}
+
+// String returns a human-readable representation of the position, with
+// 1-indexed line and column numbers.
+func (p Position) String() string {
+ return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid reports whether the position is invalid, i.e. has a zero or
+// negative Line or Col.
+func (p Position) Invalid() bool {
+ return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
new file mode 100644
index 00000000000..b437fdd3b6a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -0,0 +1,136 @@
+package toml
+
+import "fmt"
+
+// Define tokens
+type tokenType int
+
+const (
+ eof = -(iota + 1)
+)
+
+const (
+ tokenError tokenType = iota
+ tokenEOF
+ tokenComment
+ tokenKey
+ tokenString
+ tokenInteger
+ tokenTrue
+ tokenFalse
+ tokenFloat
+ tokenInf
+ tokenNan
+ tokenEqual
+ tokenLeftBracket
+ tokenRightBracket
+ tokenLeftCurlyBrace
+ tokenRightCurlyBrace
+ tokenLeftParen
+ tokenRightParen
+ tokenDoubleLeftBracket
+ tokenDoubleRightBracket
+ tokenLocalDate
+ tokenLocalTime
+ tokenTimeOffset
+ tokenKeyGroup
+ tokenKeyGroupArray
+ tokenComma
+ tokenColon
+ tokenDollar
+ tokenStar
+ tokenQuestion
+ tokenDot
+ tokenDotDot
+ tokenEOL
+)
+
+var tokenTypeNames = []string{
+ "Error",
+ "EOF",
+ "Comment",
+ "Key",
+ "String",
+ "Integer",
+ "True",
+ "False",
+ "Float",
+ "Inf",
+ "NaN",
+ "=",
+ "[",
+ "]",
+ "{",
+ "}",
+ "(",
+ ")",
+ "]]",
+ "[[",
+ "LocalDate",
+ "LocalTime",
+ "TimeOffset",
+ "KeyGroup",
+ "KeyGroupArray",
+ ",",
+ ":",
+ "$",
+ "*",
+ "?",
+ ".",
+ "..",
+ "EOL",
+}
+
+type token struct {
+ Position
+ typ tokenType
+ val string
+}
+
+func (tt tokenType) String() string {
+ idx := int(tt)
+ if idx < len(tokenTypeNames) {
+ return tokenTypeNames[idx]
+ }
+ return "Unknown"
+}
+
+func (t token) String() string {
+ switch t.typ {
+ case tokenEOF:
+ return "EOF"
+ case tokenError:
+ return t.val
+ }
+
+ return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+ return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+ // Keys start with the first character that isn't whitespace or [ and end
+ // with the last non-whitespace character before the equals sign. Keys
+ // cannot contain a # character.
+ return !(r == '\r' || r == '\n' || r == eof || r == '=')
+}
+
+func isKeyStartChar(r rune) bool {
+ return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
+}
+
+func isDigit(r rune) bool {
+ return '0' <= r && r <= '9'
+}
+
+func isHexDigit(r rune) bool {
+ return isDigit(r) ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
new file mode 100644
index 00000000000..6d82587c488
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -0,0 +1,533 @@
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+)
+
+type tomlValue struct {
+ value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
+ comment string
+ commented bool
+ multiline bool
+ literal bool
+ position Position
+}
+
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+ values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
+ comment string
+ commented bool
+ inline bool
+ position Position
+}
+
+func newTree() *Tree {
+ return newTreeWithPosition(Position{})
+}
+
+func newTreeWithPosition(pos Position) *Tree {
+ return &Tree{
+ values: make(map[string]interface{}),
+ position: pos,
+ }
+}
+
+// TreeFromMap initializes a new Tree object using the given map.
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
+ result, err := toTree(m)
+ if err != nil {
+ return nil, err
+ }
+ return result.(*Tree), nil
+}
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+ return t.position
+}
+
+// Has returns a boolean indicating if the given key exists.
+func (t *Tree) Has(key string) bool {
+ if key == "" {
+ return false
+ }
+ return t.HasPath(strings.Split(key, "."))
+}
+
+// HasPath returns true if the given path of keys exists, false otherwise.
+func (t *Tree) HasPath(keys []string) bool {
+ return t.GetPath(keys) != nil
+}
+
+// Keys returns the keys of the toplevel tree (does not recurse).
+func (t *Tree) Keys() []string {
+ keys := make([]string, len(t.values))
+ i := 0
+ for k := range t.values {
+ keys[i] = k
+ i++
+ }
+ return keys
+}
+
+// Get the value at key in the Tree.
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// If you need to retrieve non-bare keys, use GetPath.
+// Returns nil if the path does not exist in the tree.
+// If key is empty, the current tree is returned.
+func (t *Tree) Get(key string) interface{} {
+ if key == "" {
+ return t
+ }
+ return t.GetPath(strings.Split(key, "."))
+}
+
+// GetPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPath(keys []string) interface{} {
+ if len(keys) == 0 {
+ return t
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return nil
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return nil
+ }
+ subtree = node[len(node)-1]
+ default:
+ return nil // cannot navigate through other node types
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ return node.value
+ default:
+ return node
+ }
+}
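+
+// For example, after parsing:
+//
+//	[server]
+//	host = "example.com"
+//
+// t.GetPath([]string{"server", "host"}) returns "example.com".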
+
+// GetArray returns the value at key in the Tree.
+// It returns []string, []int64, etc type if key has homogeneous lists
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// Returns nil if the path does not exist in the tree.
+// If key is empty, the current tree is returned.
+func (t *Tree) GetArray(key string) interface{} {
+ if key == "" {
+ return t
+ }
+ return t.GetArrayPath(strings.Split(key, "."))
+}
+
+// GetArrayPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetArrayPath(keys []string) interface{} {
+ if len(keys) == 0 {
+ return t
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return nil
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return nil
+ }
+ subtree = node[len(node)-1]
+ default:
+ return nil // cannot navigate through other node types
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ switch n := node.value.(type) {
+ case []interface{}:
+ return getArray(n)
+ default:
+ return node.value
+ }
+ default:
+ return node
+ }
+}
+
+// if homogeneous array, then return slice type object over []interface{}
+func getArray(n []interface{}) interface{} {
+ var s []string
+ var i64 []int64
+ var f64 []float64
+ var bl []bool
+ for _, value := range n {
+ switch v := value.(type) {
+ case string:
+ s = append(s, v)
+ case int64:
+ i64 = append(i64, v)
+ case float64:
+ f64 = append(f64, v)
+ case bool:
+ bl = append(bl, v)
+ default:
+ return n
+ }
+ }
+ if len(s) == len(n) {
+ return s
+ } else if len(i64) == len(n) {
+ return i64
+ } else if len(f64) == len(n) {
+ return f64
+ } else if len(bl) == len(n) {
+ return bl
+ }
+ return n
+}
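+
+// For example, []interface{}{int64(1), int64(2)} becomes []int64{1, 2},
+// whereas the mixed []interface{}{int64(1), "two"} is returned unchanged.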
+
+// GetPosition returns the position of the given key.
+func (t *Tree) GetPosition(key string) Position {
+ if key == "" {
+ return t.position
+ }
+ return t.GetPositionPath(strings.Split(key, "."))
+}
+
+// SetPositionPath sets the position of the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree position is set.
+func (t *Tree) SetPositionPath(keys []string, pos Position) {
+ if len(keys) == 0 {
+ t.position = pos
+ return
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return
+ }
+ subtree = node[len(node)-1]
+ default:
+ return
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ node.position = pos
+ return
+ case *Tree:
+ node.position = pos
+ return
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return
+ }
+ node[len(node)-1].position = pos
+ return
+ }
+}
+
+// GetPositionPath returns the position of the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPositionPath(keys []string) Position {
+ if len(keys) == 0 {
+ return t.position
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return Position{0, 0}
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return Position{0, 0}
+ }
+ subtree = node[len(node)-1]
+ default:
+ return Position{0, 0}
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ return node.position
+ case *Tree:
+ return node.position
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return Position{0, 0}
+ }
+ return node[len(node)-1].position
+ default:
+ return Position{0, 0}
+ }
+}
+
+// GetDefault works like Get but with a default value
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
+ val := t.Get(key)
+ if val == nil {
+ return def
+ }
+ return val
+}
+
+// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour.
+// The default values within the struct are valid default options.
+type SetOptions struct {
+ Comment string
+ Commented bool
+ Multiline bool
+ Literal bool
+}
+
+// SetWithOptions is the same as Set, but allows you to provide formatting
+// instructions for the key, which will be used by Marshal().
+func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
+ t.SetPathWithOptions(strings.Split(key, "."), opts, value)
+}
+
+// SetPathWithOptions is the same as SetPath, but allows you to provide
+// formatting instructions for the key, which will be reused by Marshal().
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
+ subtree := t
+ for i, intermediateKey := range keys[:len(keys)-1] {
+ nextTree, exists := subtree.values[intermediateKey]
+ if !exists {
+ nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
+ subtree.values[intermediateKey] = nextTree // add new element here
+ }
+ switch node := nextTree.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ // create element if it does not exist
+ node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}))
+ subtree.values[intermediateKey] = node
+ }
+ subtree = node[len(node)-1]
+ }
+ }
+
+ var toInsert interface{}
+
+ switch v := value.(type) {
+ case *Tree:
+ v.comment = opts.Comment
+ v.commented = opts.Commented
+ toInsert = value
+ case []*Tree:
+ for i := range v {
+ v[i].commented = opts.Commented
+ }
+ toInsert = value
+ case *tomlValue:
+ v.comment = opts.Comment
+ v.commented = opts.Commented
+ v.multiline = opts.Multiline
+ v.literal = opts.Literal
+ toInsert = v
+ default:
+ toInsert = &tomlValue{value: value,
+ comment: opts.Comment,
+ commented: opts.Commented,
+ multiline: opts.Multiline,
+ literal: opts.Literal,
+ position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}}
+ }
+
+ subtree.values[keys[len(keys)-1]] = toInsert
+}
+
+// Set an element in the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) Set(key string, value interface{}) {
+ t.SetWithComment(key, "", false, value)
+}
+
+// SetWithComment is the same as Set, but allows you to provide comment
+// information to the key, that will be reused by Marshal().
+func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
+ t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
+}
+
+// SetPath sets an element in the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, value interface{}) {
+ t.SetPathWithComment(keys, "", false, value)
+}
+
+// SetPathWithComment is the same as SetPath, but allows you to provide comment
+// information to the key, that will be reused by Marshal().
+func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
+ t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value)
+}
+
+// Delete removes a key from the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+func (t *Tree) Delete(key string) error {
+ keys, err := parseKey(key)
+ if err != nil {
+ return err
+ }
+ return t.DeletePath(keys)
+}
+
+// DeletePath removes a key from the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+func (t *Tree) DeletePath(keys []string) error {
+ keyLen := len(keys)
+ if keyLen == 1 {
+ delete(t.values, keys[0])
+ return nil
+ }
+ tree := t.GetPath(keys[:keyLen-1])
+ item := keys[keyLen-1]
+ switch node := tree.(type) {
+ case *Tree:
+ delete(node.values, item)
+ return nil
+ }
+ return errors.New("no such key to delete")
+}
+
+// createSubTree takes a tree and a key and creates the necessary intermediate
+// subtrees so that a subtree exists at that point. In-place.
+//
+// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
+// and tree[a][b][c]
+//
+// Returns nil on success, error object on failure
+func (t *Tree) createSubTree(keys []string, pos Position) error {
+ subtree := t
+ for i, intermediateKey := range keys {
+ nextTree, exists := subtree.values[intermediateKey]
+ if !exists {
+ tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
+ tree.position = pos
+ tree.inline = subtree.inline
+ subtree.values[intermediateKey] = tree
+ nextTree = tree
+ }
+
+ switch node := nextTree.(type) {
+ case []*Tree:
+ subtree = node[len(node)-1]
+ case *Tree:
+ subtree = node
+ default:
+ return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
+ strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
+ }
+ }
+ return nil
+}
+
+// LoadBytes creates a Tree from a []byte.
+func LoadBytes(b []byte) (tree *Tree, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = errors.New(r.(string))
+ }
+ }()
+
+ if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) {
+ b = b[4:]
+ } else if len(b) >= 3 && hasUTF8BOM3(b) {
+ b = b[3:]
+ } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) {
+ b = b[2:]
+ }
+
+ tree = parseToml(lexToml(b))
+ return
+}
+
+func hasUTF16BigEndianBOM2(b []byte) bool {
+ return b[0] == 0xFE && b[1] == 0xFF
+}
+
+func hasUTF16LittleEndianBOM2(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE
+}
+
+func hasUTF8BOM3(b []byte) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+func hasUTF32BigEndianBOM4(b []byte) bool {
+ return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF
+}
+
+func hasUTF32LittleEndianBOM4(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00
+}
+
+// LoadReader creates a Tree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *Tree, err error) {
+ inputBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return
+ }
+ tree, err = LoadBytes(inputBytes)
+ return
+}
+
+// Load creates a Tree from a string.
+func Load(content string) (tree *Tree, err error) {
+ return LoadBytes([]byte(content))
+}
+
+// LoadFile creates a Tree from a file.
+func LoadFile(path string) (tree *Tree, err error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return LoadReader(file)
+}
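+
+// A typical entry point (illustrative sketch):
+//
+//	tree, err := toml.Load(`[server]
+//	host = "localhost"`)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(tree.Get("server.host")) // prints: localhost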
diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go
new file mode 100644
index 00000000000..4136b462544
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomlpub.go
@@ -0,0 +1,71 @@
+package toml
+
+// PubTOMLValue wraps tomlValue so that all of its properties are accessible from outside the package.
+type PubTOMLValue = tomlValue
+
+func (ptv *PubTOMLValue) Value() interface{} {
+ return ptv.value
+}
+func (ptv *PubTOMLValue) Comment() string {
+ return ptv.comment
+}
+func (ptv *PubTOMLValue) Commented() bool {
+ return ptv.commented
+}
+func (ptv *PubTOMLValue) Multiline() bool {
+ return ptv.multiline
+}
+func (ptv *PubTOMLValue) Position() Position {
+ return ptv.position
+}
+
+func (ptv *PubTOMLValue) SetValue(v interface{}) {
+ ptv.value = v
+}
+func (ptv *PubTOMLValue) SetComment(s string) {
+ ptv.comment = s
+}
+func (ptv *PubTOMLValue) SetCommented(c bool) {
+ ptv.commented = c
+}
+func (ptv *PubTOMLValue) SetMultiline(m bool) {
+ ptv.multiline = m
+}
+func (ptv *PubTOMLValue) SetPosition(p Position) {
+ ptv.position = p
+}
+
+// PubTree wraps Tree so that all of its properties are accessible from outside the package.
+type PubTree = Tree
+
+func (pt *PubTree) Values() map[string]interface{} {
+ return pt.values
+}
+
+func (pt *PubTree) Comment() string {
+ return pt.comment
+}
+
+func (pt *PubTree) Commented() bool {
+ return pt.commented
+}
+
+func (pt *PubTree) Inline() bool {
+ return pt.inline
+}
+
+func (pt *PubTree) SetValues(v map[string]interface{}) {
+ pt.values = v
+}
+
+func (pt *PubTree) SetComment(c string) {
+ pt.comment = c
+}
+
+func (pt *PubTree) SetCommented(c bool) {
+ pt.commented = c
+}
+
+func (pt *PubTree) SetInline(i bool) {
+ pt.inline = i
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
new file mode 100644
index 00000000000..80353500a0a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -0,0 +1,155 @@
+package toml
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+var kindToType = [reflect.String + 1]reflect.Type{
+ reflect.Bool: reflect.TypeOf(true),
+ reflect.String: reflect.TypeOf(""),
+ reflect.Float32: reflect.TypeOf(float64(1)),
+ reflect.Float64: reflect.TypeOf(float64(1)),
+ reflect.Int: reflect.TypeOf(int64(1)),
+ reflect.Int8: reflect.TypeOf(int64(1)),
+ reflect.Int16: reflect.TypeOf(int64(1)),
+ reflect.Int32: reflect.TypeOf(int64(1)),
+ reflect.Int64: reflect.TypeOf(int64(1)),
+ reflect.Uint: reflect.TypeOf(uint64(1)),
+ reflect.Uint8: reflect.TypeOf(uint64(1)),
+ reflect.Uint16: reflect.TypeOf(uint64(1)),
+ reflect.Uint32: reflect.TypeOf(uint64(1)),
+ reflect.Uint64: reflect.TypeOf(uint64(1)),
+}
+
+// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
+// supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+func typeFor(k reflect.Kind) reflect.Type {
+ if k > 0 && int(k) < len(kindToType) {
+ return kindToType[k]
+ }
+ return nil
+}
+
+func simpleValueCoercion(object interface{}) (interface{}, error) {
+ switch original := object.(type) {
+ case string, bool, int64, uint64, float64, time.Time:
+ return original, nil
+ case int:
+ return int64(original), nil
+ case int8:
+ return int64(original), nil
+ case int16:
+ return int64(original), nil
+ case int32:
+ return int64(original), nil
+ case uint:
+ return uint64(original), nil
+ case uint8:
+ return uint64(original), nil
+ case uint16:
+ return uint64(original), nil
+ case uint32:
+ return uint64(original), nil
+ case float32:
+ return float64(original), nil
+ case fmt.Stringer:
+ return original.String(), nil
+ case []interface{}:
+ value := reflect.ValueOf(original)
+ length := value.Len()
+ arrayValue := reflect.MakeSlice(value.Type(), 0, length)
+ for i := 0; i < length; i++ {
+ val := value.Index(i).Interface()
+ simpleValue, err := simpleValueCoercion(val)
+ if err != nil {
+ return nil, err
+ }
+ arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+ }
+ return arrayValue.Interface(), nil
+ default:
+ return nil, fmt.Errorf("cannot convert type %T to Tree", object)
+ }
+}
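+
+// For example, int32(7) coerces to int64(7), float32(1.5) to float64(1.5),
+// and any fmt.Stringer to the result of its String() method.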
+
+func sliceToTree(object interface{}) (interface{}, error) {
+ // arrays are a bit tricky, since they can represent either a
+ // collection of simple values, which is represented by one
+ // *tomlValue, or an array of tables, which is represented by an
+ // array of *Tree.
+
+ // assumes toTree only calls this function when value.Kind() is Array or Slice
+ value := reflect.ValueOf(object)
+ insideType := value.Type().Elem()
+ length := value.Len()
+ if length > 0 {
+ insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
+ }
+ if insideType.Kind() == reflect.Map {
+ // this is considered as an array of tables
+ tablesArray := make([]*Tree, 0, length)
+ for i := 0; i < length; i++ {
+ table := value.Index(i)
+ tree, err := toTree(table.Interface())
+ if err != nil {
+ return nil, err
+ }
+ tablesArray = append(tablesArray, tree.(*Tree))
+ }
+ return tablesArray, nil
+ }
+
+ sliceType := typeFor(insideType.Kind())
+ if sliceType == nil {
+ sliceType = insideType
+ }
+
+ arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
+
+ for i := 0; i < length; i++ {
+ val := value.Index(i).Interface()
+ simpleValue, err := simpleValueCoercion(val)
+ if err != nil {
+ return nil, err
+ }
+ arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+ }
+ return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
+}
+
+func toTree(object interface{}) (interface{}, error) {
+ value := reflect.ValueOf(object)
+
+ if value.Kind() == reflect.Map {
+ values := map[string]interface{}{}
+ keys := value.MapKeys()
+ for _, key := range keys {
+ if key.Kind() != reflect.String {
+ if _, ok := key.Interface().(string); !ok {
+ return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
+ }
+ }
+
+ v := value.MapIndex(key)
+ newValue, err := toTree(v.Interface())
+ if err != nil {
+ return nil, err
+ }
+ values[key.String()] = newValue
+ }
+ return &Tree{values: values, position: Position{}}, nil
+ }
+
+ if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
+ return sliceToTree(object)
+ }
+
+ simpleValue, err := simpleValueCoercion(object)
+ if err != nil {
+ return nil, err
+ }
+ return &tomlValue{value: simpleValue, position: Position{}}, nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
new file mode 100644
index 00000000000..c9afbdab76e
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -0,0 +1,552 @@
+package toml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type valueComplexity int
+
+const (
+ valueSimple valueComplexity = iota + 1
+ valueComplex
+)
+
+type sortNode struct {
+ key string
+ complexity valueComplexity
+}
+
+// Encodes a string to a TOML-compliant multi-line string value
+// This function is a clone of the existing encodeTomlString function, except that whitespace characters
+// are preserved. Quotation marks and backslashes are also not escaped.
+func encodeMultilineTomlString(value string, commented string) string {
+ var b bytes.Buffer
+ adjacentQuoteCount := 0
+
+ b.WriteString(commented)
+ for i, rr := range value {
+ if rr != '"' {
+ adjacentQuoteCount = 0
+ } else {
+ adjacentQuoteCount++
+ }
+ switch rr {
+ case '\b':
+ b.WriteString(`\b`)
+ case '\t':
+ b.WriteString("\t")
+ case '\n':
+ b.WriteString("\n" + commented)
+ case '\f':
+ b.WriteString(`\f`)
+ case '\r':
+ b.WriteString("\r")
+ case '"':
+ if adjacentQuoteCount >= 3 || i == len(value)-1 {
+ adjacentQuoteCount = 0
+ b.WriteString(`\"`)
+ } else {
+ b.WriteString(`"`)
+ }
+ case '\\':
+ b.WriteString(`\`)
+ default:
+ intRr := uint16(rr)
+ if intRr < 0x001F {
+ b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+ } else {
+ b.WriteRune(rr)
+ }
+ }
+ }
+ return b.String()
+}
+
+// Encodes a string to a TOML-compliant string value
+func encodeTomlString(value string) string {
+ var b bytes.Buffer
+
+ for _, rr := range value {
+ switch rr {
+ case '\b':
+ b.WriteString(`\b`)
+ case '\t':
+ b.WriteString(`\t`)
+ case '\n':
+ b.WriteString(`\n`)
+ case '\f':
+ b.WriteString(`\f`)
+ case '\r':
+ b.WriteString(`\r`)
+ case '"':
+ b.WriteString(`\"`)
+ case '\\':
+ b.WriteString(`\\`)
+ default:
+ intRr := uint16(rr)
+ if intRr < 0x001F {
+ b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+ } else {
+ b.WriteRune(rr)
+ }
+ }
+ }
+ return b.String()
+}
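+
+// For example, encodeTomlString("say \"hi\"\n") returns `say \"hi\"\n`: the
+// quote and the newline are escaped so the result fits a basic "..." string.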
+
+func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) {
+ var orderedVals []sortNode
+ switch ord {
+ case OrderPreserve:
+ orderedVals = sortByLines(t)
+ default:
+ orderedVals = sortAlphabetical(t)
+ }
+
+ var values []string
+ for _, node := range orderedVals {
+ k := node.key
+ v := t.values[k]
+
+ repr, err := tomlValueStringRepresentation(v, "", "", ord, false)
+ if err != nil {
+ return "", err
+ }
+ values = append(values, quoteKeyIfNeeded(k)+" = "+repr)
+ }
+ return "{ " + strings.Join(values, ", ") + " }", nil
+}
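+
+// For example, an inline tree holding a = 1 and b = "x" renders as
+// `{ a = 1, b = "x" }`.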
+
+func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
+ // v may arrive wrapped in a *tomlValue: writeToOrdered passes the wrapper so
+ // this function can see formatting options. Unwrap it here.
+ tv, ok := v.(*tomlValue)
+ if ok {
+ v = tv.value
+ } else {
+ tv = &tomlValue{}
+ }
+
+ switch value := v.(type) {
+ case uint64:
+ return strconv.FormatUint(value, 10), nil
+ case int64:
+ return strconv.FormatInt(value, 10), nil
+ case float64:
+ // Default bit length is full 64
+ bits := 64
+ // big.NewFloat panics when given NaN
+ if !math.IsNaN(value) {
+ // if 32-bit precision is enough to represent the value exactly, use 32
+ _, acc := big.NewFloat(value).Float32()
+ if acc == big.Exact {
+ bits = 32
+ }
+ }
+ if math.Trunc(value) == value {
+ return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil
+ }
+ return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil
+ case string:
+ if tv.multiline {
+ if tv.literal {
+ b := strings.Builder{}
+ b.WriteString("'''\n")
+ b.Write([]byte(value))
+ b.WriteString("\n'''")
+ return b.String(), nil
+ } else {
+ return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil
+ }
+ }
+ return "\"" + encodeTomlString(value) + "\"", nil
+ case []byte:
+ b, _ := v.([]byte)
+ return string(b), nil
+ case bool:
+ if value {
+ return "true", nil
+ }
+ return "false", nil
+ case time.Time:
+ return value.Format(time.RFC3339), nil
+ case LocalDate:
+ return value.String(), nil
+ case LocalDateTime:
+ return value.String(), nil
+ case LocalTime:
+ return value.String(), nil
+ case *Tree:
+ return tomlTreeStringRepresentation(value, ord)
+ case nil:
+ return "", nil
+ }
+
+ rv := reflect.ValueOf(v)
+
+ if rv.Kind() == reflect.Slice {
+ var values []string
+ for i := 0; i < rv.Len(); i++ {
+ item := rv.Index(i).Interface()
+ itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine)
+ if err != nil {
+ return "", err
+ }
+ values = append(values, itemRepr)
+ }
+ if arraysOneElementPerLine && len(values) > 1 {
+ stringBuffer := bytes.Buffer{}
+ valueIndent := indent + ` ` // TODO: move that to a shared encoder state
+
+ stringBuffer.WriteString("[\n")
+
+ for _, value := range values {
+ stringBuffer.WriteString(valueIndent)
+ stringBuffer.WriteString(commented + value)
+ stringBuffer.WriteString(`,`)
+ stringBuffer.WriteString("\n")
+ }
+
+ stringBuffer.WriteString(indent + commented + "]")
+
+ return stringBuffer.String(), nil
+ }
+ return "[" + strings.Join(values, ", ") + "]", nil
+ }
+ return "", fmt.Errorf("unsupported value type %T: %v", v, v)
+}
+
+func getTreeArrayLine(trees []*Tree) (line int) {
+ // start from the largest int so empty trees do not return line 0
+ line = int(^uint(0) >> 1)
+ // get the lowest line number
+ for _, tv := range trees {
+ if tv.position.Line < line || line == 0 {
+ line = tv.position.Line
+ }
+ }
+ return
+}
+
+func sortByLines(t *Tree) (vals []sortNode) {
+ var (
+ line int
+ lines []int
+ tv *Tree
+ tom *tomlValue
+ node sortNode
+ )
+ vals = make([]sortNode, 0)
+ m := make(map[int]sortNode)
+
+ for k := range t.values {
+ v := t.values[k]
+ switch v.(type) {
+ case *Tree:
+ tv = v.(*Tree)
+ line = tv.position.Line
+ node = sortNode{key: k, complexity: valueComplex}
+ case []*Tree:
+ line = getTreeArrayLine(v.([]*Tree))
+ node = sortNode{key: k, complexity: valueComplex}
+ default:
+ tom = v.(*tomlValue)
+ line = tom.position.Line
+ node = sortNode{key: k, complexity: valueSimple}
+ }
+ lines = append(lines, line)
+ vals = append(vals, node)
+ m[line] = node
+ }
+ sort.Ints(lines)
+
+ for i, line := range lines {
+ vals[i] = m[line]
+ }
+
+ return vals
+}
+
+func sortAlphabetical(t *Tree) (vals []sortNode) {
+ var (
+ node sortNode
+ simpVals []string
+ compVals []string
+ )
+ vals = make([]sortNode, 0)
+ m := make(map[string]sortNode)
+
+ for k := range t.values {
+ v := t.values[k]
+ switch v.(type) {
+ case *Tree, []*Tree:
+ node = sortNode{key: k, complexity: valueComplex}
+ compVals = append(compVals, node.key)
+ default:
+ node = sortNode{key: k, complexity: valueSimple}
+ simpVals = append(simpVals, node.key)
+ }
+ vals = append(vals, node)
+ m[node.key] = node
+ }
+
+ // Simple values first, to match the previous implementation.
+ sort.Strings(simpVals)
+ i := 0
+ for _, key := range simpVals {
+ vals[i] = m[key]
+ i++
+ }
+
+ sort.Strings(compVals)
+ for _, key := range compVals {
+ vals[i] = m[key]
+ i++
+ }
+
+ return vals
+}
+
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
+ return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false)
+}
+
+func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) {
+ var orderedVals []sortNode
+
+ switch ord {
+ case OrderPreserve:
+ orderedVals = sortByLines(t)
+ default:
+ orderedVals = sortAlphabetical(t)
+ }
+
+ for _, node := range orderedVals {
+ switch node.complexity {
+ case valueComplex:
+ k := node.key
+ v := t.values[k]
+
+ combinedKey := quoteKeyIfNeeded(k)
+ if keyspace != "" {
+ combinedKey = keyspace + "." + combinedKey
+ }
+
+ switch node := v.(type) {
+ // node has to be of those two types given how keys are sorted above
+ case *Tree:
+ tv, ok := t.values[k].(*Tree)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+ if tv.comment != "" {
+ comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+
+ var commented string
+ if parentCommented || t.commented || tv.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented)
+ if err != nil {
+ return bytesCount, err
+ }
+ case []*Tree:
+ for _, subTree := range node {
+ var commented string
+ if parentCommented || t.commented || subTree.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+ }
+ default: // Simple
+ k := node.key
+ v, ok := t.values[k].(*tomlValue)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+
+ var commented string
+ if parentCommented || t.commented || v.commented {
+ commented = "# "
+ }
+ repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ if v.comment != "" {
+ comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ if !compactComments {
+ writtenBytesCountComment, errc := writeStrings(w, "\n")
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+ writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n")
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+
+ quotedKey := quoteKeyIfNeeded(k)
+ writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+ }
+
+ return bytesCount, nil
+}
+
+// quote a key if it does not fit the bare key format (A-Za-z0-9_-)
+// quoted keys use the same rules as strings
+func quoteKeyIfNeeded(k string) string {
+ // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain
+ // keys that have already been quoted.
+ // not an ideal situation, but good enough as a stopgap.
+ if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' {
+ return k
+ }
+ isBare := true
+ for _, r := range k {
+ if !isValidBareChar(r) {
+ isBare = false
+ break
+ }
+ }
+ if isBare {
+ return k
+ }
+ return quoteKey(k)
+}
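+
+// For example, "host-name_1" is already a valid bare key and stays as-is,
+// while "my key" is escaped and quoted to "\"my key\"".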
+
+func quoteKey(k string) string {
+ return "\"" + encodeTomlString(k) + "\""
+}
+
+func writeStrings(w io.Writer, s ...string) (int, error) {
+ var n int
+ for i := range s {
+ b, err := io.WriteString(w, s[i])
+ n += b
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+// WriteTo encodes the Tree as TOML and writes it to the writer w.
+// It returns the number of bytes written on success, or an error otherwise.
+func (t *Tree) WriteTo(w io.Writer) (int64, error) {
+ return t.writeTo(w, "", "", 0, false)
+}
+
+// ToTomlString generates a human-readable representation of the current tree.
+// Output spans multiple lines, and is suitable for ingest by a TOML parser.
+// If the conversion cannot be performed, ToTomlString returns a non-nil error.
+func (t *Tree) ToTomlString() (string, error) {
+ b, err := t.Marshal()
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+// String generates a human-readable representation of the current tree.
+// Alias of ToTomlString. Present to implement the fmt.Stringer interface.
+func (t *Tree) String() string {
+ result, _ := t.ToTomlString()
+ return result
+}
+
+// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used:
+//
+// * bool
+// * float64
+// * int64
+// * string
+// * uint64
+// * time.Time
+// * map[string]interface{} (where interface{} is any of this list)
+// * []interface{} (where interface{} is any of this list)
+func (t *Tree) ToMap() map[string]interface{} {
+ result := map[string]interface{}{}
+
+ for k, v := range t.values {
+ switch node := v.(type) {
+ case []*Tree:
+ var array []interface{}
+ for _, item := range node {
+ array = append(array, item.ToMap())
+ }
+ result[k] = array
+ case *Tree:
+ result[k] = node.ToMap()
+ case *tomlValue:
+ result[k] = tomlValueToGo(node.value)
+ }
+ }
+ return result
+}
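+
+// For example, a tree parsed from:
+//
+//	[a]
+//	b = 1
+//
+// maps to map[string]interface{}{"a": map[string]interface{}{"b": int64(1)}}.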
+
+func tomlValueToGo(v interface{}) interface{} {
+ if tree, ok := v.(*Tree); ok {
+ return tree.ToMap()
+ }
+
+ rv := reflect.ValueOf(v)
+
+ if rv.Kind() != reflect.Slice {
+ return v
+ }
+ values := make([]interface{}, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ item := rv.Index(i).Interface()
+ values[i] = tomlValueToGo(item)
+ }
+ return values
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go
new file mode 100644
index 00000000000..fa326308cfe
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go
@@ -0,0 +1,6 @@
+package toml
+
+// ValueStringRepresentation transforms an interface{} value into its TOML string representation.
+func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) {
+ return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index aca9b1cab7f..0e7057b0251 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -55,6 +55,7 @@ github.com/containerd/containerd/images
github.com/containerd/containerd/labels
github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/seed
+github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/reference
github.com/containerd/containerd/remotes
@@ -143,12 +144,15 @@ github.com/docker/docker/api/types/versions
github.com/docker/docker/api/types/volume
github.com/docker/docker/client
github.com/docker/docker/errdefs
+github.com/docker/docker/pkg/archive
github.com/docker/docker/pkg/fileutils
github.com/docker/docker/pkg/homedir
+github.com/docker/docker/pkg/idtools
github.com/docker/docker/pkg/ioutils
github.com/docker/docker/pkg/jsonmessage
github.com/docker/docker/pkg/longpath
github.com/docker/docker/pkg/namesgenerator
+github.com/docker/docker/pkg/pools
github.com/docker/docker/pkg/stdcopy
github.com/docker/docker/pkg/stringid
github.com/docker/docker/pkg/system
@@ -354,6 +358,11 @@ github.com/opencontainers/go-digest
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
+# github.com/opencontainers/runc v1.0.1
+github.com/opencontainers/runc/libcontainer/user
+# github.com/pelletier/go-toml v1.9.4
+## explicit
+github.com/pelletier/go-toml
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors