From 21487c5d56309933597fe5dfa3d3b34f7c70cbad Mon Sep 17 00:00:00 2001
From: Guillaume Lours <705411+glours@users.noreply.github.com>
Date: Thu, 8 Feb 2024 15:34:22 +0100
Subject: [PATCH] use compose-go to load and parse compose files

Signed-off-by: Guillaume Lours <705411+glours@users.noreply.github.com>
---
 cli/command/stack/config.go | 16 +-
 cli/command/stack/config_test.go | 27 +-
 cli/command/stack/deploy.go | 4 +-
 cli/command/stack/loader/loader.go | 29 +-
 cli/command/stack/swarm/deploy.go | 6 +-
 cli/command/stack/swarm/deploy_composefile.go | 18 +-
 cli/compose/convert/compose.go | 10 +-
 cli/compose/convert/compose_test.go | 14 +-
 cli/compose/convert/service.go | 76 +-
 cli/compose/convert/service_test.go | 46 +-
 cli/compose/convert/volume.go | 12 +-
 cli/compose/convert/volume_test.go | 39 +-
 cli/compose/interpolation/interpolation.go | 164 -
 .../interpolation/interpolation_test.go | 148 -
 cli/compose/loader/full-struct_test.go | 189 +-
 cli/compose/loader/interpolate.go | 75 -
 cli/compose/loader/loader.go | 966 +-----
 cli/compose/loader/loader_test.go | 1785 +---------
 cli/compose/loader/merge.go | 304 --
 cli/compose/loader/merge_test.go | 1299 -------
 .../loader/testdata/full-example.json.golden | 138 +-
 .../loader/testdata/full-example.yaml.golden | 59 +-
 cli/compose/loader/types_test.go | 20 +-
 cli/compose/loader/volume.go | 2 +-
 cli/compose/loader/volume_test.go | 2 +-
 cli/compose/template/template.go | 248 --
 cli/compose/template/template_test.go | 286 --
 cli/compose/types/types.go | 561 +--
 e2e/stack/testdata/full-stack.yml | 1 -
 vendor.mod | 23 +-
 vendor.sum | 45 +-
 vendor/dario.cat/mergo/.deepsource.toml | 12 -
 vendor/dario.cat/mergo/.gitignore | 33 -
 vendor/dario.cat/mergo/.travis.yml | 12 -
 vendor/dario.cat/mergo/CODE_OF_CONDUCT.md | 46 -
 vendor/dario.cat/mergo/CONTRIBUTING.md | 112 -
 vendor/dario.cat/mergo/README.md | 248 --
 vendor/dario.cat/mergo/SECURITY.md | 14 -
 vendor/dario.cat/mergo/doc.go | 148 -
 vendor/dario.cat/mergo/map.go | 178 -
 vendor/dario.cat/mergo/merge.go | 409 ---
 vendor/dario.cat/mergo/mergo.go | 81 -
 .../compose-spec/compose-go/v2/LICENSE | 191 ++
 .../compose-spec/compose-go/v2/NOTICE | 2 +
 .../compose-go/v2/consts/consts.go | 29 +
 .../compose-spec/compose-go/v2/dotenv/LICENSE | 22 +
 .../compose-spec/compose-go/v2/dotenv/env.go | 76 +
 .../compose-go/v2/dotenv/godotenv.go | 175 +
 .../compose-go/v2/dotenv/parser.go | 282 ++
 .../compose-go/v2/errdefs/errors.go | 53 +
 .../compose-go/v2/format/volume.go | 184 +
 .../compose-spec/compose-go/v2/graph/graph.go | 111 +
 .../compose-go/v2/graph/services.go | 80 +
 .../compose-go/v2/graph/traversal.go | 211 ++
 .../v2/interpolation/interpolation.go | 137 +
 .../compose-go/v2/loader/example1.env | 10 +
 .../compose-go/v2/loader/example2.env | 4 +
 .../compose-go/v2/loader/extends.go | 214 ++
 .../compose-spec/compose-go/v2/loader/fix.go | 36 +
 .../compose-go/v2/loader/full-example.yml | 455 +++
 .../compose-go/v2/loader/include.go | 173 +
 .../compose-go/v2/loader/interpolate.go | 117 +
 .../compose-go/v2/loader/loader.go | 787 +++++
 .../compose-go/v2/loader/mapstructure.go | 79 +
 .../compose-go/v2/loader/normalize.go | 283 ++
 .../compose-go/v2/loader/paths.go | 74 +
 .../compose-go/v2/loader/reset.go | 126 +
 .../compose-go/v2/loader/validate.go | 146 +
 .../compose-go/v2/override/extends.go | 27 +
 .../compose-go/v2/override/merge.go | 253 ++
 .../compose-go/v2/override/uncity.go | 204 ++
 .../compose-go/v2/paths/context.go | 44 +
 .../compose-go/v2/paths/extends.go | 25 +
 .../compose-spec/compose-go/v2/paths/home.go | 37 +
 .../compose-go/v2/paths/resolve.go | 161 +
 .../compose-spec/compose-go/v2/paths/unix.go | 40 +
 .../compose-go/v2/paths/windows_path.go | 82 +
 .../compose-go/v2/schema/compose-spec.json | 907 +++++
 .../compose-go/v2/schema/schema.go | 164 +
 .../compose-go/v2/template/template.go | 473 +++
 .../compose-go/v2/transform/build.go | 48 +
 .../compose-go/v2/transform/canonical.go | 107 +
 .../compose-go/v2/transform/defaults.go | 87 +
 .../compose-go/v2/transform/dependson.go | 53 +
 .../compose-go/v2/transform/envfile.go | 55 +
 .../compose-go/v2/transform/extends.go | 36 +
 .../compose-go/v2/transform/external.go | 54 +
 .../compose-go/v2/transform/include.go | 36 +
 .../compose-go/v2/transform/mapping.go | 43 +
 .../compose-go/v2/transform/ports.go | 86 +
 .../compose-go/v2/transform/secrets.go | 49 +
 .../compose-go/v2/transform/services.go | 41 +
 .../compose-go/v2/transform/ssh.go | 51 +
 .../compose-go/v2/transform/ulimits.go | 34 +
 .../compose-go/v2/transform/volume.go | 49 +
 .../compose-spec/compose-go/v2/tree/path.go | 87 +
 .../compose-spec/compose-go/v2/types/bytes.go | 48 +
 .../compose-go/v2/types/command.go | 86 +
 .../compose-go/v2/types/config.go | 135 +
 .../compose-go/v2/types/develop.go | 38 +
 .../compose-go/v2/types/device.go | 52 +
 .../compose-go/v2/types/duration.go | 60 +
 .../compose-go/v2/types/envfile.go | 46 +
 .../compose-go/v2/types/healthcheck.go | 53 +
 .../compose-go/v2/types/hostList.go | 133 +
 .../compose-go/v2/types/labels.go | 77 +
 .../compose-go/v2/types/mapping.go | 217 ++
 .../compose-go/v2/types/options.go | 42 +
 .../compose-go/v2/types/project.go | 664 ++++
 .../compose-go/v2/types/services.go | 45 +
 .../compose-spec/compose-go/v2/types/ssh.go | 73 +
 .../compose-go/v2/types/stringOrList.go | 57 +
 .../compose-spec/compose-go/v2/types/types.go | 756 ++++
 .../compose-go/v2/utils/collectionutils.go | 68 +
 .../compose-spec/compose-go/v2/utils/set.go | 95 +
 .../compose-go/v2/utils/stringutils.go | 48 +
 .../compose-go/v2/validation/external.go | 49 +
 .../compose-go/v2/validation/validation.go | 96 +
 .../compose-go/v2/validation/volume.go | 39 +
 .../mattn/go-shellwords/.travis.yml | 16 +
 vendor/github.com/mattn/go-shellwords/LICENSE | 21 +
 .../github.com/mattn/go-shellwords/README.md | 55 +
 .../github.com/mattn/go-shellwords/go.test.sh | 12 +
 .../mattn/go-shellwords/shellwords.go | 317 ++
 .../mattn/go-shellwords/util_posix.go | 29 +
 .../mattn/go-shellwords/util_windows.go | 29 +
 .../mitchellh/copystructure/LICENSE | 21 +
 .../mitchellh/copystructure/README.md | 21 +
 .../mitchellh/copystructure/copier_time.go | 15 +
 .../mitchellh/copystructure/copystructure.go | 631 ++++
 .../mitchellh/reflectwalk/.travis.yml | 1 +
 .../github.com/mitchellh/reflectwalk/LICENSE | 21 +
 .../mitchellh/reflectwalk/README.md | 6 +
 .../mitchellh/reflectwalk/location.go | 19 +
 .../mitchellh/reflectwalk/location_string.go | 16 +
 .../mitchellh/reflectwalk/reflectwalk.go | 420 +++
 .../mergo => golang.org/x/exp}/LICENSE | 3 +-
 vendor/golang.org/x/exp/PATENTS | 22 +
 .../x/exp/constraints/constraints.go | 50 +
 vendor/golang.org/x/exp/maps/maps.go | 94 +
 vendor/golang.org/x/exp/slices/cmp.go | 44 +
 vendor/golang.org/x/exp/slices/slices.go | 499 +++
 vendor/golang.org/x/exp/slices/sort.go | 195 ++
 .../golang.org/x/exp/slices/zsortanyfunc.go | 479 +++
 .../golang.org/x/exp/slices/zsortordered.go | 481 +++
 vendor/golang.org/x/tools/go/packages/doc.go | 34 +-
 .../x/tools/go/packages/packages.go | 3 +-
 .../x/tools/go/types/objectpath/objectpath.go | 14 +-
 .../x/tools/internal/event/keys/util.go | 21 +
.../x/tools/internal/gcimporter/iexport.go | 31 +- .../x/tools/internal/gcimporter/iimport.go | 39 +- .../x/tools/internal/typeparams/common.go | 24 +- .../x/tools/internal/typeparams/coretype.go | 8 +- .../internal/typeparams/enabled_go117.go | 12 - .../internal/typeparams/enabled_go118.go | 15 - .../x/tools/internal/typeparams/normalize.go | 20 +- .../internal/typeparams/typeparams_go117.go | 197 -- .../internal/typeparams/typeparams_go118.go | 151 - .../{versions_go121.go => versions.go} | 9 +- .../tools/internal/versions/versions_go122.go | 38 - vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 ++++ vendor/gopkg.in/yaml.v3/decode.go | 1000 ++++++ vendor/gopkg.in/yaml.v3/emitterc.go | 2020 +++++++++++ vendor/gopkg.in/yaml.v3/encode.go | 577 ++++ vendor/gopkg.in/yaml.v3/parserc.go | 1258 +++++++ vendor/gopkg.in/yaml.v3/readerc.go | 434 +++ vendor/gopkg.in/yaml.v3/resolve.go | 326 ++ vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +++++++++++++++++ vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 698 ++++ vendor/gopkg.in/yaml.v3/yamlh.go | 807 +++++ vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 ++ vendor/modules.txt | 46 +- 177 files changed, 26092 insertions(+), 7857 deletions(-) delete mode 100644 cli/compose/interpolation/interpolation.go delete mode 100644 cli/compose/interpolation/interpolation_test.go delete mode 100644 cli/compose/loader/interpolate.go delete mode 100644 cli/compose/loader/merge.go delete mode 100644 cli/compose/loader/merge_test.go delete mode 100644 cli/compose/template/template.go delete mode 100644 cli/compose/template/template_test.go delete mode 100644 vendor/dario.cat/mergo/.deepsource.toml delete mode 100644 vendor/dario.cat/mergo/.gitignore delete mode 100644 vendor/dario.cat/mergo/.travis.yml delete mode 100644 vendor/dario.cat/mergo/CODE_OF_CONDUCT.md delete mode 100644 vendor/dario.cat/mergo/CONTRIBUTING.md delete mode 100644 vendor/dario.cat/mergo/README.md delete mode 100644 vendor/dario.cat/mergo/SECURITY.md delete mode 100644 vendor/dario.cat/mergo/doc.go delete mode 100644 vendor/dario.cat/mergo/map.go delete mode 100644 vendor/dario.cat/mergo/merge.go delete mode 100644 vendor/dario.cat/mergo/mergo.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/LICENSE create mode 100644 vendor/github.com/compose-spec/compose-go/v2/NOTICE create mode 100644 vendor/github.com/compose-spec/compose-go/v2/consts/consts.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/format/volume.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/graph/graph.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/graph/services.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/example1.env create mode 100644 
vendor/github.com/compose-spec/compose-go/v2/loader/example2.env create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/fix.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/include.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/loader.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/paths.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/reset.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/validate.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/override/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/override/merge.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/override/uncity.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/context.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/home.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/unix.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json create mode 100644 vendor/github.com/compose-spec/compose-go/v2/schema/schema.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/template/template.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/build.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/external.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/include.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/ports.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/services.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/volume.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/tree/path.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/bytes.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/command.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/config.go 
create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/develop.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/device.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/duration.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/envfile.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/hostList.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/labels.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/mapping.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/options.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/project.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/services.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/ssh.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/types.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/utils/set.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/validation/external.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/validation/validation.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/validation/volume.go create mode 100644 vendor/github.com/mattn/go-shellwords/.travis.yml create mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/go.test.sh create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE create mode 100644 vendor/github.com/mitchellh/copystructure/README.md create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go rename vendor/{dario.cat/mergo => golang.org/x/exp}/LICENSE (92%) create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go create mode 100644 vendor/golang.org/x/exp/slices/slices.go create mode 100644 vendor/golang.org/x/exp/slices/sort.go create mode 100644 vendor/golang.org/x/exp/slices/zsortanyfunc.go create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/util.go delete mode 100644 
vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go rename vendor/golang.org/x/tools/internal/versions/{versions_go121.go => versions.go} (80%) delete mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go122.go create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go diff --git a/cli/command/stack/config.go b/cli/command/stack/config.go index b63a719fc84b..d23e389f8ed2 100644 --- a/cli/command/stack/config.go +++ b/cli/command/stack/config.go @@ -2,14 +2,16 @@ package stack import ( "fmt" + "path/filepath" + specLoader "github.com/compose-spec/compose-go/v2/loader" + compose "github.com/compose-spec/compose-go/v2/types" "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/completion" "github.com/docker/cli/cli/command/stack/loader" "github.com/docker/cli/cli/command/stack/options" composeLoader "github.com/docker/cli/cli/compose/loader" - composetypes "github.com/docker/cli/cli/compose/types" "github.com/spf13/cobra" yaml "gopkg.in/yaml.v2" ) @@ -45,9 +47,17 @@ func newConfigCommand(dockerCli command.Cli) *cobra.Command { } // outputConfig returns the merged and interpolated config file -func outputConfig(configFiles composetypes.ConfigDetails, skipInterpolation bool) (string, error) { - optsFunc := func(opts *composeLoader.Options) { +func outputConfig(configFiles compose.ConfigDetails, skipInterpolation bool) (string, error) { + var dir string + for _, f := range configFiles.ConfigFiles { + if f.Filename != "" { + dir = filepath.Base(f.Filename) + break + } + } + optsFunc := func(opts *specLoader.Options) { opts.SkipInterpolation = skipInterpolation + opts.SetProjectName(specLoader.NormalizeProjectName(dir), true) } config, err := composeLoader.Load(configFiles, optsFunc) if err != nil { diff --git a/cli/command/stack/config_test.go b/cli/command/stack/config_test.go index b4f50160fd22..d1634cc4f1dd 100644 --- a/cli/command/stack/config_test.go +++ b/cli/command/stack/config_test.go @@ -4,8 +4,8 @@ import ( "io" "testing" - "github.com/docker/cli/cli/compose/loader" - composetypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/compose-spec/compose-go/v2/types" + "github.com/docker/cli/internal/test" "gotest.tools/v3/assert" ) @@ -40,13 +40,18 @@ services: image: busybox:${VERSION} command: cat file2.txt `, - expected: `version: "3.7" + expected: `name: firstconfig services: foo: command: - cat - file2.txt image: busybox:1.0 + networks: + default: null +networks: + 
default: + name: firstconfig_default `, }, { @@ -64,28 +69,28 @@ services: image: busybox:${VERSION} command: cat file2.txt `, - expected: `version: "3.7" + expected: `name: firstconfig services: foo: command: - cat - file2.txt image: busybox:${VERSION} + networks: + default: null +networks: + default: + name: firstconfig_default `, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - firstConfigData, err := loader.ParseYAML([]byte(tc.fileOne)) - assert.Check(t, err) - secondConfigData, err := loader.ParseYAML([]byte(tc.fileTwo)) - assert.Check(t, err) - actual, err := outputConfig(composetypes.ConfigDetails{ ConfigFiles: []composetypes.ConfigFile{ - {Config: firstConfigData, Filename: "firstConfig"}, - {Config: secondConfigData, Filename: "secondConfig"}, + {Content: []byte(tc.fileOne), Filename: "firstConfig"}, + {Content: []byte(tc.fileTwo), Filename: "secondConfig"}, }, Environment: map[string]string{ "VERSION": "1.0", diff --git a/cli/command/stack/deploy.go b/cli/command/stack/deploy.go index 8238e4b46417..f4c6ddd5d673 100644 --- a/cli/command/stack/deploy.go +++ b/cli/command/stack/deploy.go @@ -22,11 +22,11 @@ func newDeployCommand(dockerCli command.Cli) *cobra.Command { if err := validateStackName(opts.Namespace); err != nil { return err } - config, err := loader.LoadComposefile(dockerCli, opts) + project, err := loader.LoadComposefile(dockerCli, opts) if err != nil { return err } - return swarm.RunDeploy(cmd.Context(), dockerCli, opts, config) + return swarm.RunDeploy(cmd.Context(), dockerCli, opts, project) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return completeNames(dockerCli)(cmd, args, toComplete) diff --git a/cli/command/stack/loader/loader.go b/cli/command/stack/loader/loader.go index 782275451dcf..e3adc43bd6b7 100644 --- a/cli/command/stack/loader/loader.go +++ b/cli/command/stack/loader/loader.go @@ -12,23 +12,26 @@ import ( "sort" "strings" + specloader "github.com/compose-spec/compose-go/v2/loader" + composetypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/cli/compose/loader" "github.com/docker/cli/cli/compose/schema" - composetypes "github.com/docker/cli/cli/compose/types" "github.com/pkg/errors" ) // LoadComposefile parse the composefile specified in the cli and returns its Config and version. -func LoadComposefile(dockerCli command.Cli, opts options.Deploy) (*composetypes.Config, error) { +func LoadComposefile(dockerCli command.Cli, opts options.Deploy) (*composetypes.Project, error) { configDetails, err := GetConfigDetails(opts.Composefiles, dockerCli.In()) if err != nil { return nil, err } - dicts := getDictsFrom(configDetails.ConfigFiles) - config, err := loader.Load(configDetails) + projectNameFunc := func(o *specloader.Options) { + o.SetProjectName(opts.Namespace, true) + } + project, err := loader.Load(configDetails, projectNameFunc) if err != nil { if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { // this error is intentionally formatted multi-line @@ -38,28 +41,18 @@ func LoadComposefile(dockerCli command.Cli, opts options.Deploy) (*composetypes. return nil, err } - unsupportedProperties := loader.GetUnsupportedProperties(dicts...) 
+ unsupportedProperties := loader.GetUnsupportedProperties(project.Services) if len(unsupportedProperties) > 0 { fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", strings.Join(unsupportedProperties, ", ")) } - deprecatedProperties := loader.GetDeprecatedProperties(dicts...) + deprecatedProperties := loader.GetDeprecatedProperties(project.Services) if len(deprecatedProperties) > 0 { fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", propertyWarnings(deprecatedProperties)) } - return config, nil -} - -func getDictsFrom(configFiles []composetypes.ConfigFile) []map[string]any { - dicts := []map[string]any{} - - for _, configFile := range configFiles { - dicts = append(dicts, configFile.Config) - } - - return dicts + return project, nil } func propertyWarnings(properties map[string]string) string { @@ -157,7 +150,7 @@ func loadConfigFile(filename string, stdin io.Reader) (*composetypes.ConfigFile, return nil, err } - config, err := loader.ParseYAML(bytes) + config, err := specloader.ParseYAML(bytes) if err != nil { return nil, err } diff --git a/cli/command/stack/swarm/deploy.go b/cli/command/stack/swarm/deploy.go index c6159dc21a3d..4e4a8ec4f506 100644 --- a/cli/command/stack/swarm/deploy.go +++ b/cli/command/stack/swarm/deploy.go @@ -4,10 +4,10 @@ import ( "context" "fmt" + composetypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/cli/compose/convert" - composetypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" @@ -22,7 +22,7 @@ const ( ) // RunDeploy is the swarm implementation of docker stack deploy -func RunDeploy(ctx context.Context, dockerCli command.Cli, opts options.Deploy, cfg *composetypes.Config) error { +func RunDeploy(ctx context.Context, dockerCli command.Cli, opts options.Deploy, project *composetypes.Project) error { if err := validateResolveImageFlag(&opts); err != nil { return err } @@ -32,7 +32,7 @@ func RunDeploy(ctx context.Context, dockerCli command.Cli, opts options.Deploy, opts.ResolveImage = ResolveImageNever } - return deployCompose(ctx, dockerCli, opts, cfg) + return deployCompose(ctx, dockerCli, opts, project) } // validateResolveImageFlag validates the opts.resolveImage command line option diff --git a/cli/command/stack/swarm/deploy_composefile.go b/cli/command/stack/swarm/deploy_composefile.go index 73810aad8f1b..8015fb1580c8 100644 --- a/cli/command/stack/swarm/deploy_composefile.go +++ b/cli/command/stack/swarm/deploy_composefile.go @@ -4,10 +4,10 @@ import ( "context" "fmt" + composetypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/cli/compose/convert" - composetypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" @@ -16,7 +16,7 @@ import ( "github.com/pkg/errors" ) -func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Deploy, config *composetypes.Config) error { +func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Deploy, project *composetypes.Project) error { if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { return err } @@ -25,14 +25,14 @@ func deployCompose(ctx context.Context, dockerCli command.Cli, opts 
options.Depl if opts.Prune { services := map[string]struct{}{} - for _, service := range config.Services { + for _, service := range project.Services { services[service.Name] = struct{}{} } pruneServices(ctx, dockerCli, namespace, services) } - serviceNetworks := getServicesDeclaredNetworks(config.Services) - networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) + serviceNetworks := getServicesDeclaredNetworks(project.Services) + networks, externalNetworks := convert.Networks(namespace, project.Networks, serviceNetworks) if err := validateExternalNetworks(ctx, dockerCli.Client(), externalNetworks); err != nil { return err } @@ -40,7 +40,7 @@ func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Depl return err } - secrets, err := convert.Secrets(namespace, config.Secrets) + secrets, err := convert.Secrets(namespace, project.Secrets) if err != nil { return err } @@ -48,7 +48,7 @@ func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Depl return err } - configs, err := convert.Configs(namespace, config.Configs) + configs, err := convert.Configs(namespace, project.Configs) if err != nil { return err } @@ -56,14 +56,14 @@ func deployCompose(ctx context.Context, dockerCli command.Cli, opts options.Depl return err } - services, err := convert.Services(ctx, namespace, config, dockerCli.Client()) + services, err := convert.Services(ctx, namespace, project, dockerCli.Client()) if err != nil { return err } return deployServices(ctx, dockerCli, services, namespace, opts.SendRegistryAuth, opts.ResolveImage) } -func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} { +func getServicesDeclaredNetworks(serviceConfigs composetypes.Services) map[string]struct{} { serviceNetworks := map[string]struct{}{} for _, serviceConfig := range serviceConfigs { if len(serviceConfig.Networks) == 0 { diff --git a/cli/compose/convert/compose.go b/cli/compose/convert/compose.go index 98cd92989b8f..bc6189e94e78 100644 --- a/cli/compose/convert/compose.go +++ b/cli/compose/convert/compose.go @@ -4,7 +4,7 @@ import ( "os" "strings" - composetypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/docker/api/types" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/swarm" @@ -52,7 +52,7 @@ func AddStackLabel(namespace Namespace, labels map[string]string) map[string]str type networkMap map[string]composetypes.NetworkConfig // Networks from the compose-file type to the engine API type -func Networks(namespace Namespace, networks networkMap, servicesNetworks map[string]struct{}) (map[string]types.NetworkCreate, []string) { +func Networks(namespace Namespace, networks composetypes.Networks, servicesNetworks map[string]struct{}) (map[string]types.NetworkCreate, []string) { if networks == nil { networks = make(map[string]composetypes.NetworkConfig) } @@ -61,7 +61,7 @@ func Networks(namespace Namespace, networks networkMap, servicesNetworks map[str result := make(map[string]types.NetworkCreate) for internalName := range servicesNetworks { network := networks[internalName] - if network.External.External { + if network.External { externalNetworks = append(externalNetworks, network.Name) continue } @@ -102,7 +102,7 @@ func Networks(namespace Namespace, networks networkMap, servicesNetworks map[str func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, 
error) { result := []swarm.SecretSpec{} for name, secret := range secrets { - if secret.External.External { + if secret.External { continue } @@ -137,7 +137,7 @@ func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) func Configs(namespace Namespace, configs map[string]composetypes.ConfigObjConfig) ([]swarm.ConfigSpec, error) { result := []swarm.ConfigSpec{} for name, config := range configs { - if config.External.External { + if config.External { continue } diff --git a/cli/compose/convert/compose_test.go b/cli/compose/convert/compose_test.go index 0dfac14e156b..b4afb38c9108 100644 --- a/cli/compose/convert/compose_test.go +++ b/cli/compose/convert/compose_test.go @@ -3,7 +3,7 @@ package convert import ( "testing" - composetypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" "gotest.tools/v3/assert" @@ -37,7 +37,7 @@ func TestNetworks(t *testing.T) { "attachablenet": {}, "named": {}, } - source := networkMap{ + source := composetypes.Networks{ "normal": composetypes.NetworkConfig{ Driver: "overlay", DriverOpts: map[string]string{ @@ -56,7 +56,7 @@ func TestNetworks(t *testing.T) { }, }, "outside": composetypes.NetworkConfig{ - External: composetypes.External{External: true}, + External: true, Name: "special", }, "attachablenet": composetypes.NetworkConfig{ @@ -121,9 +121,7 @@ func TestSecrets(t *testing.T) { Labels: map[string]string{"monster": "mash"}, }, "ext": { - External: composetypes.External{ - External: true, - }, + External: true, }, } @@ -152,9 +150,7 @@ func TestConfigs(t *testing.T) { Labels: map[string]string{"monster": "mash"}, }, "ext": { - External: composetypes.External{ - External: true, - }, + External: true, }, } diff --git a/cli/compose/convert/service.go b/cli/compose/convert/service.go index 708c5a7d202f..07308d3c1c08 100644 --- a/cli/compose/convert/service.go +++ b/cli/compose/convert/service.go @@ -4,11 +4,16 @@ import ( "context" "os" "sort" + "strconv" "strings" "time" + "golang.org/x/exp/slices" + + composetypes "github.com/compose-spec/compose-go/v2/types" servicecli "github.com/docker/cli/cli/command/service" - composetypes "github.com/docker/cli/cli/compose/types" + clitypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/cli/opts" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" @@ -28,21 +33,21 @@ const ( func Services( ctx context.Context, namespace Namespace, - config *composetypes.Config, + project *composetypes.Project, apiClient client.CommonAPIClient, ) (map[string]swarm.ServiceSpec, error) { result := make(map[string]swarm.ServiceSpec) - for _, service := range config.Services { - secrets, err := convertServiceSecrets(ctx, apiClient, namespace, service.Secrets, config.Secrets) + for _, service := range project.Services { + secrets, err := convertServiceSecrets(ctx, apiClient, namespace, service.Secrets, project.Secrets) if err != nil { return nil, errors.Wrapf(err, "service %s", service.Name) } - configs, err := convertServiceConfigObjs(ctx, apiClient, namespace, service, config.Configs) + configs, err := convertServiceConfigObjs(ctx, apiClient, namespace, service, project.Configs) if err != nil { return nil, errors.Wrapf(err, "service %s", service.Name) } - serviceSpec, err := Service(apiClient.ClientVersion(), namespace, service, config.Networks, config.Volumes, secrets, configs) + serviceSpec, err := 
Service(apiClient.ClientVersion(), namespace, service, project.Networks, project.Volumes, secrets, configs) if err != nil { return nil, errors.Wrapf(err, "service %s", service.Name) } @@ -63,7 +68,13 @@ func Service( configs []*swarm.ConfigReference, ) (swarm.ServiceSpec, error) { name := namespace.Scope(service.Name) - endpoint := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports) + if service.Deploy == nil { + service.Deploy = &composetypes.DeployConfig{Replicas: nil} + } + endpoint, err := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports) + if err != nil { + return swarm.ServiceSpec{}, err + } mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas) if err != nil { @@ -135,7 +146,7 @@ func Service( Dir: service.WorkingDir, User: service.User, Mounts: mounts, - StopGracePeriod: composetypes.ConvertDurationPtr(service.StopGracePeriod), + StopGracePeriod: clitypes.ConvertDurationPtr(service.StopGracePeriod), StopSignal: service.StopSignal, TTY: service.Tty, OpenStdin: service.StdinOpen, @@ -326,7 +337,7 @@ func convertServiceConfigObjs( // if the credSpec uses a config, then we should grab the config name, and // create a config reference for it. A File or Registry-type CredentialSpec // does not need this operation. - if credSpec.Config != "" { + if credSpec != nil && credSpec.Config != "" { // look up the config in the configSpecs. obj, err := lookup(credSpec.Config) if err != nil { @@ -420,12 +431,12 @@ func uint32Ptr(value uint32) *uint32 { // "IP-address hostname(s)". The original order of mappings is preserved. func convertExtraHosts(extraHosts composetypes.HostsList) []string { hosts := make([]string, 0, len(extraHosts)) - for _, hostIP := range extraHosts { - if hostName, ipAddr, ok := strings.Cut(hostIP, ":"); ok { - // Convert to SwarmKit notation: IP-address hostname(s) - hosts = append(hosts, ipAddr+" "+hostName) + for hostName, hostIPs := range extraHosts { + for _, hostIP := range hostIPs { + hosts = append(hosts, hostIP+" "+hostName) } } + slices.Sort(hosts) return hosts } @@ -497,9 +508,9 @@ func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (* return &swarm.RestartPolicy{ Condition: swarm.RestartPolicyCondition(source.Condition), - Delay: composetypes.ConvertDurationPtr(source.Delay), + Delay: clitypes.ConvertDurationPtr(source.Delay), MaxAttempts: source.MaxAttempts, - Window: composetypes.ConvertDurationPtr(source.Window), + Window: clitypes.ConvertDurationPtr(source.Window), }, nil } @@ -570,13 +581,17 @@ func convertResources(source composetypes.Resources) (*swarm.ResourceRequirement return resources, nil } -func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) *swarm.EndpointSpec { +func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) (*swarm.EndpointSpec, error) { portConfigs := []swarm.PortConfig{} for _, port := range source { + publishedPort, err := strconv.ParseUint(port.Published, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "invalid port configuration %s", port.Published) + } portConfig := swarm.PortConfig{ Protocol: swarm.PortConfigProtocol(port.Protocol), TargetPort: port.Target, - PublishedPort: port.Published, + PublishedPort: uint32(publishedPort), PublishMode: swarm.PortConfigPublishMode(port.Mode), } portConfigs = append(portConfigs, portConfig) @@ -589,7 +604,7 @@ func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortC return &swarm.EndpointSpec{ Mode: 
swarm.ResolutionMode(strings.ToLower(endpointMode)), Ports: portConfigs, - } + }, nil } // convertEnvironment converts key/value mappings to a slice, and sorts @@ -609,9 +624,13 @@ func convertEnvironment(source map[string]*string) []string { return output } -func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) { +func convertDeployMode(mode string, replicas *int) (swarm.ServiceMode, error) { serviceMode := swarm.ServiceMode{} - + var uintReplicas *uint64 + if replicas != nil { + tmpReplicas := uint64(*replicas) + uintReplicas = &tmpReplicas + } switch mode { case "global-job": if replicas != nil { @@ -625,11 +644,11 @@ func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) serviceMode.Global = &swarm.GlobalService{} case "replicated-job": serviceMode.ReplicatedJob = &swarm.ReplicatedJob{ - MaxConcurrent: replicas, - TotalCompletions: replicas, + MaxConcurrent: uintReplicas, + TotalCompletions: uintReplicas, } case "replicated", "": - serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas} + serviceMode.Replicated = &swarm.ReplicatedService{Replicas: uintReplicas} default: return serviceMode, errors.Errorf("Unknown mode: %s", mode) } @@ -646,9 +665,12 @@ func convertDNSConfig(dns []string, dnsSearch []string) *swarm.DNSConfig { return nil } -func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) { +func convertCredentialSpec(namespace Namespace, spec *composetypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) { var o []string + if spec == nil { + return nil, nil + } // Config was added in API v1.40 if spec.Config != "" { o = append(o, `"Config"`) @@ -668,7 +690,11 @@ func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpec case l > 2: return nil, errors.Errorf("invalid credential spec: cannot specify both %s, and %s", strings.Join(o[:l-1], ", "), o[l-1]) } - swarmCredSpec := swarm.CredentialSpec(spec) + swarmCredSpec := swarm.CredentialSpec{ + Config: spec.Config, + File: spec.File, + Registry: spec.Registry, + } // if we're using a swarm Config for the credential spec, over-write it // here with the config ID if swarmCredSpec.Config != "" { diff --git a/cli/compose/convert/service_test.go b/cli/compose/convert/service_test.go index 3cf2ee9e0330..7132e1a8354d 100644 --- a/cli/compose/convert/service_test.go +++ b/cli/compose/convert/service_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - composetypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" @@ -62,17 +62,18 @@ func TestConvertEnvironment(t *testing.T) { } func TestConvertExtraHosts(t *testing.T) { - source := composetypes.HostsList{ + source, err := composetypes.NewHostsList([]string{ "zulu:127.0.0.2", "alpha:127.0.0.1", "zulu:ff02::1", - } - assert.Check(t, is.DeepEqual([]string{"127.0.0.2 zulu", "127.0.0.1 alpha", "ff02::1 zulu"}, convertExtraHosts(source))) + }) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"127.0.0.1 alpha", "127.0.0.2 zulu", "ff02::1 zulu"}, convertExtraHosts(source))) } func TestConvertResourcesFull(t *testing.T) { source := composetypes.Resources{ - Limits: &composetypes.ResourceLimit{ + Limits: &composetypes.Resource{ NanoCPUs: "0.003", MemoryBytes: composetypes.UnitBytes(300000000), 
}, @@ -99,7 +100,7 @@ func TestConvertResourcesFull(t *testing.T) { func TestConvertResourcesOnlyMemory(t *testing.T) { source := composetypes.Resources{ - Limits: &composetypes.ResourceLimit{ + Limits: &composetypes.Resource{ MemoryBytes: composetypes.UnitBytes(300000000), }, Reservations: &composetypes.Resource{ @@ -174,15 +175,16 @@ func TestConvertEndpointSpec(t *testing.T) { { Protocol: "udp", Target: 53, - Published: 1053, + Published: "1053", Mode: "host", }, { Target: 8080, - Published: 80, + Published: "80", }, } - endpoint := convertEndpointSpec("vip", source) + endpoint, err := convertEndpointSpec("vip", source) + assert.NilError(t, err) expected := swarm.EndpointSpec{ Mode: swarm.ResolutionMode(strings.ToLower("vip")), @@ -223,7 +225,7 @@ func TestConvertServiceNetworksOnlyDefault(t *testing.T) { func TestConvertServiceNetworks(t *testing.T) { networkConfigs := networkMap{ "front": composetypes.NetworkConfig{ - External: composetypes.External{External: true}, + External: true, Name: "fronttier", }, "back": composetypes.NetworkConfig{}, @@ -258,7 +260,7 @@ func TestConvertServiceNetworks(t *testing.T) { func TestConvertServiceNetworksCustomDefault(t *testing.T) { networkConfigs := networkMap{ "default": composetypes.NetworkConfig{ - External: composetypes.External{External: true}, + External: true, Name: "custom", }, } @@ -315,7 +317,7 @@ func TestConvertDNSConfigSearch(t *testing.T) { func TestConvertCredentialSpec(t *testing.T) { tests := []struct { name string - in composetypes.CredentialSpecConfig + in *composetypes.CredentialSpecConfig out *swarm.CredentialSpec configs []*swarm.ConfigReference expectedErr string @@ -325,27 +327,27 @@ func TestConvertCredentialSpec(t *testing.T) { }, { name: "config-and-file", - in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json"}, + in: &composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json"}, expectedErr: `invalid credential spec: cannot specify both "Config" and "File"`, }, { name: "config-and-registry", - in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", Registry: "testing"}, + in: &composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", Registry: "testing"}, expectedErr: `invalid credential spec: cannot specify both "Config" and "Registry"`, }, { name: "file-and-registry", - in: composetypes.CredentialSpecConfig{File: "somefile.json", Registry: "testing"}, + in: &composetypes.CredentialSpecConfig{File: "somefile.json", Registry: "testing"}, expectedErr: `invalid credential spec: cannot specify both "File" and "Registry"`, }, { name: "config-and-file-and-registry", - in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json", Registry: "testing"}, + in: &composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json", Registry: "testing"}, expectedErr: `invalid credential spec: cannot specify both "Config", "File", and "Registry"`, }, { name: "missing-config-reference", - in: composetypes.CredentialSpecConfig{Config: "missing"}, + in: &composetypes.CredentialSpecConfig{Config: "missing"}, expectedErr: "invalid credential spec: spec specifies config missing, but no such config can be found", configs: []*swarm.ConfigReference{ { @@ -356,7 +358,7 @@ func TestConvertCredentialSpec(t *testing.T) { }, { name: "namespaced-config", - in: composetypes.CredentialSpecConfig{Config: "name"}, + in: &composetypes.CredentialSpecConfig{Config: "name"}, configs: 
[]*swarm.ConfigReference{ { ConfigName: "namespaced-config_name", @@ -367,7 +369,7 @@ func TestConvertCredentialSpec(t *testing.T) { }, { name: "config", - in: composetypes.CredentialSpecConfig{Config: "someName"}, + in: &composetypes.CredentialSpecConfig{Config: "someName"}, configs: []*swarm.ConfigReference{ { ConfigName: "someOtherName", @@ -381,12 +383,12 @@ func TestConvertCredentialSpec(t *testing.T) { }, { name: "file", - in: composetypes.CredentialSpecConfig{File: "somefile.json"}, + in: &composetypes.CredentialSpecConfig{File: "somefile.json"}, out: &swarm.CredentialSpec{File: "somefile.json"}, }, { name: "registry", - in: composetypes.CredentialSpecConfig{Registry: "testing"}, + in: &composetypes.CredentialSpecConfig{Registry: "testing"}, out: &swarm.CredentialSpec{Registry: "testing"}, }, } @@ -540,7 +542,7 @@ func TestConvertServiceConfigs(t *testing.T) { {Source: "foo_config"}, {Source: "bar_config"}, }, - CredentialSpec: composetypes.CredentialSpecConfig{ + CredentialSpec: &composetypes.CredentialSpecConfig{ Config: "baz_config", }, } diff --git a/cli/compose/convert/volume.go b/cli/compose/convert/volume.go index 387362203feb..561d7043cf92 100644 --- a/cli/compose/convert/volume.go +++ b/cli/compose/convert/volume.go @@ -3,7 +3,7 @@ package convert import ( "strings" - composetypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/docker/api/types/mount" "github.com/pkg/errors" ) @@ -46,7 +46,7 @@ func handleVolumeToMount( if volume.Bind != nil { return mount.Mount{}, errors.New("bind options are incompatible with type volume") } - if volume.Cluster != nil { + if volume.Extensions["x-cluster-spec"] != nil { return mount.Mount{}, errors.New("cluster options are incompatible with type volume") } // Anonymous volumes @@ -71,7 +71,7 @@ func handleVolumeToMount( } // External named volumes - if stackVolume.External.External { + if stackVolume.External { return result, nil } @@ -98,7 +98,7 @@ func handleBindToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, er if volume.Tmpfs != nil { return mount.Mount{}, errors.New("tmpfs options are incompatible with type bind") } - if volume.Cluster != nil { + if volume.Extensions["x-cluster-spec"] != nil { return mount.Mount{}, errors.New("cluster options are incompatible with type bind") } if volume.Bind != nil { @@ -121,12 +121,12 @@ func handleTmpfsToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, e if volume.Volume != nil { return mount.Mount{}, errors.New("volume options are incompatible with type tmpfs") } - if volume.Cluster != nil { + if _, ok := volume.Extensions["x-cluster-spec"]; ok { return mount.Mount{}, errors.New("cluster options are incompatible with type tmpfs") } if volume.Tmpfs != nil { result.TmpfsOptions = &mount.TmpfsOptions{ - SizeBytes: volume.Tmpfs.Size, + SizeBytes: int64(volume.Tmpfs.Size), } } return result, nil diff --git a/cli/compose/convert/volume_test.go b/cli/compose/convert/volume_test.go index 7f9da874fbc8..4d088e33ec6f 100644 --- a/cli/compose/convert/volume_test.go +++ b/cli/compose/convert/volume_test.go @@ -3,7 +3,8 @@ package convert import ( "testing" - composetypes "github.com/docker/cli/cli/compose/types" + composetypes "github.com/compose-spec/compose-go/v2/types" + clitypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types/mount" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" @@ -229,7 +230,7 @@ func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) { 
stackVolumes := volumes{ "outside": composetypes.VolumeConfig{ Name: "special", - External: composetypes.External{External: true}, + External: true, }, } namespace := NewNamespace("foo") @@ -253,7 +254,7 @@ func TestConvertVolumeToMountNamedVolumeExternalNoCopy(t *testing.T) { stackVolumes := volumes{ "outside": composetypes.VolumeConfig{ Name: "special", - External: composetypes.External{External: true}, + External: true, }, } namespace := NewNamespace("foo") @@ -364,14 +365,16 @@ func TestConvertVolumeMountClusterName(t *testing.T) { stackVolumes := volumes{ "my-csi": composetypes.VolumeConfig{ Driver: "mycsidriver", - Spec: &composetypes.ClusterVolumeSpec{ - Group: "mygroup", - AccessMode: &composetypes.AccessMode{ - Scope: "single", - Sharing: "none", - BlockVolume: &composetypes.BlockVolume{}, + Extensions: map[string]any{ + "x-cluster-spec": &clitypes.ClusterVolumeSpec{ + Group: "mygroup", + AccessMode: &clitypes.AccessMode{ + Scope: "single", + Sharing: "none", + BlockVolume: &clitypes.BlockVolume{}, + }, + Availability: "active", }, - Availability: "active", }, }, } @@ -398,14 +401,16 @@ func TestConvertVolumeMountClusterGroup(t *testing.T) { stackVolumes := volumes{ "my-csi": composetypes.VolumeConfig{ Driver: "mycsidriver", - Spec: &composetypes.ClusterVolumeSpec{ - Group: "mygroup", - AccessMode: &composetypes.AccessMode{ - Scope: "single", - Sharing: "none", - BlockVolume: &composetypes.BlockVolume{}, + Extensions: map[string]any{ + "x-cluster-spec": &clitypes.ClusterVolumeSpec{ + Group: "mygroup", + AccessMode: &clitypes.AccessMode{ + Scope: "single", + Sharing: "none", + BlockVolume: &clitypes.BlockVolume{}, + }, + Availability: "active", }, - Availability: "active", }, }, } diff --git a/cli/compose/interpolation/interpolation.go b/cli/compose/interpolation/interpolation.go deleted file mode 100644 index 584ade8ad600..000000000000 --- a/cli/compose/interpolation/interpolation.go +++ /dev/null @@ -1,164 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 - -package interpolation - -import ( - "os" - "strings" - - "github.com/docker/cli/cli/compose/template" - "github.com/pkg/errors" -) - -// Options supported by Interpolate -type Options struct { - // LookupValue from a key - LookupValue LookupValue - // TypeCastMapping maps key paths to functions to cast to a type - TypeCastMapping map[Path]Cast - // Substitution function to use - Substitute func(string, template.Mapping) (string, error) -} - -// LookupValue is a function which maps from variable names to values. -// Returns the value as a string and a bool indicating whether -// the value is present, to distinguish between an empty string -// and the absence of a value. 
-type LookupValue func(key string) (string, bool) - -// Cast a value to a new type, or return an error if the value can't be cast -type Cast func(value string) (any, error) - -// Interpolate replaces variables in a string with the values from a mapping -func Interpolate(config map[string]any, opts Options) (map[string]any, error) { - if opts.LookupValue == nil { - opts.LookupValue = os.LookupEnv - } - if opts.TypeCastMapping == nil { - opts.TypeCastMapping = make(map[Path]Cast) - } - if opts.Substitute == nil { - opts.Substitute = template.Substitute - } - - out := map[string]any{} - - for key, value := range config { - interpolatedValue, err := recursiveInterpolate(value, NewPath(key), opts) - if err != nil { - return out, err - } - out[key] = interpolatedValue - } - - return out, nil -} - -func recursiveInterpolate(value any, path Path, opts Options) (any, error) { - switch value := value.(type) { - case string: - newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue)) - if err != nil || newValue == value { - return value, newPathError(path, err) - } - caster, ok := opts.getCasterForPath(path) - if !ok { - return newValue, nil - } - casted, err := caster(newValue) - return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type")) - - case map[string]any: - out := map[string]any{} - for key, elem := range value { - interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts) - if err != nil { - return nil, err - } - out[key] = interpolatedElem - } - return out, nil - - case []any: - out := make([]any, len(value)) - for i, elem := range value { - interpolatedElem, err := recursiveInterpolate(elem, path.Next(PathMatchList), opts) - if err != nil { - return nil, err - } - out[i] = interpolatedElem - } - return out, nil - - default: - return value, nil - } -} - -func newPathError(path Path, err error) error { - switch err := err.(type) { - case nil: - return nil - case *template.InvalidTemplateError: - return errors.Errorf( - "invalid interpolation format for %s: %#v; you may need to escape any $ with another $", - path, err.Template) - default: - return errors.Wrapf(err, "error while interpolating %s", path) - } -} - -const pathSeparator = "." - -// PathMatchAll is a token used as part of a Path to match any key at that level -// in the nested structure -const PathMatchAll = "*" - -// PathMatchList is a token used as part of a Path to match items in a list -const PathMatchList = "[]" - -// Path is a dotted path of keys to a value in a nested mapping structure. A * -// section in a path will match any key in the mapping structure. 
-type Path string - -// NewPath returns a new Path -func NewPath(items ...string) Path { - return Path(strings.Join(items, pathSeparator)) -} - -// Next returns a new path by append part to the current path -func (p Path) Next(part string) Path { - return Path(string(p) + pathSeparator + part) -} - -func (p Path) parts() []string { - return strings.Split(string(p), pathSeparator) -} - -func (p Path) matches(pattern Path) bool { - patternParts := pattern.parts() - parts := p.parts() - - if len(patternParts) != len(parts) { - return false - } - for index, part := range parts { - switch patternParts[index] { - case PathMatchAll, part: - continue - default: - return false - } - } - return true -} - -func (o Options) getCasterForPath(path Path) (Cast, bool) { - for pattern, caster := range o.TypeCastMapping { - if path.matches(pattern) { - return caster, true - } - } - return nil, false -} diff --git a/cli/compose/interpolation/interpolation_test.go b/cli/compose/interpolation/interpolation_test.go deleted file mode 100644 index f33654affd5d..000000000000 --- a/cli/compose/interpolation/interpolation_test.go +++ /dev/null @@ -1,148 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 - -package interpolation - -import ( - "strconv" - "testing" - - "gotest.tools/v3/assert" - is "gotest.tools/v3/assert/cmp" -) - -var defaults = map[string]string{ - "USER": "jenny", - "FOO": "bar", - "count": "5", -} - -func defaultMapping(name string) (string, bool) { - val, ok := defaults[name] - return val, ok -} - -func TestInterpolate(t *testing.T) { - services := map[string]any{ - "servicea": map[string]any{ - "image": "example:${USER}", - "volumes": []any{"$FOO:/target"}, - "logging": map[string]any{ - "driver": "${FOO}", - "options": map[string]any{ - "user": "$USER", - }, - }, - }, - } - expected := map[string]any{ - "servicea": map[string]any{ - "image": "example:jenny", - "volumes": []any{"bar:/target"}, - "logging": map[string]any{ - "driver": "bar", - "options": map[string]any{ - "user": "jenny", - }, - }, - }, - } - result, err := Interpolate(services, Options{LookupValue: defaultMapping}) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, result)) -} - -func TestInvalidInterpolation(t *testing.T) { - services := map[string]any{ - "servicea": map[string]any{ - "image": "${", - }, - } - _, err := Interpolate(services, Options{LookupValue: defaultMapping}) - assert.Error(t, err, `invalid interpolation format for servicea.image: "${"; you may need to escape any $ with another $`) -} - -func TestInterpolateWithDefaults(t *testing.T) { - t.Setenv("FOO", "BARZ") - - config := map[string]any{ - "networks": map[string]any{ - "foo": "thing_${FOO}", - }, - } - expected := map[string]any{ - "networks": map[string]any{ - "foo": "thing_BARZ", - }, - } - result, err := Interpolate(config, Options{}) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, result)) -} - -func TestInterpolateWithCast(t *testing.T) { - config := map[string]any{ - "foo": map[string]any{ - "replicas": "$count", - }, - } - toInt := func(value string) (any, error) { - return strconv.Atoi(value) - } - result, err := Interpolate(config, Options{ - LookupValue: defaultMapping, - TypeCastMapping: map[Path]Cast{NewPath(PathMatchAll, "replicas"): toInt}, - }) - assert.NilError(t, err) - expected := map[string]any{ - "foo": map[string]any{ - "replicas": 5, - }, - } - assert.Check(t, is.DeepEqual(expected, result)) 
-} - -func TestPathMatches(t *testing.T) { - testcases := []struct { - doc string - path Path - pattern Path - expected bool - }{ - { - doc: "pattern too short", - path: NewPath("one", "two", "three"), - pattern: NewPath("one", "two"), - }, - { - doc: "pattern too long", - path: NewPath("one", "two"), - pattern: NewPath("one", "two", "three"), - }, - { - doc: "pattern mismatch", - path: NewPath("one", "three", "two"), - pattern: NewPath("one", "two", "three"), - }, - { - doc: "pattern mismatch with match-all part", - path: NewPath("one", "three", "two"), - pattern: NewPath(PathMatchAll, "two", "three"), - }, - { - doc: "pattern match with match-all part", - path: NewPath("one", "two", "three"), - pattern: NewPath("one", "*", "three"), - expected: true, - }, - { - doc: "pattern match", - path: NewPath("one", "two", "three"), - pattern: NewPath("one", "two", "three"), - expected: true, - }, - } - for _, testcase := range testcases { - assert.Check(t, is.Equal(testcase.expected, testcase.path.matches(testcase.pattern))) - } -} diff --git a/cli/compose/loader/full-struct_test.go b/cli/compose/loader/full-struct_test.go index f93951ac298f..ff3d021724ad 100644 --- a/cli/compose/loader/full-struct_test.go +++ b/cli/compose/loader/full-struct_test.go @@ -4,20 +4,22 @@ package loader import ( + "path/filepath" "time" - "github.com/docker/cli/cli/compose/types" + "github.com/compose-spec/compose-go/v2/types" + clitypes "github.com/docker/cli/cli/compose/types" ) -func fullExampleConfig(workingDir, homeDir string) *types.Config { - return &types.Config{ - Version: "3.12", +func fullExampleConfig(workingDir, homeDir string) *types.Project { + return &types.Project{ + Name: "full-example", Services: services(workingDir, homeDir), Networks: networks(), Volumes: volumes(), Configs: configs(workingDir), Secrets: secrets(workingDir), - Extras: map[string]any{ + Extensions: map[string]any{ "x-foo": "bar", "x-bar": "baz", "x-nested": map[string]any{ @@ -28,12 +30,11 @@ func fullExampleConfig(workingDir, homeDir string) *types.Config { } } -func services(workingDir, homeDir string) []types.ServiceConfig { - return []types.ServiceConfig{ - { +func services(workingDir, homeDir string) types.Services { + return types.Services{ + "foo": { Name: "foo", - - Build: types.BuildConfig{ + Build: &types.BuildConfig{ Context: "./dir", Dockerfile: "Dockerfile", Args: map[string]*string{"foo": strPtr("bar")}, @@ -41,8 +42,8 @@ func services(workingDir, homeDir string) []types.ServiceConfig { Network: "foo", CacheFrom: []string{"foo", "bar"}, ExtraHosts: types.HostsList{ - "ipv4.example.com:127.0.0.1", - "ipv6.example.com:::1", + "ipv4.example.com": []string{"127.0.0.1"}, + "ipv6.example.com": []string{"::1"}, }, Labels: map[string]string{"FOO": "BAR"}, }, @@ -63,10 +64,13 @@ func services(workingDir, homeDir string) []types.ServiceConfig { }, }, ContainerName: "my-web-container", - DependsOn: []string{"db", "redis"}, - Deploy: types.DeployConfig{ + DependsOn: types.DependsOnConfig{ + "db": {Condition: types.ServiceConditionStarted, Required: true}, + "redis": {Condition: types.ServiceConditionStarted, Required: true}, + }, + Deploy: &types.DeployConfig{ Mode: "replicated", - Replicas: uint64Ptr(6), + Replicas: intPtr(6), Labels: map[string]string{"FOO": "BAR"}, RollbackConfig: &types.UpdateConfig{ Parallelism: uint64Ptr(3), @@ -85,7 +89,7 @@ func services(workingDir, homeDir string) []types.ServiceConfig { Order: "start-first", }, Resources: types.Resources{ - Limits: &types.ResourceLimit{ + Limits: &types.Resource{ 
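+					// Note: compose-go/v2's Resource replaces the cli's removed
+					// ResourceLimit type here; the fields used below
+					// (NanoCPUs, MemoryBytes, Pids) carry over unchanged.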
NanoCPUs: "0.001", MemoryBytes: 50 * 1024 * 1024, Pids: 100, @@ -137,9 +141,15 @@ func services(workingDir, homeDir string) []types.ServiceConfig { "BAZ": strPtr("baz_from_service_def"), "QUX": strPtr("qux_from_environment"), }, - EnvFile: []string{ - "./example1.env", - "./example2.env", + EnvFiles: []types.EnvFile{ + { + Path: filepath.Join(workingDir, "example1.env"), + Required: true, + }, + { + Path: filepath.Join(workingDir, "example2.env"), + Required: false, + }, }, Expose: []string{"3000", "8000"}, ExternalLinks: []string{ @@ -147,12 +157,12 @@ func services(workingDir, homeDir string) []types.ServiceConfig { "project_db_1:mysql", "project_db_1:postgresql", }, - ExtraHosts: []string{ - "somehost:162.242.195.82", - "otherhost:50.31.209.229", - "host.docker.internal:host-gateway", + ExtraHosts: types.HostsList{ + "somehost": []string{"162.242.195.82"}, + "otherhost": []string{"50.31.209.229"}, + "host.docker.internal": []string{"host-gateway"}, }, - Extras: map[string]any{ + Extensions: map[string]any{ "x-bar": "baz", "x-foo": "bar", }, @@ -234,101 +244,101 @@ func services(workingDir, homeDir string) []types.ServiceConfig { { Mode: "ingress", Target: 8000, - Published: 8000, + Published: "8000", Protocol: "tcp", }, // "9090-9091:8080-8081", { Mode: "ingress", Target: 8080, - Published: 9090, + Published: "9090", Protocol: "tcp", }, { Mode: "ingress", Target: 8081, - Published: 9091, + Published: "9091", Protocol: "tcp", }, // "49100:22", { Mode: "ingress", Target: 22, - Published: 49100, + Published: "49100", Protocol: "tcp", }, // "127.0.0.1:8001:8001", { Mode: "ingress", Target: 8001, - Published: 8001, + Published: "8001", Protocol: "tcp", }, // "127.0.0.1:5000-5010:5000-5010", { Mode: "ingress", Target: 5000, - Published: 5000, + Published: "5000", Protocol: "tcp", }, { Mode: "ingress", Target: 5001, - Published: 5001, + Published: "5001", Protocol: "tcp", }, { Mode: "ingress", Target: 5002, - Published: 5002, + Published: "5002", Protocol: "tcp", }, { Mode: "ingress", Target: 5003, - Published: 5003, + Published: "5003", Protocol: "tcp", }, { Mode: "ingress", Target: 5004, - Published: 5004, + Published: "5004", Protocol: "tcp", }, { Mode: "ingress", Target: 5005, - Published: 5005, + Published: "5005", Protocol: "tcp", }, { Mode: "ingress", Target: 5006, - Published: 5006, + Published: "5006", Protocol: "tcp", }, { Mode: "ingress", Target: 5007, - Published: 5007, + Published: "5007", Protocol: "tcp", }, { Mode: "ingress", Target: 5008, - Published: 5008, + Published: "5008", Protocol: "tcp", }, { Mode: "ingress", Target: 5009, - Published: 5009, + Published: "5009", Protocol: "tcp", }, { Mode: "ingress", Target: 5010, - Published: 5010, + Published: "5010", Protocol: "tcp", }, }, @@ -379,7 +389,7 @@ func services(workingDir, homeDir string) []types.ServiceConfig { {Source: "datavolume", Target: "/var/lib/mysql", Type: "volume"}, {Source: workingDir + "/opt", Target: "/opt", Consistency: "cached", Type: "bind"}, {Target: "/opt", Type: "tmpfs", Tmpfs: &types.ServiceVolumeTmpfs{ - Size: int64(10000), + Size: types.UnitBytes(10000), }}, {Source: "group:mygroup", Target: "/srv", Type: "cluster"}, }, @@ -412,13 +422,13 @@ func networks() map[string]types.NetworkConfig { "external-network": { Name: "external-network", - External: types.External{External: true}, + External: true, }, "other-external-network": { Name: "my-cool-network", - External: types.External{External: true}, - Extras: map[string]any{ + External: true, + Extensions: map[string]any{ "x-bar": "baz", "x-foo": "bar", 
}, @@ -449,53 +459,55 @@ func volumes() map[string]types.VolumeConfig { }, "external-volume": { Name: "external-volume", - External: types.External{External: true}, + External: true, }, "other-external-volume": { Name: "my-cool-volume", - External: types.External{External: true}, + External: true, }, "external-volume3": { Name: "this-is-volume3", - External: types.External{External: true}, - Extras: map[string]any{ + External: true, + Extensions: map[string]any{ "x-bar": "baz", "x-foo": "bar", }, }, "cluster-volume": { Driver: "my-csi-driver", - Spec: &types.ClusterVolumeSpec{ - Group: "mygroup", - AccessMode: &types.AccessMode{ - Scope: "single", - Sharing: "none", - BlockVolume: &types.BlockVolume{}, - }, - AccessibilityRequirements: &types.TopologyRequirement{ - Requisite: []types.Topology{ - { - Segments: types.Mapping{"region": "R1", "zone": "Z1"}, + Extensions: map[string]any{ + "x-cluster-spec": clitypes.ClusterVolumeSpec{ + Group: "mygroup", + AccessMode: &clitypes.AccessMode{ + Scope: "single", + Sharing: "none", + BlockVolume: &clitypes.BlockVolume{}, + }, + AccessibilityRequirements: &clitypes.TopologyRequirement{ + Requisite: []clitypes.Topology{ + { + Segments: types.Mapping{"region": "R1", "zone": "Z1"}, + }, + { + Segments: types.Mapping{"region": "R1", "zone": "Z2"}, + }, }, - { - Segments: types.Mapping{"region": "R1", "zone": "Z2"}, + Preferred: []clitypes.Topology{ + { + Segments: types.Mapping{"region": "R1", "zone": "Z1"}, + }, }, }, - Preferred: []types.Topology{ - { - Segments: types.Mapping{"region": "R1", "zone": "Z1"}, - }, + CapacityRange: &clitypes.CapacityRange{ + RequiredBytes: types.UnitBytes(1 * 1024 * 1024 * 1024), + LimitBytes: types.UnitBytes(8 * 1024 * 1024 * 1024), }, + Secrets: []clitypes.VolumeSecret{ + {Key: "mycsisecret", Secret: "secret1"}, + {Key: "mycsisecret2", Secret: "secret4"}, + }, + Availability: "active", }, - CapacityRange: &types.CapacityRange{ - RequiredBytes: types.UnitBytes(1 * 1024 * 1024 * 1024), - LimitBytes: types.UnitBytes(8 * 1024 * 1024 * 1024), - }, - Secrets: []types.VolumeSecret{ - {Key: "mycsisecret", Secret: "secret1"}, - {Key: "mycsisecret2", Secret: "secret4"}, - }, - Availability: "active", }, }, } @@ -511,16 +523,16 @@ func configs(workingDir string) map[string]types.ConfigObjConfig { }, "config2": { Name: "my_config", - External: types.External{External: true}, + External: true, }, "config3": { Name: "config3", - External: types.External{External: true}, + External: true, }, "config4": { Name: "foo", File: workingDir, - Extras: map[string]any{ + Extensions: map[string]any{ "x-bar": "baz", "x-foo": "bar", }, @@ -538,19 +550,36 @@ func secrets(workingDir string) map[string]types.SecretConfig { }, "secret2": { Name: "my_secret", - External: types.External{External: true}, + External: true, }, "secret3": { Name: "secret3", - External: types.External{External: true}, + External: true, }, "secret4": { Name: "bar", File: workingDir, - Extras: map[string]any{ + Extensions: map[string]any{ "x-bar": "baz", "x-foo": "bar", }, }, } } + +func durationPtr(value time.Duration) *types.Duration { + result := types.Duration(value) + return &result +} + +func intPtr(value int) *int { + return &value +} + +func uint64Ptr(value uint64) *uint64 { + return &value +} + +func uint32Ptr(value uint32) *uint32 { + return &value +} diff --git a/cli/compose/loader/interpolate.go b/cli/compose/loader/interpolate.go deleted file mode 100644 index 24682614877b..000000000000 --- a/cli/compose/loader/interpolate.go +++ /dev/null @@ -1,75 +0,0 @@ -// 
FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 - -package loader - -import ( - "strconv" - "strings" - - interp "github.com/docker/cli/cli/compose/interpolation" - "github.com/pkg/errors" -) - -var interpolateTypeCastMapping = map[interp.Path]interp.Cast{ - servicePath("configs", interp.PathMatchList, "mode"): toInt, - servicePath("secrets", interp.PathMatchList, "mode"): toInt, - servicePath("healthcheck", "retries"): toInt, - servicePath("healthcheck", "disable"): toBoolean, - servicePath("deploy", "replicas"): toInt, - servicePath("deploy", "update_config", "parallelism"): toInt, - servicePath("deploy", "update_config", "max_failure_ratio"): toFloat, - servicePath("deploy", "rollback_config", "parallelism"): toInt, - servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat, - servicePath("deploy", "restart_policy", "max_attempts"): toInt, - servicePath("deploy", "placement", "max_replicas_per_node"): toInt, - servicePath("ports", interp.PathMatchList, "target"): toInt, - servicePath("ports", interp.PathMatchList, "published"): toInt, - servicePath("ulimits", interp.PathMatchAll): toInt, - servicePath("ulimits", interp.PathMatchAll, "hard"): toInt, - servicePath("ulimits", interp.PathMatchAll, "soft"): toInt, - servicePath("privileged"): toBoolean, - servicePath("read_only"): toBoolean, - servicePath("stdin_open"): toBoolean, - servicePath("tty"): toBoolean, - servicePath("volumes", interp.PathMatchList, "read_only"): toBoolean, - servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean, - iPath("networks", interp.PathMatchAll, "external"): toBoolean, - iPath("networks", interp.PathMatchAll, "internal"): toBoolean, - iPath("networks", interp.PathMatchAll, "attachable"): toBoolean, - iPath("volumes", interp.PathMatchAll, "external"): toBoolean, - iPath("secrets", interp.PathMatchAll, "external"): toBoolean, - iPath("configs", interp.PathMatchAll, "external"): toBoolean, -} - -func iPath(parts ...string) interp.Path { - return interp.NewPath(parts...) -} - -func servicePath(parts ...string) interp.Path { - return iPath(append([]string{"services", interp.PathMatchAll}, parts...)...) 
-} - -func toInt(value string) (any, error) { - return strconv.Atoi(value) -} - -func toFloat(value string) (any, error) { - return strconv.ParseFloat(value, 64) -} - -// should match http://yaml.org/type/bool.html -func toBoolean(value string) (any, error) { - switch strings.ToLower(value) { - case "y", "yes", "true", "on": - return true, nil - case "n", "no", "false", "off": - return false, nil - default: - return nil, errors.Errorf("invalid boolean: %s", value) - } -} - -func interpolateConfig(configDict map[string]any, opts interp.Options) (map[string]any, error) { - return interp.Interpolate(configDict, opts) -} diff --git a/cli/compose/loader/loader.go b/cli/compose/loader/loader.go index 84090075a7e0..41b1f37881c9 100644 --- a/cli/compose/loader/loader.go +++ b/cli/compose/loader/loader.go @@ -4,258 +4,84 @@ package loader import ( - "fmt" - "path" - "path/filepath" + "context" "reflect" - "sort" - "strconv" - "strings" - "time" - interp "github.com/docker/cli/cli/compose/interpolation" - "github.com/docker/cli/cli/compose/schema" - "github.com/docker/cli/cli/compose/template" + composeLoader "github.com/compose-spec/compose-go/v2/loader" + compose "github.com/compose-spec/compose-go/v2/types" "github.com/docker/cli/cli/compose/types" - "github.com/docker/cli/opts" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" - "github.com/google/shlex" - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - yaml "gopkg.in/yaml.v2" + "golang.org/x/exp/slices" ) -// Options supported by Load -type Options struct { - // Skip schema validation - SkipValidation bool - // Skip interpolation - SkipInterpolation bool - // Interpolation options - Interpolate *interp.Options - // Discard 'env_file' entries after resolving to 'environment' section - discardEnvFiles bool -} - -// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to -// the `environment` section -func WithDiscardEnvFiles(options *Options) { - options.discardEnvFiles = true -} - -// ParseYAML reads the bytes from a file, parses the bytes into a mapping -// structure, and returns it. -func ParseYAML(source []byte) (map[string]any, error) { - var cfg any - if err := yaml.Unmarshal(source, &cfg); err != nil { - return nil, err - } - cfgMap, ok := cfg.(map[any]any) - if !ok { - return nil, errors.Errorf("top-level object must be a mapping") +// Load reads a ConfigDetails and returns a fully loaded configuration +func Load(configDetails compose.ConfigDetails, opt ...func(*composeLoader.Options)) (*compose.Project, error) { + opts := []func(*composeLoader.Options){} + opts = append(opts, opt...) + clusterVolumeExtension := func(options *composeLoader.Options) { + options.KnownExtensions = map[string]any{ + "x-cluster-spec": types.ClusterVolumeSpec{}, + } } - converted, err := convertToStringKeysRecursive(cfgMap, "") + opts = append(opts, clusterVolumeExtension) + project, err := composeLoader.LoadWithContext(context.Background(), configDetails, opts...) 
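+	// LoadWithContext runs compose-go's full loader pipeline (parsing,
+	// merging of multiple files, interpolation, and schema validation),
+	// so an error surfaced here is already a descriptive, user-facing one.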
if err != nil { return nil, err } - return converted.(map[string]any), nil -} - -// Load reads a ConfigDetails and returns a fully loaded configuration -func Load(configDetails types.ConfigDetails, opt ...func(*Options)) (*types.Config, error) { - if len(configDetails.ConfigFiles) < 1 { - return nil, errors.Errorf("No files specified") - } - - options := &Options{ - Interpolate: &interp.Options{ - Substitute: template.Substitute, - LookupValue: configDetails.LookupEnv, - TypeCastMapping: interpolateTypeCastMapping, - }, - } - - for _, op := range opt { - op(options) - } - - configs := []*types.Config{} - var err error - for _, file := range configDetails.ConfigFiles { - configDict := file.Config - version := schema.Version(configDict) - if configDetails.Version == "" { - configDetails.Version = version - } - if configDetails.Version != version { - return nil, errors.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version) - } - - if err := validateForbidden(configDict); err != nil { - return nil, err - } - - if !options.SkipInterpolation { - configDict, err = interpolateConfig(configDict, *options.Interpolate) - if err != nil { - return nil, err - } - } - - if !options.SkipValidation { - if err := schema.Validate(configDict, configDetails.Version); err != nil { - return nil, err - } - } - - cfg, err := loadSections(configDict, configDetails) - if err != nil { - return nil, err - } - cfg.Filename = file.Filename - if options.discardEnvFiles { - for i := range cfg.Services { - cfg.Services[i].EnvFile = nil - } - } - - configs = append(configs, cfg) + if err := validateForbidden(project.Services); err != nil { + return nil, err } - - return merge(configs) + return project, nil } -func validateForbidden(configDict map[string]any) error { - servicesDict, ok := configDict["services"].(map[string]any) - if !ok { - return nil - } - forbidden := getProperties(servicesDict, types.ForbiddenProperties) +func validateForbidden(services compose.Services) error { + forbidden := getProperties(services, types.ForbiddenProperties) if len(forbidden) > 0 { return &ForbiddenPropertiesError{Properties: forbidden} } return nil } -func loadSections(config map[string]any, configDetails types.ConfigDetails) (*types.Config, error) { - var err error - cfg := types.Config{ - Version: schema.Version(config), - } - - loaders := []struct { - key string - fnc func(config map[string]any) error - }{ - { - key: "services", - fnc: func(config map[string]any) error { - cfg.Services, err = LoadServices(config, configDetails.WorkingDir, configDetails.LookupEnv) - return err - }, - }, - { - key: "networks", - fnc: func(config map[string]any) error { - cfg.Networks, err = LoadNetworks(config, configDetails.Version) - return err - }, - }, - { - key: "volumes", - fnc: func(config map[string]any) error { - cfg.Volumes, err = LoadVolumes(config, configDetails.Version) - return err - }, - }, - { - key: "secrets", - fnc: func(config map[string]any) error { - cfg.Secrets, err = LoadSecrets(config, configDetails) - return err - }, - }, - { - key: "configs", - fnc: func(config map[string]any) error { - cfg.Configs, err = LoadConfigObjs(config, configDetails) - return err - }, - }, - } - for _, loader := range loaders { - if err := loader.fnc(getSection(config, loader.key)); err != nil { - return nil, err - } - } - cfg.Extras = getExtras(config) - return &cfg, nil -} - -func getSection(config map[string]any, key string) map[string]any { - section, ok := config[key] - if !ok { - return 
make(map[string]any) - } - return section.(map[string]any) -} - // GetUnsupportedProperties returns the list of any unsupported properties that are // used in the Compose files. -func GetUnsupportedProperties(configDicts ...map[string]any) []string { - unsupported := map[string]bool{} +func GetUnsupportedProperties(services compose.Services) []string { + unsupported := []string{} - for _, configDict := range configDicts { - for _, service := range getServices(configDict) { - serviceDict := service.(map[string]any) - for _, property := range types.UnsupportedProperties { - if _, isSet := serviceDict[property]; isSet { - unsupported[property] = true - } + for _, service := range services { + r := reflect.ValueOf(service) + for property, value := range types.UnsupportedProperties { + f := reflect.Indirect(r).FieldByName(property) + if f.IsValid() && !f.IsZero() && !slices.Contains(unsupported, value) { + unsupported = append(unsupported, value) } } } - - return sortedKeys(unsupported) -} - -func sortedKeys(set map[string]bool) []string { - keys := make([]string, 0, len(set)) - for key := range set { - keys = append(keys, key) - } - sort.Strings(keys) - return keys + slices.Sort(unsupported) + return unsupported } // GetDeprecatedProperties returns the list of any deprecated properties that // are used in the compose files. -func GetDeprecatedProperties(configDicts ...map[string]any) map[string]string { +func GetDeprecatedProperties(services compose.Services) map[string]string { deprecated := map[string]string{} - for _, configDict := range configDicts { - deprecatedProperties := getProperties(getServices(configDict), types.DeprecatedProperties) - for key, value := range deprecatedProperties { - deprecated[key] = value - } + deprecatedProperties := getProperties(services, types.DeprecatedProperties) + for key, value := range deprecatedProperties { + deprecated[key] = value } return deprecated } -func getProperties(services map[string]any, propertyMap map[string]string) map[string]string { +func getProperties(services compose.Services, propertyMap map[string]types.Pair[string, string]) map[string]string { output := map[string]string{} for _, service := range services { - if serviceDict, ok := service.(map[string]any); ok { - for property, description := range propertyMap { - if _, isSet := serviceDict[property]; isSet { - output[property] = description - } + r := reflect.ValueOf(service) + for property, pair := range propertyMap { + f := reflect.Indirect(r).FieldByName(property) + if f.IsValid() && !f.IsZero() { + output[pair.Key()] = pair.Value() } } } @@ -272,717 +98,3 @@ type ForbiddenPropertiesError struct { func (e *ForbiddenPropertiesError) Error() string { return "Configuration contains forbidden properties" } - -func getServices(configDict map[string]any) map[string]any { - if services, ok := configDict["services"]; ok { - if servicesDict, ok := services.(map[string]any); ok { - return servicesDict - } - } - - return map[string]any{} -} - -// Transform converts the source into the target struct with compose types transformer -// and the specified transformers if any. 
-func Transform(source any, target any, additionalTransformers ...Transformer) error { - data := mapstructure.Metadata{} - config := &mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - createTransformHook(additionalTransformers...), - mapstructure.StringToTimeDurationHookFunc()), - Result: target, - Metadata: &data, - } - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - return decoder.Decode(source) -} - -// TransformerFunc defines a function to perform the actual transformation -type TransformerFunc func(any) (any, error) - -// Transformer defines a map to type transformer -type Transformer struct { - TypeOf reflect.Type - Func TransformerFunc -} - -func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType { - transforms := map[reflect.Type]func(any) (any, error){ - reflect.TypeOf(types.External{}): transformExternal, - reflect.TypeOf(types.HealthCheckTest{}): transformHealthCheckTest, - reflect.TypeOf(types.ShellCommand{}): transformShellCommand, - reflect.TypeOf(types.StringList{}): transformStringList, - reflect.TypeOf(map[string]string{}): transformMapStringString, - reflect.TypeOf(types.UlimitsConfig{}): transformUlimits, - reflect.TypeOf(types.UnitBytes(0)): transformSize, - reflect.TypeOf([]types.ServicePortConfig{}): transformServicePort, - reflect.TypeOf(types.ServiceSecretConfig{}): transformStringSourceMap, - reflect.TypeOf(types.ServiceConfigObjConfig{}): transformStringSourceMap, - reflect.TypeOf(types.StringOrNumberList{}): transformStringOrNumberList, - reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap, - reflect.TypeOf(types.Mapping{}): transformMappingOrListFunc("=", false), - reflect.TypeOf(types.MappingWithEquals{}): transformMappingOrListFunc("=", true), - reflect.TypeOf(types.Labels{}): transformMappingOrListFunc("=", false), - reflect.TypeOf(types.MappingWithColon{}): transformMappingOrListFunc(":", false), - reflect.TypeOf(types.HostsList{}): transformHostsList, - reflect.TypeOf(types.ServiceVolumeConfig{}): transformServiceVolumeConfig, - reflect.TypeOf(types.BuildConfig{}): transformBuildConfig, - reflect.TypeOf(types.Duration(0)): transformStringToDuration, - } - - for _, transformer := range additionalTransformers { - transforms[transformer.TypeOf] = transformer.Func - } - - return func(_ reflect.Type, target reflect.Type, data any) (any, error) { - transform, ok := transforms[target] - if !ok { - return data, nil - } - return transform(data) - } -} - -// keys needs to be converted to strings for jsonschema -func convertToStringKeysRecursive(value any, keyPrefix string) (any, error) { - if mapping, ok := value.(map[any]any); ok { - dict := make(map[string]any) - for key, entry := range mapping { - str, ok := key.(string) - if !ok { - return nil, formatInvalidKeyError(keyPrefix, key) - } - var newKeyPrefix string - if keyPrefix == "" { - newKeyPrefix = str - } else { - newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) - } - convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) - if err != nil { - return nil, err - } - dict[str] = convertedEntry - } - return dict, nil - } - if list, ok := value.([]any); ok { - var convertedList []any - for index, entry := range list { - newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) - convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) - if err != nil { - return nil, err - } - convertedList = append(convertedList, convertedEntry) - } - return 
convertedList, nil - } - return value, nil -} - -func formatInvalidKeyError(keyPrefix string, key any) error { - var location string - if keyPrefix == "" { - location = "at top level" - } else { - location = "in " + keyPrefix - } - return errors.Errorf("non-string key %s: %#v", location, key) -} - -// LoadServices produces a ServiceConfig map from a compose file Dict -// the servicesDict is not validated if directly used. Use Load() to enable validation -func LoadServices(servicesDict map[string]any, workingDir string, lookupEnv template.Mapping) ([]types.ServiceConfig, error) { - services := make([]types.ServiceConfig, 0, len(servicesDict)) - - for name, serviceDef := range servicesDict { - serviceConfig, err := LoadService(name, serviceDef.(map[string]any), workingDir, lookupEnv) - if err != nil { - return nil, err - } - services = append(services, *serviceConfig) - } - - return services, nil -} - -// LoadService produces a single ServiceConfig from a compose file Dict -// the serviceDict is not validated if directly used. Use Load() to enable validation -func LoadService(name string, serviceDict map[string]any, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) { - serviceConfig := &types.ServiceConfig{} - if err := Transform(serviceDict, serviceConfig); err != nil { - return nil, err - } - serviceConfig.Name = name - - if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil { - return nil, err - } - - if err := resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv); err != nil { - return nil, err - } - - serviceConfig.Extras = getExtras(serviceDict) - - return serviceConfig, nil -} - -func loadExtras(name string, source map[string]any) map[string]any { - if dict, ok := source[name].(map[string]any); ok { - return getExtras(dict) - } - return nil -} - -func getExtras(dict map[string]any) map[string]any { - extras := map[string]any{} - for key, value := range dict { - if strings.HasPrefix(key, "x-") { - extras[key] = value - } - } - if len(extras) == 0 { - return nil - } - return extras -} - -func updateEnvironment(environment map[string]*string, vars map[string]*string, lookupEnv template.Mapping) { - for k, v := range vars { - interpolatedV, ok := lookupEnv(k) - if (v == nil || *v == "") && ok { - // lookupEnv is prioritized over vars - environment[k] = &interpolatedV - } else { - environment[k] = v - } - } -} - -func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error { - environment := make(map[string]*string) - - if len(serviceConfig.EnvFile) > 0 { - var envVars []string - - for _, file := range serviceConfig.EnvFile { - filePath := absPath(workingDir, file) - fileVars, err := opts.ParseEnvFile(filePath) - if err != nil { - return err - } - envVars = append(envVars, fileVars...) 
- } - updateEnvironment(environment, - opts.ConvertKVStringsToMapWithNil(envVars), lookupEnv) - } - - updateEnvironment(environment, serviceConfig.Environment, lookupEnv) - serviceConfig.Environment = environment - return nil -} - -func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) error { - for i, volume := range volumes { - if volume.Type != "bind" { - continue - } - - if volume.Source == "" { - return errors.New(`invalid mount config for type "bind": field Source must not be empty`) - } - - filePath := expandUser(volume.Source, lookupEnv) - // Check if source is an absolute path (either Unix or Windows), to - // handle a Windows client with a Unix daemon or vice-versa. - // - // Note that this is not required for Docker for Windows when specifying - // a local Windows path, because Docker for Windows translates the Windows - // path into a valid path within the VM. - if !path.IsAbs(filePath) && !isAbs(filePath) { - filePath = absPath(workingDir, filePath) - } - volume.Source = filePath - volumes[i] = volume - } - return nil -} - -// TODO: make this more robust -func expandUser(srcPath string, lookupEnv template.Mapping) string { - if strings.HasPrefix(srcPath, "~") { - home, ok := lookupEnv("HOME") - if !ok { - logrus.Warn("cannot expand '~', because the environment lacks HOME") - return srcPath - } - return strings.Replace(srcPath, "~", home, 1) - } - return srcPath -} - -func transformUlimits(data any) (any, error) { - switch value := data.(type) { - case int: - return types.UlimitsConfig{Single: value}, nil - case map[string]any: - ulimit := types.UlimitsConfig{} - ulimit.Soft = value["soft"].(int) - ulimit.Hard = value["hard"].(int) - return ulimit, nil - default: - return data, errors.Errorf("invalid type %T for ulimits", value) - } -} - -// LoadNetworks produces a NetworkConfig map from a compose file Dict -// the source Dict is not validated if directly used. Use Load() to enable validation -func LoadNetworks(source map[string]any, version string) (map[string]types.NetworkConfig, error) { - networks := make(map[string]types.NetworkConfig) - err := Transform(source, &networks) - if err != nil { - return networks, err - } - for name, network := range networks { - if !network.External.External { - continue - } - switch { - case network.External.Name != "": - if network.Name != "" { - return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name) - } - if versions.GreaterThanOrEqualTo(version, "3.5") { - logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name) - } - network.Name = network.External.Name - network.External.Name = "" - case network.Name == "": - network.Name = name - } - network.Extras = loadExtras(name, source) - networks[name] = network - } - return networks, nil -} - -func externalVolumeError(volume, key string) error { - return errors.Errorf( - "conflicting parameters \"external\" and %q specified for volume %q", - key, volume) -} - -// LoadVolumes produces a VolumeConfig map from a compose file Dict -// the source Dict is not validated if directly used. 
Use Load() to enable validation -func LoadVolumes(source map[string]any, version string) (map[string]types.VolumeConfig, error) { - volumes := make(map[string]types.VolumeConfig) - if err := Transform(source, &volumes); err != nil { - return volumes, err - } - - for name, volume := range volumes { - if !volume.External.External { - continue - } - switch { - case volume.Driver != "": - return nil, externalVolumeError(name, "driver") - case len(volume.DriverOpts) > 0: - return nil, externalVolumeError(name, "driver_opts") - case len(volume.Labels) > 0: - return nil, externalVolumeError(name, "labels") - case volume.External.Name != "": - if volume.Name != "" { - return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name) - } - if versions.GreaterThanOrEqualTo(version, "3.4") { - logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name) - } - volume.Name = volume.External.Name - volume.External.Name = "" - case volume.Name == "": - volume.Name = name - } - volume.Extras = loadExtras(name, source) - volumes[name] = volume - } - return volumes, nil -} - -// LoadSecrets produces a SecretConfig map from a compose file Dict -// the source Dict is not validated if directly used. Use Load() to enable validation -func LoadSecrets(source map[string]any, details types.ConfigDetails) (map[string]types.SecretConfig, error) { - secrets := make(map[string]types.SecretConfig) - if err := Transform(source, &secrets); err != nil { - return secrets, err - } - for name, secret := range secrets { - obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details) - if err != nil { - return nil, err - } - secretConfig := types.SecretConfig(obj) - secretConfig.Extras = loadExtras(name, source) - secrets[name] = secretConfig - } - return secrets, nil -} - -// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict -// the source Dict is not validated if directly used. 
Use Load() to enable validation -func LoadConfigObjs(source map[string]any, details types.ConfigDetails) (map[string]types.ConfigObjConfig, error) { - configs := make(map[string]types.ConfigObjConfig) - if err := Transform(source, &configs); err != nil { - return configs, err - } - for name, config := range configs { - obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details) - if err != nil { - return nil, err - } - configConfig := types.ConfigObjConfig(obj) - configConfig.Extras = loadExtras(name, source) - configs[name] = configConfig - } - return configs, nil -} - -func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails) (types.FileObjectConfig, error) { - // if "external: true" - switch { - case obj.External.External: - // handle deprecated external.name - if obj.External.Name != "" { - if obj.Name != "" { - return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name) - } - if versions.GreaterThanOrEqualTo(details.Version, "3.5") { - logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name) - } - obj.Name = obj.External.Name - obj.External.Name = "" - } else if obj.Name == "" { - obj.Name = name - } - // if not "external: true" - case obj.Driver != "": - if obj.File != "" { - return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name) - } - default: - obj.File = absPath(details.WorkingDir, obj.File) - } - - return obj, nil -} - -func absPath(workingDir string, filePath string) string { - if filepath.IsAbs(filePath) { - return filePath - } - return filepath.Join(workingDir, filePath) -} - -var transformMapStringString TransformerFunc = func(data any) (any, error) { - switch value := data.(type) { - case map[string]any: - return toMapStringString(value, false), nil - case map[string]string: - return value, nil - default: - return data, errors.Errorf("invalid type %T for map[string]string", value) - } -} - -var transformExternal TransformerFunc = func(data any) (any, error) { - switch value := data.(type) { - case bool: - return map[string]any{"external": value}, nil - case map[string]any: - return map[string]any{"external": true, "name": value["name"]}, nil - default: - return data, errors.Errorf("invalid type %T for external", value) - } -} - -var transformServicePort TransformerFunc = func(data any) (any, error) { - switch entries := data.(type) { - case []any: - // We process the list instead of individual items here. - // The reason is that one entry might be mapped to multiple ServicePortConfig. - // Therefore we take an input of a list and return an output of a list. - ports := []any{} - for _, entry := range entries { - switch value := entry.(type) { - case int: - v, err := toServicePortConfigs(strconv.Itoa(value)) - if err != nil { - return data, err - } - ports = append(ports, v...) - case string: - v, err := toServicePortConfigs(value) - if err != nil { - return data, err - } - ports = append(ports, v...) 
- case map[string]any: - ports = append(ports, value) - default: - return data, errors.Errorf("invalid type %T for port", value) - } - } - return ports, nil - default: - return data, errors.Errorf("invalid type %T for port", entries) - } -} - -var transformStringSourceMap TransformerFunc = func(data any) (any, error) { - switch value := data.(type) { - case string: - return map[string]any{"source": value}, nil - case map[string]any: - return data, nil - default: - return data, errors.Errorf("invalid type %T for secret", value) - } -} - -var transformBuildConfig TransformerFunc = func(data any) (any, error) { - switch value := data.(type) { - case string: - return map[string]any{"context": value}, nil - case map[string]any: - return data, nil - default: - return data, errors.Errorf("invalid type %T for service build", value) - } -} - -var transformServiceVolumeConfig TransformerFunc = func(data any) (any, error) { - switch value := data.(type) { - case string: - return ParseVolume(value) - case map[string]any: - return data, nil - default: - return data, errors.Errorf("invalid type %T for service volume", value) - } -} - -var transformServiceNetworkMap TransformerFunc = func(value any) (any, error) { - if list, ok := value.([]any); ok { - mapValue := map[any]any{} - for _, name := range list { - mapValue[name] = nil - } - return mapValue, nil - } - return value, nil -} - -var transformStringOrNumberList TransformerFunc = func(value any) (any, error) { - list := value.([]any) - result := make([]string, len(list)) - for i, item := range list { - result[i] = fmt.Sprint(item) - } - return result, nil -} - -var transformStringList TransformerFunc = func(data any) (any, error) { - switch value := data.(type) { - case string: - return []string{value}, nil - case []any: - return value, nil - default: - return data, errors.Errorf("invalid type %T for string list", value) - } -} - -var transformHostsList TransformerFunc = func(data any) (any, error) { - hl := transformListOrMapping(data, ":", false, []string{"=", ":"}) - - // Remove brackets from IP addresses if present (for example "[::1]" -> "::1"). - result := make([]string, 0, len(hl)) - for _, hip := range hl { - host, ip, _ := strings.Cut(hip, ":") - if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' { - ip = ip[1 : len(ip)-1] - } - result = append(result, fmt.Sprintf("%s:%s", host, ip)) - } - return result, nil -} - -// transformListOrMapping transforms pairs of strings that may be represented as -// a map, or a list of '=' or ':' separated strings, into a list of ':' separated -// strings. -func transformListOrMapping(listOrMapping any, sep string, allowNil bool, allowSeps []string) []string { - switch value := listOrMapping.(type) { - case map[string]any: - return toStringList(value, sep, allowNil) - case []any: - result := make([]string, 0, len(value)) - for _, entry := range value { - for i, allowSep := range allowSeps { - entry := fmt.Sprint(entry) - k, v, ok := strings.Cut(entry, allowSep) - if ok { - // Entry uses this allowed separator. Add it to the result, using - // sep as a separator. - result = append(result, fmt.Sprintf("%s%s%s", k, sep, v)) - break - } else if i == len(allowSeps)-1 { - // No more separators to try, keep the entry if allowNil. 
- if allowNil { - result = append(result, k) - } - } - } - } - return result - } - panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping)) -} - -func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc { - return func(data any) (any, error) { - return transformMappingOrList(data, sep, allowNil), nil - } -} - -func transformMappingOrList(mappingOrList any, sep string, allowNil bool) any { - switch values := mappingOrList.(type) { - case map[string]any: - return toMapStringString(values, allowNil) - case []any: - result := make(map[string]any) - for _, v := range values { - key, val, hasValue := strings.Cut(v.(string), sep) - switch { - case !hasValue && allowNil: - result[key] = nil - case !hasValue && !allowNil: - result[key] = "" - default: - result[key] = val - } - } - return result - } - panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList)) -} - -var transformShellCommand TransformerFunc = func(value any) (any, error) { - if str, ok := value.(string); ok { - return shlex.Split(str) - } - return value, nil -} - -var transformHealthCheckTest TransformerFunc = func(data any) (any, error) { - switch value := data.(type) { - case string: - return append([]string{"CMD-SHELL"}, value), nil - case []any: - return value, nil - default: - return value, errors.Errorf("invalid type %T for healthcheck.test", value) - } -} - -var transformSize TransformerFunc = func(value any) (any, error) { - switch value := value.(type) { - case int: - return int64(value), nil - case string: - return units.RAMInBytes(value) - } - panic(errors.Errorf("invalid type for size %T", value)) -} - -var transformStringToDuration TransformerFunc = func(value any) (any, error) { - switch value := value.(type) { - case string: - d, err := time.ParseDuration(value) - if err != nil { - return value, err - } - return types.Duration(d), nil - default: - return value, errors.Errorf("invalid type %T for duration", value) - } -} - -func toServicePortConfigs(value string) ([]any, error) { - var portConfigs []any - - ports, portBindings, err := nat.ParsePortSpecs([]string{value}) - if err != nil { - return nil, err - } - // We need to sort the key of the ports to make sure it is consistent - keys := []string{} - for port := range ports { - keys = append(keys, string(port)) - } - sort.Strings(keys) - - for _, key := range keys { - // Reuse ConvertPortToPortConfig so that it is consistent - portConfig, err := opts.ConvertPortToPortConfig(nat.Port(key), portBindings) - if err != nil { - return nil, err - } - for _, p := range portConfig { - portConfigs = append(portConfigs, types.ServicePortConfig{ - Protocol: string(p.Protocol), - Target: p.TargetPort, - Published: p.PublishedPort, - Mode: string(p.PublishMode), - }) - } - } - - return portConfigs, nil -} - -func toMapStringString(value map[string]any, allowNil bool) map[string]any { - output := make(map[string]any) - for key, value := range value { - output[key] = toString(value, allowNil) - } - return output -} - -func toString(value any, allowNil bool) any { - switch { - case value != nil: - return fmt.Sprint(value) - case allowNil: - return nil - default: - return "" - } -} - -func toStringList(value map[string]any, separator string, allowNil bool) []string { - output := []string{} - for key, value := range value { - if value == nil && !allowNil { - continue - } - output = append(output, fmt.Sprintf("%s%s%s", key, separator, value)) - } - sort.Strings(output) - return output -} diff --git 
a/cli/compose/loader/loader_test.go b/cli/compose/loader/loader_test.go index cdeaa6d6011d..c4f0fbef6d9c 100644 --- a/cli/compose/loader/loader_test.go +++ b/cli/compose/loader/loader_test.go @@ -4,844 +4,90 @@ package loader import ( - "bytes" + "fmt" "os" - "runtime" - "sort" "testing" - "time" - "github.com/docker/cli/cli/compose/types" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/sirupsen/logrus" + "github.com/compose-spec/compose-go/v2/types" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" - "gotest.tools/v3/skip" ) -func buildConfigDetails(source map[string]any, env map[string]string) types.ConfigDetails { +func buildConfigDetailsFromYAML(yaml string, env map[string]string) types.ConfigDetails { + return buildConfigDetailsMultipleFiles(env, yaml) +} + +func buildConfigDetailsMultipleFiles(env map[string]string, yamls ...string) types.ConfigDetails { workingDir, err := os.Getwd() if err != nil { panic(err) } + if env == nil { + env = map[string]string{} + } + return types.ConfigDetails{ - WorkingDir: workingDir, - ConfigFiles: []types.ConfigFile{ - {Filename: "filename.yml", Config: source}, - }, + WorkingDir: workingDir, + ConfigFiles: buildConfigFiles(yamls), Environment: env, } } -func loadYAML(yaml string) (*types.Config, error) { - return loadYAMLWithEnv(yaml, nil) -} - -func loadYAMLWithEnv(yaml string, env map[string]string) (*types.Config, error) { - dict, err := ParseYAML([]byte(yaml)) - if err != nil { - return nil, err +func buildConfigFiles(yamls []string) []types.ConfigFile { + configFiles := []types.ConfigFile{} + for i, yaml := range yamls { + configFiles = append(configFiles, types.ConfigFile{ + Filename: fmt.Sprintf("filename%d.yml", i), + Content: []byte(yaml), + }) } - - return Load(buildConfigDetails(dict, env)) + return configFiles } -var sampleYAML = ` -version: "3" -services: - foo: - image: busybox - networks: - with_me: - bar: - image: busybox - environment: - - FOO=1 - networks: - - with_ipam -volumes: - hello: - driver: default - driver_opts: - beep: boop -networks: - default: - driver: bridge - driver_opts: - beep: boop - with_ipam: - ipam: - driver: default - config: - - subnet: 172.28.0.0/16 -` - -var sampleDict = map[string]any{ - "version": "3", - "services": map[string]any{ - "foo": map[string]any{ - "image": "busybox", - "networks": map[string]any{"with_me": nil}, - }, - "bar": map[string]any{ - "image": "busybox", - "environment": []any{"FOO=1"}, - "networks": []any{"with_ipam"}, - }, - }, - "volumes": map[string]any{ - "hello": map[string]any{ - "driver": "default", - "driver_opts": map[string]any{ - "beep": "boop", - }, - }, - }, - "networks": map[string]any{ - "default": map[string]any{ - "driver": "bridge", - "driver_opts": map[string]any{ - "beep": "boop", - }, - }, - "with_ipam": map[string]any{ - "ipam": map[string]any{ - "driver": "default", - "config": []any{ - map[string]any{ - "subnet": "172.28.0.0/16", - }, - }, - }, - }, - }, +func loadYAML(yaml string) (*types.Project, error) { + return loadYAMLWithEnv(yaml, nil) } -var samplePortsConfig = []types.ServicePortConfig{ - { - Mode: "ingress", - Target: 8080, - Published: 80, - Protocol: "tcp", - }, - { - Mode: "ingress", - Target: 8081, - Published: 81, - Protocol: "tcp", - }, - { - Mode: "ingress", - Target: 8082, - Published: 82, - Protocol: "tcp", - }, - { - Mode: "ingress", - Target: 8090, - Published: 90, - Protocol: "udp", - }, - { - Mode: "ingress", - Target: 8091, - Published: 91, - Protocol: "udp", - }, - { - Mode: "ingress", - Target: 8092, - Published: 92, - 
Protocol: "udp", - }, - { - Mode: "ingress", - Target: 8500, - Published: 85, - Protocol: "tcp", - }, - { - Mode: "ingress", - Target: 8600, - Published: 0, - Protocol: "tcp", - }, - { - Target: 53, - Published: 10053, - Protocol: "udp", - }, - { - Mode: "host", - Target: 22, - Published: 10022, - }, +func loadYAMLWithEnv(yaml string, env map[string]string) (*types.Project, error) { + return Load(buildConfigDetailsFromYAML(yaml, env)) } func strPtr(val string) *string { return &val } -var sampleConfig = types.Config{ - Version: "3.12", - Services: []types.ServiceConfig{ - { - Name: "foo", - Image: "busybox", - Environment: map[string]*string{}, - Networks: map[string]*types.ServiceNetworkConfig{ - "with_me": nil, - }, - }, - { - Name: "bar", - Image: "busybox", - Environment: map[string]*string{"FOO": strPtr("1")}, - Networks: map[string]*types.ServiceNetworkConfig{ - "with_ipam": nil, - }, - }, - }, - Networks: map[string]types.NetworkConfig{ - "default": { - Driver: "bridge", - DriverOpts: map[string]string{ - "beep": "boop", - }, - }, - "with_ipam": { - Ipam: types.IPAMConfig{ - Driver: "default", - Config: []*types.IPAMPool{ - { - Subnet: "172.28.0.0/16", - }, - }, - }, - }, - }, - Volumes: map[string]types.VolumeConfig{ - "hello": { - Driver: "default", - DriverOpts: map[string]string{ - "beep": "boop", - }, - }, - }, -} - -func TestParseYAML(t *testing.T) { - dict, err := ParseYAML([]byte(sampleYAML)) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(sampleDict, dict)) -} - -func TestLoad(t *testing.T) { - actual, err := Load(buildConfigDetails(sampleDict, nil)) - assert.NilError(t, err) - assert.Check(t, is.Equal(sampleConfig.Version, actual.Version)) - assert.Check(t, is.DeepEqual(serviceSort(sampleConfig.Services), serviceSort(actual.Services))) - assert.Check(t, is.DeepEqual(sampleConfig.Networks, actual.Networks)) - assert.Check(t, is.DeepEqual(sampleConfig.Volumes, actual.Volumes)) -} - -func TestLoadExtras(t *testing.T) { - actual, err := loadYAML(` -version: "3.7" -services: - foo: - image: busybox - x-foo: bar`) - assert.NilError(t, err) - assert.Check(t, is.Len(actual.Services, 1)) - service := actual.Services[0] - assert.Check(t, is.Equal("busybox", service.Image)) - extras := map[string]any{ - "x-foo": "bar", - } - assert.Check(t, is.DeepEqual(extras, service.Extras)) -} - -func TestLoadV31(t *testing.T) { - actual, err := loadYAML(` -version: "3.1" -services: - foo: - image: busybox - secrets: [super] -secrets: - super: - external: true -`) - assert.NilError(t, err) - assert.Check(t, is.Len(actual.Services, 1)) - assert.Check(t, is.Len(actual.Secrets, 1)) -} - -func TestLoadV33(t *testing.T) { - actual, err := loadYAML(` -version: "3.3" -services: - foo: - image: busybox - credential_spec: - file: "/foo" - configs: [super] -configs: - super: - external: true -`) - assert.NilError(t, err) - assert.Assert(t, is.Len(actual.Services, 1)) - assert.Check(t, is.Equal(actual.Services[0].CredentialSpec.File, "/foo")) - assert.Assert(t, is.Len(actual.Configs, 1)) -} - -func TestLoadV38(t *testing.T) { - actual, err := loadYAML(` -version: "3.8" -services: - foo: - image: busybox - credential_spec: - config: "0bt9dmxjvjiqermk6xrop3ekq" -`) - assert.NilError(t, err) - assert.Assert(t, is.Len(actual.Services, 1)) - assert.Check(t, is.Equal(actual.Services[0].CredentialSpec.Config, "0bt9dmxjvjiqermk6xrop3ekq")) -} - -func TestParseAndLoad(t *testing.T) { - actual, err := loadYAML(sampleYAML) - assert.NilError(t, err) - assert.Check(t, 
is.DeepEqual(serviceSort(sampleConfig.Services), serviceSort(actual.Services))) - assert.Check(t, is.DeepEqual(sampleConfig.Networks, actual.Networks)) - assert.Check(t, is.DeepEqual(sampleConfig.Volumes, actual.Volumes)) -} - -func TestInvalidTopLevelObjectType(t *testing.T) { - _, err := loadYAML("1") - assert.Check(t, is.ErrorContains(err, "top-level object must be a mapping")) - - _, err = loadYAML(`"hello"`) - assert.Check(t, is.ErrorContains(err, "top-level object must be a mapping")) - - _, err = loadYAML(`["hello"]`) - assert.Check(t, is.ErrorContains(err, "top-level object must be a mapping")) -} - -func TestNonStringKeys(t *testing.T) { - _, err := loadYAML(` -version: "3" -123: - foo: - image: busybox -`) - assert.Check(t, is.ErrorContains(err, "non-string key at top level: 123")) - - _, err = loadYAML(` -version: "3" -services: - foo: - image: busybox - 123: - image: busybox -`) - assert.Check(t, is.ErrorContains(err, "non-string key in services: 123")) - - _, err = loadYAML(` -version: "3" -services: - foo: - image: busybox -networks: - default: - ipam: - config: - - 123: oh dear -`) - assert.Check(t, is.ErrorContains(err, "non-string key in networks.default.ipam.config[0]: 123")) - - _, err = loadYAML(` -version: "3" -services: - dict-env: - image: busybox - environment: - 1: FOO -`) - assert.Check(t, is.ErrorContains(err, "non-string key in services.dict-env.environment: 1")) -} - -func TestSupportedVersion(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - foo: - image: busybox -`) - assert.Check(t, err) - - _, err = loadYAML(` -version: "3.0" -services: - foo: - image: busybox -`) - assert.Check(t, err) -} - -func TestUnsupportedVersion(t *testing.T) { - _, err := loadYAML(` -version: "2" -services: - foo: - image: busybox -`) - assert.Check(t, is.ErrorContains(err, "version")) - - _, err = loadYAML(` -version: "2.0" -services: - foo: - image: busybox -`) - assert.Check(t, is.ErrorContains(err, "version")) -} - -func TestInvalidVersion(t *testing.T) { - _, err := loadYAML(` -version: 3 -services: - foo: - image: busybox -`) - assert.Check(t, is.ErrorContains(err, "version must be a string")) -} - -func TestV1Unsupported(t *testing.T) { - _, err := loadYAML(` -foo: - image: busybox -`) - assert.Check(t, is.ErrorContains(err, "(root) Additional property foo is not allowed")) - - _, err = loadYAML(` -version: "1.0" -foo: - image: busybox -`) - - assert.Check(t, is.ErrorContains(err, "unsupported Compose file version: 1.0")) -} - -func TestNonMappingObject(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - - foo: - image: busybox -`) - assert.Check(t, is.ErrorContains(err, "services must be a mapping")) - - _, err = loadYAML(` -version: "3" -services: - foo: busybox -`) - assert.Check(t, is.ErrorContains(err, "services.foo must be a mapping")) - - _, err = loadYAML(` -version: "3" -networks: - - default: - driver: bridge -`) - assert.Check(t, is.ErrorContains(err, "networks must be a mapping")) - - _, err = loadYAML(` -version: "3" -networks: - default: bridge -`) - assert.Check(t, is.ErrorContains(err, "networks.default must be a mapping")) - - _, err = loadYAML(` -version: "3" -volumes: - - data: - driver: local -`) - assert.Check(t, is.ErrorContains(err, "volumes must be a mapping")) - - _, err = loadYAML(` -version: "3" -volumes: - data: local -`) - assert.Check(t, is.ErrorContains(err, "volumes.data must be a mapping")) -} - -func TestNonStringImage(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - foo: - image: ["busybox", 
"latest"] -`) - assert.Check(t, is.ErrorContains(err, "services.foo.image must be a string")) -} - -func TestIgnoreBuildProperties(t *testing.T) { - _, err := loadYAML(` -services: - foo: - image: busybox - build: - unsupported_prop: foo -`) - assert.NilError(t, err) -} - -func TestLoadWithEnvironment(t *testing.T) { - config, err := loadYAMLWithEnv(` -version: "3" -services: - dict-env: - image: busybox - environment: - FOO: "1" - BAR: 2 - BAZ: 2.5 - QUX: - QUUX: - list-env: - image: busybox - environment: - - FOO=1 - - BAR=2 - - BAZ=2.5 - - QUX= - - QUUX -`, map[string]string{"QUX": "qux"}) - assert.NilError(t, err) - - expected := types.MappingWithEquals{ - "FOO": strPtr("1"), - "BAR": strPtr("2"), - "BAZ": strPtr("2.5"), - "QUX": strPtr("qux"), - "QUUX": nil, - } - - assert.Check(t, is.Equal(2, len(config.Services))) - - for _, service := range config.Services { - assert.Check(t, is.DeepEqual(expected, service.Environment)) - } -} - -func TestInvalidEnvironmentValue(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - dict-env: - image: busybox - environment: - FOO: ["1"] -`) - assert.Check(t, is.ErrorContains(err, "services.dict-env.environment.FOO must be a string, number or null")) -} - -func TestInvalidEnvironmentObject(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - dict-env: - image: busybox - environment: "FOO=1" -`) - assert.Check(t, is.ErrorContains(err, "services.dict-env.environment must be a mapping")) -} - -func TestLoadWithEnvironmentInterpolation(t *testing.T) { - home := "/home/foo" - config, err := loadYAMLWithEnv(` -version: "3" -services: - test: - image: busybox - labels: - - home1=$HOME - - home2=${HOME} - - nonexistent=$NONEXISTENT - - default=${NONEXISTENT-default} -networks: - test: - driver: $HOME -volumes: - test: - driver: $HOME -`, map[string]string{ - "HOME": home, - "FOO": "foo", - }) - - assert.NilError(t, err) - - expectedLabels := types.Labels{ - "home1": home, - "home2": home, - "nonexistent": "", - "default": "default", - } - - assert.Check(t, is.DeepEqual(expectedLabels, config.Services[0].Labels)) - assert.Check(t, is.Equal(home, config.Networks["test"].Driver)) - assert.Check(t, is.Equal(home, config.Volumes["test"].Driver)) -} - -func TestLoadWithInterpolationCastFull(t *testing.T) { - dict, err := ParseYAML([]byte(` -version: "3.8" -services: - web: - configs: - - source: appconfig - mode: $theint - secrets: - - source: super - mode: $theint - healthcheck: - retries: ${theint} - disable: $thebool - deploy: - replicas: $theint - update_config: - parallelism: $theint - max_failure_ratio: $thefloat - rollback_config: - parallelism: $theint - max_failure_ratio: $thefloat - restart_policy: - max_attempts: $theint - placement: - max_replicas_per_node: $theint - ports: - - $theint - - "34567" - - target: $theint - published: $theint - ulimits: - nproc: $theint - nofile: - hard: $theint - soft: $theint - privileged: $thebool - read_only: $thebool - stdin_open: ${thebool} - tty: $thebool - volumes: - - source: data - type: volume - read_only: $thebool - volume: - nocopy: $thebool - -configs: - appconfig: - external: $thebool -secrets: - super: - external: $thebool -volumes: - data: - external: $thebool -networks: - front: - external: $thebool - internal: $thebool - attachable: $thebool - -`)) - assert.NilError(t, err) - env := map[string]string{ - "theint": "555", - "thefloat": "3.14", - "thebool": "true", - } - - config, err := Load(buildConfigDetails(dict, env)) - assert.NilError(t, err) - expected := &types.Config{ - 
Filename: "filename.yml", - Version: "3.8", - Services: []types.ServiceConfig{ - { - Name: "web", - Configs: []types.ServiceConfigObjConfig{ - { - Source: "appconfig", - Mode: uint32Ptr(555), - }, - }, - Secrets: []types.ServiceSecretConfig{ - { - Source: "super", - Mode: uint32Ptr(555), - }, - }, - HealthCheck: &types.HealthCheckConfig{ - Retries: uint64Ptr(555), - Disable: true, - }, - Deploy: types.DeployConfig{ - Replicas: uint64Ptr(555), - UpdateConfig: &types.UpdateConfig{ - Parallelism: uint64Ptr(555), - MaxFailureRatio: 3.14, - }, - RollbackConfig: &types.UpdateConfig{ - Parallelism: uint64Ptr(555), - MaxFailureRatio: 3.14, - }, - RestartPolicy: &types.RestartPolicy{ - MaxAttempts: uint64Ptr(555), - }, - Placement: types.Placement{ - MaxReplicas: 555, - }, - }, - Ports: []types.ServicePortConfig{ - {Target: 555, Mode: "ingress", Protocol: "tcp"}, - {Target: 34567, Mode: "ingress", Protocol: "tcp"}, - {Target: 555, Published: 555}, - }, - Ulimits: map[string]*types.UlimitsConfig{ - "nproc": {Single: 555}, - "nofile": {Hard: 555, Soft: 555}, - }, - Privileged: true, - ReadOnly: true, - StdinOpen: true, - Tty: true, - Volumes: []types.ServiceVolumeConfig{ - { - Source: "data", - Type: "volume", - ReadOnly: true, - Volume: &types.ServiceVolumeVolume{NoCopy: true}, - }, - }, - Environment: types.MappingWithEquals{}, - }, - }, - Configs: map[string]types.ConfigObjConfig{ - "appconfig": {External: types.External{External: true}, Name: "appconfig"}, - }, - Secrets: map[string]types.SecretConfig{ - "super": {External: types.External{External: true}, Name: "super"}, - }, - Volumes: map[string]types.VolumeConfig{ - "data": {External: types.External{External: true}, Name: "data"}, - }, - Networks: map[string]types.NetworkConfig{ - "front": { - External: types.External{External: true}, - Name: "front", - Internal: true, - Attachable: true, - }, - }, - } - - assert.Check(t, is.DeepEqual(expected, config)) -} - func TestUnsupportedProperties(t *testing.T) { - dict, err := ParseYAML([]byte(` + config := ` version: "3" +name: unsupported-properties services: web: image: web build: context: ./web links: - - bar + - db pid: host db: image: db build: context: ./db -`)) - assert.NilError(t, err) +` - configDetails := buildConfigDetails(dict, nil) + configDetails := buildConfigDetailsFromYAML(config, nil) - _, err = Load(configDetails) + project, err := Load(configDetails) assert.NilError(t, err) - unsupported := GetUnsupportedProperties(dict) + unsupported := GetUnsupportedProperties(project.Services) assert.Check(t, is.DeepEqual([]string{"build", "links", "pid"}, unsupported)) } -func TestDiscardEnvFileOption(t *testing.T) { - dict, err := ParseYAML([]byte(`version: "3" -services: - web: - image: nginx - env_file: - - example1.env - - example2.env -`)) - expectedEnvironmentMap := types.MappingWithEquals{ - "FOO": strPtr("foo_from_env_file"), - "BAZ": strPtr("baz_from_env_file"), - "BAR": strPtr("bar_from_env_file_2"), // Original value is overwritten by example2.env - "QUX": strPtr("quz_from_env_file_2"), - } - assert.NilError(t, err) - configDetails := buildConfigDetails(dict, nil) - - // Default behavior keeps the `env_file` entries - configWithEnvFiles, err := Load(configDetails) - assert.NilError(t, err) - expected := types.StringList{"example1.env", "example2.env"} - assert.Check(t, is.DeepEqual(expected, configWithEnvFiles.Services[0].EnvFile)) - assert.Check(t, is.DeepEqual(expectedEnvironmentMap, configWithEnvFiles.Services[0].Environment)) - - // Custom behavior removes the `env_file` entries 
- configWithoutEnvFiles, err := Load(configDetails, WithDiscardEnvFiles) - assert.NilError(t, err) - expected = types.StringList(nil) - assert.Check(t, is.DeepEqual(expected, configWithoutEnvFiles.Services[0].EnvFile)) - assert.Check(t, is.DeepEqual(expectedEnvironmentMap, configWithoutEnvFiles.Services[0].Environment)) -} - -func TestBuildProperties(t *testing.T) { - dict, err := ParseYAML([]byte(` -version: "3" -services: - web: - image: web - build: . - links: - - bar - db: - image: db - build: - context: ./db -`)) - assert.NilError(t, err) - configDetails := buildConfigDetails(dict, nil) - _, err = Load(configDetails) - assert.NilError(t, err) -} - func TestDeprecatedProperties(t *testing.T) { - dict, err := ParseYAML([]byte(` + config := ` version: "3" +name: deprecated-properties services: web: image: web @@ -850,15 +96,14 @@ services: image: db container_name: db expose: ["5434"] -`)) - assert.NilError(t, err) +` - configDetails := buildConfigDetails(dict, nil) + configDetails := buildConfigDetailsFromYAML(config, nil) - _, err = Load(configDetails) + project, err := Load(configDetails) assert.NilError(t, err) - deprecated := GetDeprecatedProperties(dict) + deprecated := GetDeprecatedProperties(project.Services) assert.Check(t, is.Len(deprecated, 2)) assert.Check(t, is.Contains(deprecated, "container_name")) assert.Check(t, is.Contains(deprecated, "expose")) @@ -867,962 +112,26 @@ services: func TestForbiddenProperties(t *testing.T) { _, err := loadYAML(` version: "3" +name: forbidden-properties services: foo: image: busybox volumes: - /data - volume_driver: some-driver + volumes_from: + - bar bar: extends: - service: foo + service: quick + quick: + image: busybox + cpu_quota: 50000 `) assert.ErrorType(t, err, &ForbiddenPropertiesError{}) props := err.(*ForbiddenPropertiesError).Properties assert.Check(t, is.Len(props, 2)) - assert.Check(t, is.Contains(props, "volume_driver")) - assert.Check(t, is.Contains(props, "extends")) -} - -func TestInvalidResource(t *testing.T) { - _, err := loadYAML(` - version: "3" - services: - foo: - image: busybox - deploy: - resources: - impossible: - x: 1 -`) - assert.Check(t, is.ErrorContains(err, "Additional property impossible is not allowed")) -} - -func TestInvalidExternalAndDriverCombination(t *testing.T) { - _, err := loadYAML(` -version: "3" -volumes: - external_volume: - external: true - driver: foobar -`) - - assert.Check(t, is.ErrorContains(err, `conflicting parameters "external" and "driver" specified for volume`)) - assert.Check(t, is.ErrorContains(err, `external_volume`)) -} - -func TestInvalidExternalAndDirverOptsCombination(t *testing.T) { - _, err := loadYAML(` -version: "3" -volumes: - external_volume: - external: true - driver_opts: - beep: boop -`) - - assert.Check(t, is.ErrorContains(err, `conflicting parameters "external" and "driver_opts" specified for volume`)) - assert.Check(t, is.ErrorContains(err, `external_volume`)) -} - -func TestInvalidExternalAndLabelsCombination(t *testing.T) { - _, err := loadYAML(` -version: "3" -volumes: - external_volume: - external: true - labels: - - beep=boop -`) - - assert.Check(t, is.ErrorContains(err, `conflicting parameters "external" and "labels" specified for volume`)) - assert.Check(t, is.ErrorContains(err, `external_volume`)) -} - -func TestLoadVolumeInvalidExternalNameAndNameCombination(t *testing.T) { - _, err := loadYAML(` -version: "3.4" -volumes: - external_volume: - name: user_specified_name - external: - name: external_name -`) - - assert.Check(t, is.ErrorContains(err, 
"volume.external.name and volume.name conflict; only use volume.name")) - assert.Check(t, is.ErrorContains(err, `external_volume`)) -} - -func durationPtr(value time.Duration) *types.Duration { - result := types.Duration(value) - return &result -} - -func uint64Ptr(value uint64) *uint64 { - return &value -} - -func uint32Ptr(value uint32) *uint32 { - return &value -} - -func TestFullExample(t *testing.T) { - skip.If(t, runtime.GOOS == "windows", "FIXME: TestFullExample substitutes platform-specific HOME-directories and requires platform-specific golden files; see https://github.com/docker/cli/pull/4610") - - data, err := os.ReadFile("full-example.yml") - assert.NilError(t, err) - - homeDir := "/home/foo" - env := map[string]string{"HOME": homeDir, "QUX": "qux_from_environment"} - config, err := loadYAMLWithEnv(string(data), env) - assert.NilError(t, err) - - workingDir, err := os.Getwd() - assert.NilError(t, err) - - expected := fullExampleConfig(workingDir, homeDir) - - assert.Check(t, is.DeepEqual(expected.Services, config.Services)) - assert.Check(t, is.DeepEqual(expected.Networks, config.Networks)) - assert.Check(t, is.DeepEqual(expected.Volumes, config.Volumes)) - assert.Check(t, is.DeepEqual(expected.Secrets, config.Secrets)) - assert.Check(t, is.DeepEqual(expected.Configs, config.Configs)) - assert.Check(t, is.DeepEqual(expected.Extras, config.Extras)) -} - -func TestLoadTmpfsVolume(t *testing.T) { - config, err := loadYAML(` -version: "3.6" -services: - tmpfs: - image: nginx:latest - volumes: - - type: tmpfs - target: /app - tmpfs: - size: 10000 -`) - assert.NilError(t, err) - - expected := types.ServiceVolumeConfig{ - Target: "/app", - Type: "tmpfs", - Tmpfs: &types.ServiceVolumeTmpfs{ - Size: int64(10000), - }, - } - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.Len(config.Services[0].Volumes, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].Volumes[0])) -} - -func TestLoadTmpfsVolumeAdditionalPropertyNotAllowed(t *testing.T) { - _, err := loadYAML(` -version: "3.5" -services: - tmpfs: - image: nginx:latest - volumes: - - type: tmpfs - target: /app - tmpfs: - size: 10000 -`) - assert.Check(t, is.ErrorContains(err, "services.tmpfs.volumes.0 Additional property tmpfs is not allowed")) -} - -func TestLoadBindMountSourceMustNotBeEmpty(t *testing.T) { - _, err := loadYAML(` -version: "3.5" -services: - tmpfs: - image: nginx:latest - volumes: - - type: bind - target: /app -`) - assert.Check(t, is.Error(err, `invalid mount config for type "bind": field Source must not be empty`)) -} - -func TestLoadBindMountSourceIsWindowsAbsolute(t *testing.T) { - tests := []struct { - doc string - yaml string - expected types.ServiceVolumeConfig - }{ - { - doc: "Z-drive lowercase", - yaml: ` -version: '3.3' - -services: - windows: - image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019 - volumes: - - type: bind - source: z:\ - target: c:\data -`, - expected: types.ServiceVolumeConfig{Type: "bind", Source: `z:\`, Target: `c:\data`}, - }, - { - doc: "Z-drive uppercase", - yaml: ` -version: '3.3' - -services: - windows: - image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019 - volumes: - - type: bind - source: Z:\ - target: C:\data -`, - expected: types.ServiceVolumeConfig{Type: "bind", Source: `Z:\`, Target: `C:\data`}, - }, - { - doc: "Z-drive subdirectory", - yaml: ` -version: '3.3' - -services: - windows: - image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019 - volumes: - - type: bind - source: 
Z:\some-dir - target: C:\data -`, - expected: types.ServiceVolumeConfig{Type: "bind", Source: `Z:\some-dir`, Target: `C:\data`}, - }, - { - doc: "forward-slashes", - yaml: ` -version: '3.3' - -services: - app: - image: app:latest - volumes: - - type: bind - source: /z/some-dir - target: /c/data -`, - expected: types.ServiceVolumeConfig{Type: "bind", Source: `/z/some-dir`, Target: `/c/data`}, - }, - } - - for _, tc := range tests { - t.Run(tc.doc, func(t *testing.T) { - config, err := loadYAML(tc.yaml) - assert.NilError(t, err) - assert.Check(t, is.Len(config.Services[0].Volumes, 1)) - assert.Check(t, is.DeepEqual(tc.expected, config.Services[0].Volumes[0])) - }) - } -} - -func TestLoadBindMountWithSource(t *testing.T) { - config, err := loadYAML(` -version: "3.5" -services: - bind: - image: nginx:latest - volumes: - - type: bind - target: /app - source: "." -`) - assert.NilError(t, err) - - workingDir, err := os.Getwd() - assert.NilError(t, err) - - expected := types.ServiceVolumeConfig{ - Type: "bind", - Source: workingDir, - Target: "/app", - } - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.Len(config.Services[0].Volumes, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].Volumes[0])) -} - -func TestLoadTmpfsVolumeSizeCanBeZero(t *testing.T) { - config, err := loadYAML(` -version: "3.6" -services: - tmpfs: - image: nginx:latest - volumes: - - type: tmpfs - target: /app - tmpfs: - size: 0 -`) - assert.NilError(t, err) - - expected := types.ServiceVolumeConfig{ - Target: "/app", - Type: "tmpfs", - Tmpfs: &types.ServiceVolumeTmpfs{}, - } - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.Len(config.Services[0].Volumes, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].Volumes[0])) -} - -func TestLoadTmpfsVolumeSizeMustBeGTEQZero(t *testing.T) { - _, err := loadYAML(` -version: "3.6" -services: - tmpfs: - image: nginx:latest - volumes: - - type: tmpfs - target: /app - tmpfs: - size: -1 -`) - assert.ErrorContains(t, err, "services.tmpfs.volumes.0.tmpfs.size Must be greater than or equal to 0") -} - -func TestLoadTmpfsVolumeSizeMustBeInteger(t *testing.T) { - _, err := loadYAML(` -version: "3.6" -services: - tmpfs: - image: nginx:latest - volumes: - - type: tmpfs - target: /app - tmpfs: - size: 0.0001 -`) - assert.ErrorContains(t, err, "services.tmpfs.volumes.0.tmpfs.size must be a integer") -} - -func serviceSort(services []types.ServiceConfig) []types.ServiceConfig { - sort.Slice(services, func(i, j int) bool { - return services[i].Name < services[j].Name - }) - return services -} - -func TestLoadAttachableNetwork(t *testing.T) { - config, err := loadYAML(` -version: "3.2" -networks: - mynet1: - driver: overlay - attachable: true - mynet2: - driver: bridge -`) - assert.NilError(t, err) - - expected := map[string]types.NetworkConfig{ - "mynet1": { - Driver: "overlay", - Attachable: true, - }, - "mynet2": { - Driver: "bridge", - Attachable: false, - }, - } - - assert.Check(t, is.DeepEqual(expected, config.Networks)) -} - -func TestLoadExpandedPortFormat(t *testing.T) { - config, err := loadYAML(` -version: "3.2" -services: - web: - image: busybox - ports: - - "80-82:8080-8082" - - "90-92:8090-8092/udp" - - "85:8500" - - 8600 - - protocol: udp - target: 53 - published: 10053 - - mode: host - target: 22 - published: 10022 -`) - assert.NilError(t, err) - - expected := samplePortsConfig - assert.Check(t, is.Len(config.Services, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].Ports)) -} - -func 
TestLoadExpandedMountFormat(t *testing.T) { - config, err := loadYAML(` -version: "3.2" -services: - web: - image: busybox - volumes: - - type: volume - source: foo - target: /target - read_only: true -volumes: - foo: {} -`) - assert.NilError(t, err) - - expected := types.ServiceVolumeConfig{ - Type: "volume", - Source: "foo", - Target: "/target", - ReadOnly: true, - } - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.Len(config.Services[0].Volumes, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].Volumes[0])) -} - -func TestLoadExtraHostsMap(t *testing.T) { - config, err := loadYAML(` -version: "3.2" -services: - web: - image: busybox - extra_hosts: - "zulu": "162.242.195.82" - "alpha": "50.31.209.229" - "beta": "[fd20:f8a7:6e5b::2]" - "host.docker.internal": "host-gateway" -`) - assert.NilError(t, err) - - expected := types.HostsList{ - "alpha:50.31.209.229", - "beta:fd20:f8a7:6e5b::2", - "host.docker.internal:host-gateway", - "zulu:162.242.195.82", - } - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].ExtraHosts)) -} - -func TestLoadExtraHostsList(t *testing.T) { - config, err := loadYAML(` -version: "3.2" -services: - web: - image: busybox - extra_hosts: - - "zulu:162.242.195.82" - - "whiskey=162.242.195.83" - - "alpha:50.31.209.229" - - "zulu:ff02::1" - - "whiskey=ff02::2" - - "foxtrot=[ff02::3]" - - "bravo:[ff02::4]" - - "host.docker.internal=host-gateway" - - "noaddress" -`) - assert.NilError(t, err) - - expected := types.HostsList{ - "zulu:162.242.195.82", - "whiskey:162.242.195.83", - "alpha:50.31.209.229", - "zulu:ff02::1", - "whiskey:ff02::2", - "foxtrot:ff02::3", - "bravo:ff02::4", - "host.docker.internal:host-gateway", - } - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].ExtraHosts)) -} - -func TestLoadVolumesWarnOnDeprecatedExternalNameVersion34(t *testing.T) { - buf, cleanup := patchLogrus() - defer cleanup() - - source := map[string]any{ - "foo": map[string]any{ - "external": map[string]any{ - "name": "oops", - }, - }, - } - vols, err := LoadVolumes(source, "3.4") - assert.NilError(t, err) - expected := map[string]types.VolumeConfig{ - "foo": { - Name: "oops", - External: types.External{External: true}, - }, - } - assert.Check(t, is.DeepEqual(expected, vols)) - assert.Check(t, is.Contains(buf.String(), "volume.external.name is deprecated")) -} - -func patchLogrus() (*bytes.Buffer, func()) { - buf := new(bytes.Buffer) - out := logrus.StandardLogger().Out - logrus.SetOutput(buf) - return buf, func() { logrus.SetOutput(out) } -} - -func TestLoadVolumesWarnOnDeprecatedExternalNameVersion33(t *testing.T) { - buf, cleanup := patchLogrus() - defer cleanup() - - source := map[string]any{ - "foo": map[string]any{ - "external": map[string]any{ - "name": "oops", - }, - }, - } - vols, err := LoadVolumes(source, "3.3") - assert.NilError(t, err) - expected := map[string]types.VolumeConfig{ - "foo": { - Name: "oops", - External: types.External{External: true}, - }, - } - assert.Check(t, is.DeepEqual(expected, vols)) - assert.Check(t, is.Equal("", buf.String())) -} - -func TestLoadV35(t *testing.T) { - actual, err := loadYAML(` -version: "3.5" -services: - foo: - image: busybox - isolation: process -configs: - foo: - name: fooqux - external: true - bar: - name: barqux - file: ./example1.env -secrets: - foo: - name: fooqux - external: true - bar: - name: barqux - file: ./full-example.yml -`) - assert.NilError(t, err) - assert.Check(t, 
is.Len(actual.Services, 1)) - assert.Check(t, is.Len(actual.Secrets, 2)) - assert.Check(t, is.Len(actual.Configs, 2)) - assert.Check(t, is.Equal("process", actual.Services[0].Isolation)) -} - -func TestLoadV35InvalidIsolation(t *testing.T) { - // validation should be done only on the daemon side - actual, err := loadYAML(` -version: "3.5" -services: - foo: - image: busybox - isolation: invalid -configs: - super: - external: true -`) - assert.NilError(t, err) - assert.Assert(t, is.Len(actual.Services, 1)) - assert.Check(t, is.Equal("invalid", actual.Services[0].Isolation)) -} - -func TestLoadSecretInvalidExternalNameAndNameCombination(t *testing.T) { - _, err := loadYAML(` -version: "3.5" -secrets: - external_secret: - name: user_specified_name - external: - name: external_name -`) - - assert.Check(t, is.ErrorContains(err, "secret.external.name and secret.name conflict; only use secret.name")) - assert.Check(t, is.ErrorContains(err, "external_secret")) -} - -func TestLoadSecretsWarnOnDeprecatedExternalNameVersion35(t *testing.T) { - buf, cleanup := patchLogrus() - defer cleanup() - - source := map[string]any{ - "foo": map[string]any{ - "external": map[string]any{ - "name": "oops", - }, - }, - } - details := types.ConfigDetails{ - Version: "3.5", - } - s, err := LoadSecrets(source, details) - assert.NilError(t, err) - expected := map[string]types.SecretConfig{ - "foo": { - Name: "oops", - External: types.External{External: true}, - }, - } - assert.Check(t, is.DeepEqual(expected, s)) - assert.Check(t, is.Contains(buf.String(), "secret.external.name is deprecated")) -} - -func TestLoadNetworksWarnOnDeprecatedExternalNameVersion35(t *testing.T) { - buf, cleanup := patchLogrus() - defer cleanup() - - source := map[string]any{ - "foo": map[string]any{ - "external": map[string]any{ - "name": "oops", - }, - }, - } - nws, err := LoadNetworks(source, "3.5") - assert.NilError(t, err) - expected := map[string]types.NetworkConfig{ - "foo": { - Name: "oops", - External: types.External{External: true}, - }, - } - assert.Check(t, is.DeepEqual(expected, nws)) - assert.Check(t, is.Contains(buf.String(), "network.external.name is deprecated")) -} - -func TestLoadNetworksWarnOnDeprecatedExternalNameVersion34(t *testing.T) { - buf, cleanup := patchLogrus() - defer cleanup() - - source := map[string]any{ - "foo": map[string]any{ - "external": map[string]any{ - "name": "oops", - }, - }, - } - networks, err := LoadNetworks(source, "3.4") - assert.NilError(t, err) - expected := map[string]types.NetworkConfig{ - "foo": { - Name: "oops", - External: types.External{External: true}, - }, - } - assert.Check(t, is.DeepEqual(expected, networks)) - assert.Check(t, is.Equal("", buf.String())) -} - -func TestLoadNetworkInvalidExternalNameAndNameCombination(t *testing.T) { - _, err := loadYAML(` -version: "3.5" -networks: - foo: - name: user_specified_name - external: - name: external_name -`) - - assert.Check(t, is.ErrorContains(err, "network.external.name and network.name conflict; only use network.name")) - assert.Check(t, is.ErrorContains(err, "foo")) -} - -func TestLoadNetworkWithName(t *testing.T) { - config, err := loadYAML(` -version: '3.5' -services: - hello-world: - image: redis:alpine - networks: - - network1 - - network3 - -networks: - network1: - name: network2 - network3: -`) - assert.NilError(t, err) - expected := &types.Config{ - Filename: "filename.yml", - Version: "3.5", - Services: types.Services{ - { - Name: "hello-world", - Image: "redis:alpine", - Networks: map[string]*types.ServiceNetworkConfig{ - 
"network1": nil, - "network3": nil, - }, - }, - }, - Networks: map[string]types.NetworkConfig{ - "network1": {Name: "network2"}, - "network3": {}, - }, - } - assert.Check(t, is.DeepEqual(expected, config, cmpopts.EquateEmpty())) -} - -func TestLoadInit(t *testing.T) { - booleanTrue := true - booleanFalse := false - - testcases := []struct { - doc string - yaml string - init *bool - }{ - { - doc: "no init defined", - yaml: ` -version: '3.7' -services: - foo: - image: alpine`, - }, - { - doc: "has true init", - yaml: ` -version: '3.7' -services: - foo: - image: alpine - init: true`, - init: &booleanTrue, - }, - { - doc: "has false init", - yaml: ` -version: '3.7' -services: - foo: - image: alpine - init: false`, - init: &booleanFalse, - }, - } - for _, testcase := range testcases { - testcase := testcase - t.Run(testcase.doc, func(t *testing.T) { - config, err := loadYAML(testcase.yaml) - assert.NilError(t, err) - assert.Check(t, is.Len(config.Services, 1)) - assert.Check(t, is.DeepEqual(testcase.init, config.Services[0].Init)) - }) - } -} - -func TestLoadSysctls(t *testing.T) { - config, err := loadYAML(` -version: "3.8" -services: - web: - image: busybox - sysctls: - - net.core.somaxconn=1024 - - net.ipv4.tcp_syncookies=0 - - testing.one.one= - - testing.one.two -`) - assert.NilError(t, err) - - expected := types.Mapping{ - "net.core.somaxconn": "1024", - "net.ipv4.tcp_syncookies": "0", - "testing.one.one": "", - "testing.one.two": "", - } - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].Sysctls)) - - config, err = loadYAML(` -version: "3.8" -services: - web: - image: busybox - sysctls: - net.core.somaxconn: 1024 - net.ipv4.tcp_syncookies: 0 - testing.one.one: "" - testing.one.two: -`) - assert.NilError(t, err) - - assert.Assert(t, is.Len(config.Services, 1)) - assert.Check(t, is.DeepEqual(expected, config.Services[0].Sysctls)) -} - -func TestTransform(t *testing.T) { - source := []any{ - "80-82:8080-8082", - "90-92:8090-8092/udp", - "85:8500", - 8600, - map[string]any{ - "protocol": "udp", - "target": 53, - "published": 10053, - }, - map[string]any{ - "mode": "host", - "target": 22, - "published": 10022, - }, - } - var ports []types.ServicePortConfig - err := Transform(source, &ports) - assert.NilError(t, err) - - assert.Check(t, is.DeepEqual(samplePortsConfig, ports)) -} - -func TestLoadTemplateDriver(t *testing.T) { - config, err := loadYAML(` -version: '3.8' -services: - hello-world: - image: redis:alpine - secrets: - - secret - configs: - - config - -configs: - config: - name: config - external: true - template_driver: config-driver - -secrets: - secret: - name: secret - external: true - template_driver: secret-driver -`) - assert.NilError(t, err) - expected := &types.Config{ - Filename: "filename.yml", - Version: "3.8", - Services: types.Services{ - { - Name: "hello-world", - Image: "redis:alpine", - Configs: []types.ServiceConfigObjConfig{ - { - Source: "config", - }, - }, - Secrets: []types.ServiceSecretConfig{ - { - Source: "secret", - }, - }, - }, - }, - Configs: map[string]types.ConfigObjConfig{ - "config": { - Name: "config", - External: types.External{External: true}, - TemplateDriver: "config-driver", - }, - }, - Secrets: map[string]types.SecretConfig{ - "secret": { - Name: "secret", - External: types.External{External: true}, - TemplateDriver: "secret-driver", - }, - }, - } - assert.Check(t, is.DeepEqual(expected, config, cmpopts.EquateEmpty())) -} - -func TestLoadSecretDriver(t *testing.T) { - config, err := 
loadYAML(` -version: '3.8' -services: - hello-world: - image: redis:alpine - secrets: - - secret - configs: - - config - -configs: - config: - name: config - external: true - -secrets: - secret: - name: secret - driver: secret-bucket - driver_opts: - OptionA: value for driver option A - OptionB: value for driver option B -`) - assert.NilError(t, err) - expected := &types.Config{ - Filename: "filename.yml", - Version: "3.8", - Services: types.Services{ - { - Name: "hello-world", - Image: "redis:alpine", - Configs: []types.ServiceConfigObjConfig{ - { - Source: "config", - }, - }, - Secrets: []types.ServiceSecretConfig{ - { - Source: "secret", - }, - }, - }, - }, - Configs: map[string]types.ConfigObjConfig{ - "config": { - Name: "config", - External: types.External{External: true}, - }, - }, - Secrets: map[string]types.SecretConfig{ - "secret": { - Name: "secret", - Driver: "secret-bucket", - DriverOpts: map[string]string{ - "OptionA": "value for driver option A", - "OptionB": "value for driver option B", - }, - }, - }, - } - assert.Check(t, is.DeepEqual(expected, config, cmpopts.EquateEmpty())) + assert.Check(t, is.Contains(props, "volumes_from")) + assert.Check(t, is.Contains(props, "cpu_quota")) } diff --git a/cli/compose/loader/merge.go b/cli/compose/loader/merge.go deleted file mode 100644 index ab33ee61ab44..000000000000 --- a/cli/compose/loader/merge.go +++ /dev/null @@ -1,304 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 - -package loader - -import ( - "reflect" - "sort" - - "dario.cat/mergo" - "github.com/docker/cli/cli/compose/types" - "github.com/pkg/errors" -) - -type specials struct { - m map[reflect.Type]func(dst, src reflect.Value) error -} - -func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error { - if fn, ok := s.m[t]; ok { - return fn - } - return nil -} - -func merge(configs []*types.Config) (*types.Config, error) { - base := configs[0] - for _, override := range configs[1:] { - var err error - base.Services, err = mergeServices(base.Services, override.Services) - if err != nil { - return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename) - } - base.Volumes, err = mergeVolumes(base.Volumes, override.Volumes) - if err != nil { - return base, errors.Wrapf(err, "cannot merge volumes from %s", override.Filename) - } - base.Networks, err = mergeNetworks(base.Networks, override.Networks) - if err != nil { - return base, errors.Wrapf(err, "cannot merge networks from %s", override.Filename) - } - base.Secrets, err = mergeSecrets(base.Secrets, override.Secrets) - if err != nil { - return base, errors.Wrapf(err, "cannot merge secrets from %s", override.Filename) - } - base.Configs, err = mergeConfigs(base.Configs, override.Configs) - if err != nil { - return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename) - } - } - return base, nil -} - -func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) { - baseServices := mapByName(base) - overrideServices := mapByName(override) - specials := &specials{ - m: map[reflect.Type]func(dst, src reflect.Value) error{ - reflect.TypeOf(&types.LoggingConfig{}): safelyMerge(mergeLoggingConfig), - reflect.TypeOf([]types.ServicePortConfig{}): mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice), - reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice), - 
reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice), - reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig, - reflect.TypeOf([]types.ServiceVolumeConfig{}): mergeSlice(toServiceVolumeConfigsMap, toServiceVolumeConfigsSlice), - reflect.TypeOf(types.ShellCommand{}): mergeShellCommand, - reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig, - reflect.PointerTo(reflect.TypeOf(uint64(1))): mergeUint64, - }, - } - for name, overrideService := range overrideServices { - overrideService := overrideService - if baseService, ok := baseServices[name]; ok { - if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil { - return base, errors.Wrapf(err, "cannot merge service %s", name) - } - baseServices[name] = baseService - continue - } - baseServices[name] = overrideService - } - services := []types.ServiceConfig{} - for _, baseService := range baseServices { - services = append(services, baseService) - } - sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name }) - return services, nil -} - -func toServiceSecretConfigsMap(s any) (map[any]any, error) { - secrets, ok := s.([]types.ServiceSecretConfig) - if !ok { - return nil, errors.Errorf("not a serviceSecretConfig: %v", s) - } - m := map[any]any{} - for _, secret := range secrets { - m[secret.Source] = secret - } - return m, nil -} - -func toServiceConfigObjConfigsMap(s any) (map[any]any, error) { - secrets, ok := s.([]types.ServiceConfigObjConfig) - if !ok { - return nil, errors.Errorf("not a serviceSecretConfig: %v", s) - } - m := map[any]any{} - for _, secret := range secrets { - m[secret.Source] = secret - } - return m, nil -} - -func toServicePortConfigsMap(s any) (map[any]any, error) { - ports, ok := s.([]types.ServicePortConfig) - if !ok { - return nil, errors.Errorf("not a servicePortConfig slice: %v", s) - } - m := map[any]any{} - for _, p := range ports { - m[p.Published] = p - } - return m, nil -} - -func toServiceVolumeConfigsMap(s any) (map[any]any, error) { - volumes, ok := s.([]types.ServiceVolumeConfig) - if !ok { - return nil, errors.Errorf("not a serviceVolumeConfig slice: %v", s) - } - m := map[any]any{} - for _, v := range volumes { - m[v.Target] = v - } - return m, nil -} - -func toServiceSecretConfigsSlice(dst reflect.Value, m map[any]any) error { - s := []types.ServiceSecretConfig{} - for _, v := range m { - s = append(s, v.(types.ServiceSecretConfig)) - } - sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source }) - dst.Set(reflect.ValueOf(s)) - return nil -} - -func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[any]any) error { - s := []types.ServiceConfigObjConfig{} - for _, v := range m { - s = append(s, v.(types.ServiceConfigObjConfig)) - } - sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source }) - dst.Set(reflect.ValueOf(s)) - return nil -} - -func toServicePortConfigsSlice(dst reflect.Value, m map[any]any) error { - s := []types.ServicePortConfig{} - for _, v := range m { - s = append(s, v.(types.ServicePortConfig)) - } - sort.Slice(s, func(i, j int) bool { return s[i].Published < s[j].Published }) - dst.Set(reflect.ValueOf(s)) - return nil -} - -func toServiceVolumeConfigsSlice(dst reflect.Value, m map[any]any) error { - s := []types.ServiceVolumeConfig{} - for _, v := range m { - s = append(s, v.(types.ServiceVolumeConfig)) - } - sort.Slice(s, func(i, j int) bool { 
return s[i].Target < s[j].Target }) - dst.Set(reflect.ValueOf(s)) - return nil -} - -type ( - tomapFn func(s any) (map[any]any, error) - writeValueFromMapFn func(reflect.Value, map[any]any) error -) - -func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error { - return func(dst, src reflect.Value) error { - if src.IsNil() { - return nil - } - if dst.IsNil() { - dst.Set(src) - return nil - } - return mergeFn(dst, src) - } -} - -func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error { - return func(dst, src reflect.Value) error { - dstMap, err := sliceToMap(tomap, dst) - if err != nil { - return err - } - srcMap, err := sliceToMap(tomap, src) - if err != nil { - return err - } - if err := mergo.Map(&dstMap, srcMap, mergo.WithOverride); err != nil { - return err - } - return writeValue(dst, dstMap) - } -} - -func sliceToMap(tomap tomapFn, v reflect.Value) (map[any]any, error) { - // check if valid - if !v.IsValid() { - return nil, errors.Errorf("invalid value : %+v", v) - } - return tomap(v.Interface()) -} - -func mergeLoggingConfig(dst, src reflect.Value) error { - // Same driver, merging options - if getLoggingDriver(dst.Elem()) == getLoggingDriver(src.Elem()) || - getLoggingDriver(dst.Elem()) == "" || getLoggingDriver(src.Elem()) == "" { - if getLoggingDriver(dst.Elem()) == "" { - dst.Elem().FieldByName("Driver").SetString(getLoggingDriver(src.Elem())) - } - dstOptions := dst.Elem().FieldByName("Options").Interface().(map[string]string) - srcOptions := src.Elem().FieldByName("Options").Interface().(map[string]string) - return mergo.Merge(&dstOptions, srcOptions, mergo.WithOverride) - } - // Different driver, override with src - dst.Set(src) - return nil -} - -//nolint:unparam -func mergeUlimitsConfig(dst, src reflect.Value) error { - if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() { - dst.Elem().Set(src.Elem()) - } - return nil -} - -//nolint:unparam -func mergeShellCommand(dst, src reflect.Value) error { - if src.Len() != 0 { - dst.Set(src) - } - return nil -} - -//nolint:unparam -func mergeServiceNetworkConfig(dst, src reflect.Value) error { - if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() { - dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases")) - if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" { - dst.Elem().FieldByName("Ipv4Address").SetString(ipv4) - } - if ipv6 := src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" { - dst.Elem().FieldByName("Ipv6Address").SetString(ipv6) - } - } - return nil -} - -//nolint:unparam -func mergeUint64(dst, src reflect.Value) error { - if !src.IsNil() { - dst.Elem().Set(src.Elem()) - } - return nil -} - -func getLoggingDriver(v reflect.Value) string { - return v.FieldByName("Driver").String() -} - -func mapByName(services []types.ServiceConfig) map[string]types.ServiceConfig { - m := map[string]types.ServiceConfig{} - for _, service := range services { - m[service.Name] = service - } - return m -} - -func mergeVolumes(base, override map[string]types.VolumeConfig) (map[string]types.VolumeConfig, error) { - err := mergo.Map(&base, &override, mergo.WithOverride) - return base, err -} - -func mergeNetworks(base, override map[string]types.NetworkConfig) (map[string]types.NetworkConfig, error) { - err := mergo.Map(&base, &override, mergo.WithOverride) - return base, err -} - -func mergeSecrets(base, override map[string]types.SecretConfig) 
(map[string]types.SecretConfig, error) { - err := mergo.Map(&base, &override, mergo.WithOverride) - return base, err -} - -func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]types.ConfigObjConfig, error) { - err := mergo.Map(&base, &override, mergo.WithOverride) - return base, err -} diff --git a/cli/compose/loader/merge_test.go b/cli/compose/loader/merge_test.go deleted file mode 100644 index d0a1637498a7..000000000000 --- a/cli/compose/loader/merge_test.go +++ /dev/null @@ -1,1299 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 - -package loader - -import ( - "reflect" - "testing" - - "dario.cat/mergo" - "github.com/docker/cli/cli/compose/types" - "gotest.tools/v3/assert" -) - -func TestLoadTwoDifferentVersion(t *testing.T) { - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - {Filename: "base.yml", Config: map[string]any{ - "version": "3.1", - }}, - {Filename: "override.yml", Config: map[string]any{ - "version": "3.4", - }}, - }, - } - _, err := Load(configDetails) - assert.Error(t, err, "version mismatched between two composefiles : 3.1 and 3.4") -} - -func TestLoadLogging(t *testing.T) { - loggingCases := []struct { - name string - loggingBase map[string]any - loggingOverride map[string]any - expected *types.LoggingConfig - }{ - { - name: "no_override_driver", - loggingBase: map[string]any{ - "logging": map[string]any{ - "driver": "json-file", - "options": map[string]any{ - "frequency": "2000", - "timeout": "23", - }, - }, - }, - loggingOverride: map[string]any{ - "logging": map[string]any{ - "options": map[string]any{ - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - expected: &types.LoggingConfig{ - Driver: "json-file", - Options: map[string]string{ - "frequency": "2000", - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - { - name: "override_driver", - loggingBase: map[string]any{ - "logging": map[string]any{ - "driver": "json-file", - "options": map[string]any{ - "frequency": "2000", - "timeout": "23", - }, - }, - }, - loggingOverride: map[string]any{ - "logging": map[string]any{ - "driver": "syslog", - "options": map[string]any{ - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - expected: &types.LoggingConfig{ - Driver: "syslog", - Options: map[string]string{ - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - { - name: "no_base_driver", - loggingBase: map[string]any{ - "logging": map[string]any{ - "options": map[string]any{ - "frequency": "2000", - "timeout": "23", - }, - }, - }, - loggingOverride: map[string]any{ - "logging": map[string]any{ - "driver": "json-file", - "options": map[string]any{ - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - expected: &types.LoggingConfig{ - Driver: "json-file", - Options: map[string]string{ - "frequency": "2000", - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - { - name: "no_driver", - loggingBase: map[string]any{ - "logging": map[string]any{ - "options": map[string]any{ - "frequency": "2000", - "timeout": "23", - }, - }, - }, - loggingOverride: map[string]any{ - "logging": map[string]any{ - "options": map[string]any{ - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - expected: &types.LoggingConfig{ - Options: map[string]string{ - "frequency": "2000", - "timeout": "360", - "pretty-print": "on", - }, - }, - }, - { - name: "no_override_options", - loggingBase: map[string]any{ - "logging": map[string]any{ - 
"driver": "json-file", - "options": map[string]any{ - "frequency": "2000", - "timeout": "23", - }, - }, - }, - loggingOverride: map[string]any{ - "logging": map[string]any{ - "driver": "syslog", - }, - }, - expected: &types.LoggingConfig{ - Driver: "syslog", - }, - }, - { - name: "no_base", - loggingBase: map[string]any{}, - loggingOverride: map[string]any{ - "logging": map[string]any{ - "driver": "json-file", - "options": map[string]any{ - "frequency": "2000", - }, - }, - }, - expected: &types.LoggingConfig{ - Driver: "json-file", - Options: map[string]string{ - "frequency": "2000", - }, - }, - }, - } - - for _, tc := range loggingCases { - t.Run(tc.name, func(t *testing.T) { - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - { - Filename: "base.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.loggingBase, - }, - }, - }, - { - Filename: "override.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.loggingOverride, - }, - }, - }, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "foo", - Logging: tc.expected, - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{}, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) - }) - } -} - -func TestLoadMultipleServicePorts(t *testing.T) { - portsCases := []struct { - name string - portBase map[string]any - portOverride map[string]any - expected []types.ServicePortConfig - }{ - { - name: "no_override", - portBase: map[string]any{ - "ports": []any{ - "8080:80", - }, - }, - portOverride: map[string]any{}, - expected: []types.ServicePortConfig{ - { - Mode: "ingress", - Published: 8080, - Target: 80, - Protocol: "tcp", - }, - }, - }, - { - name: "override_different_published", - portBase: map[string]any{ - "ports": []any{ - "8080:80", - }, - }, - portOverride: map[string]any{ - "ports": []any{ - "8081:80", - }, - }, - expected: []types.ServicePortConfig{ - { - Mode: "ingress", - Published: 8080, - Target: 80, - Protocol: "tcp", - }, - { - Mode: "ingress", - Published: 8081, - Target: 80, - Protocol: "tcp", - }, - }, - }, - { - name: "override_same_published", - portBase: map[string]any{ - "ports": []any{ - "8080:80", - }, - }, - portOverride: map[string]any{ - "ports": []any{ - "8080:81", - }, - }, - expected: []types.ServicePortConfig{ - { - Mode: "ingress", - Published: 8080, - Target: 81, - Protocol: "tcp", - }, - }, - }, - } - - for _, tc := range portsCases { - t.Run(tc.name, func(t *testing.T) { - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - { - Filename: "base.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.portBase, - }, - }, - }, - { - Filename: "override.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.portOverride, - }, - }, - }, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "foo", - Ports: tc.expected, - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{}, - Volumes: map[string]types.VolumeConfig{}, - Secrets: 
map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) - }) - } -} - -func TestLoadMultipleSecretsConfig(t *testing.T) { - portsCases := []struct { - name string - secretBase map[string]any - secretOverride map[string]any - expected []types.ServiceSecretConfig - }{ - { - name: "no_override", - secretBase: map[string]any{ - "secrets": []any{ - "my_secret", - }, - }, - secretOverride: map[string]any{}, - expected: []types.ServiceSecretConfig{ - { - Source: "my_secret", - }, - }, - }, - { - name: "override_simple", - secretBase: map[string]any{ - "secrets": []any{ - "foo_secret", - }, - }, - secretOverride: map[string]any{ - "secrets": []any{ - "bar_secret", - }, - }, - expected: []types.ServiceSecretConfig{ - { - Source: "bar_secret", - }, - { - Source: "foo_secret", - }, - }, - }, - { - name: "override_same_source", - secretBase: map[string]any{ - "secrets": []any{ - "foo_secret", - map[string]any{ - "source": "bar_secret", - "target": "waw_secret", - }, - }, - }, - secretOverride: map[string]any{ - "secrets": []any{ - map[string]any{ - "source": "bar_secret", - "target": "bof_secret", - }, - map[string]any{ - "source": "baz_secret", - "target": "waw_secret", - }, - }, - }, - expected: []types.ServiceSecretConfig{ - { - Source: "bar_secret", - Target: "bof_secret", - }, - { - Source: "baz_secret", - Target: "waw_secret", - }, - { - Source: "foo_secret", - }, - }, - }, - } - - for _, tc := range portsCases { - t.Run(tc.name, func(t *testing.T) { - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - { - Filename: "base.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.secretBase, - }, - }, - }, - { - Filename: "override.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.secretOverride, - }, - }, - }, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "foo", - Secrets: tc.expected, - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{}, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) - }) - } -} - -func TestLoadMultipleConfigobjsConfig(t *testing.T) { - portsCases := []struct { - name string - configBase map[string]any - configOverride map[string]any - expected []types.ServiceConfigObjConfig - }{ - { - name: "no_override", - configBase: map[string]any{ - "configs": []any{ - "my_config", - }, - }, - configOverride: map[string]any{}, - expected: []types.ServiceConfigObjConfig{ - { - Source: "my_config", - }, - }, - }, - { - name: "override_simple", - configBase: map[string]any{ - "configs": []any{ - "foo_config", - }, - }, - configOverride: map[string]any{ - "configs": []any{ - "bar_config", - }, - }, - expected: []types.ServiceConfigObjConfig{ - { - Source: "bar_config", - }, - { - Source: "foo_config", - }, - }, - }, - { - name: "override_same_source", - configBase: map[string]any{ - "configs": []any{ - "foo_config", - map[string]any{ - "source": "bar_config", - "target": "waw_config", - }, - }, - }, - configOverride: map[string]any{ - "configs": []any{ - map[string]any{ - "source": "bar_config", - "target": "bof_config", - }, - map[string]any{ - "source": "baz_config", - "target": "waw_config", - }, - }, - }, - expected: []types.ServiceConfigObjConfig{ - { - 
Source: "bar_config", - Target: "bof_config", - }, - { - Source: "baz_config", - Target: "waw_config", - }, - { - Source: "foo_config", - }, - }, - }, - } - - for _, tc := range portsCases { - t.Run(tc.name, func(t *testing.T) { - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - { - Filename: "base.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.configBase, - }, - }, - }, - { - Filename: "override.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.configOverride, - }, - }, - }, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "foo", - Configs: tc.expected, - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{}, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) - }) - } -} - -func TestLoadMultipleUlimits(t *testing.T) { - ulimitCases := []struct { - name string - ulimitBase map[string]any - ulimitOverride map[string]any - expected map[string]*types.UlimitsConfig - }{ - { - name: "no_override", - ulimitBase: map[string]any{ - "ulimits": map[string]any{ - "noproc": 65535, - }, - }, - ulimitOverride: map[string]any{}, - expected: map[string]*types.UlimitsConfig{ - "noproc": { - Single: 65535, - }, - }, - }, - { - name: "override_simple", - ulimitBase: map[string]any{ - "ulimits": map[string]any{ - "noproc": 65535, - }, - }, - ulimitOverride: map[string]any{ - "ulimits": map[string]any{ - "noproc": 44444, - }, - }, - expected: map[string]*types.UlimitsConfig{ - "noproc": { - Single: 44444, - }, - }, - }, - { - name: "override_different_notation", - ulimitBase: map[string]any{ - "ulimits": map[string]any{ - "nofile": map[string]any{ - "soft": 11111, - "hard": 99999, - }, - "noproc": 44444, - }, - }, - ulimitOverride: map[string]any{ - "ulimits": map[string]any{ - "nofile": 55555, - "noproc": map[string]any{ - "soft": 22222, - "hard": 33333, - }, - }, - }, - expected: map[string]*types.UlimitsConfig{ - "noproc": { - Soft: 22222, - Hard: 33333, - }, - "nofile": { - Single: 55555, - }, - }, - }, - } - - for _, tc := range ulimitCases { - t.Run(tc.name, func(t *testing.T) { - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - { - Filename: "base.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.ulimitBase, - }, - }, - }, - { - Filename: "override.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.ulimitOverride, - }, - }, - }, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "foo", - Ulimits: tc.expected, - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{}, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) - }) - } -} - -func TestLoadMultipleServiceNetworks(t *testing.T) { - networkCases := []struct { - name string - networkBase map[string]any - networkOverride map[string]any - expected map[string]*types.ServiceNetworkConfig - }{ - { - name: "no_override", - networkBase: map[string]any{ - "networks": []any{ 
- "net1", - "net2", - }, - }, - networkOverride: map[string]any{}, - expected: map[string]*types.ServiceNetworkConfig{ - "net1": nil, - "net2": nil, - }, - }, - { - name: "override_simple", - networkBase: map[string]any{ - "networks": []any{ - "net1", - "net2", - }, - }, - networkOverride: map[string]any{ - "networks": []any{ - "net1", - "net3", - }, - }, - expected: map[string]*types.ServiceNetworkConfig{ - "net1": nil, - "net2": nil, - "net3": nil, - }, - }, - { - name: "override_with_aliases", - networkBase: map[string]any{ - "networks": map[string]any{ - "net1": map[string]any{ - "aliases": []any{ - "alias1", - }, - }, - "net2": nil, - }, - }, - networkOverride: map[string]any{ - "networks": map[string]any{ - "net1": map[string]any{ - "aliases": []any{ - "alias2", - "alias3", - }, - }, - "net3": map[string]any{}, - }, - }, - expected: map[string]*types.ServiceNetworkConfig{ - "net1": { - Aliases: []string{"alias2", "alias3"}, - }, - "net2": nil, - "net3": {}, - }, - }, - } - - for _, tc := range networkCases { - t.Run(tc.name, func(t *testing.T) { - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - { - Filename: "base.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.networkBase, - }, - }, - }, - { - Filename: "override.yml", - Config: map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": tc.networkOverride, - }, - }, - }, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "foo", - Networks: tc.expected, - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{}, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) - }) - } -} - -func TestLoadMultipleConfigs(t *testing.T) { - base := map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": map[string]any{ - "image": "foo", - "build": map[string]any{ - "context": ".", - "dockerfile": "bar.Dockerfile", - }, - "ports": []any{ - "8080:80", - "9090:90", - }, - "labels": []any{ - "foo=bar", - }, - "cap_add": []any{ - "NET_ADMIN", - }, - }, - }, - "volumes": map[string]any{}, - "networks": map[string]any{}, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - override := map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": map[string]any{ - "image": "baz", - "build": map[string]any{ - "dockerfile": "foo.Dockerfile", - "args": []any{ - "buildno=1", - "password=secret", - }, - }, - "ports": []any{ - map[string]any{ - "target": 81, - "published": 8080, - }, - }, - "labels": map[string]any{ - "foo": "baz", - }, - "cap_add": []any{ - "SYS_ADMIN", - }, - }, - "bar": map[string]any{ - "image": "bar", - }, - }, - "volumes": map[string]any{}, - "networks": map[string]any{}, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - {Filename: "base.yml", Config: base}, - {Filename: "override.yml", Config: override}, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "bar", - Image: "bar", - Environment: types.MappingWithEquals{}, - }, - { - Name: "foo", - Image: "baz", - Build: types.BuildConfig{ - Context: 
".", - Dockerfile: "foo.Dockerfile", - Args: types.MappingWithEquals{ - "buildno": strPtr("1"), - "password": strPtr("secret"), - }, - }, - Ports: []types.ServicePortConfig{ - { - Target: 81, - Published: 8080, - }, - { - Mode: "ingress", - Target: 90, - Published: 9090, - Protocol: "tcp", - }, - }, - Labels: types.Labels{ - "foo": "baz", - }, - CapAdd: []string{"NET_ADMIN", "SYS_ADMIN"}, - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{}, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) -} - -// Issue#972 -func TestLoadMultipleNetworks(t *testing.T) { - base := map[string]any{ - "version": "3.4", - "services": map[string]any{ - "foo": map[string]any{ - "image": "baz", - }, - }, - "volumes": map[string]any{}, - "networks": map[string]any{ - "hostnet": map[string]any{ - "driver": "overlay", - "ipam": map[string]any{ - "driver": "default", - "config": []any{ - map[string]any{ - "subnet": "10.0.0.0/20", - }, - }, - }, - }, - }, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - override := map[string]any{ - "version": "3.4", - "services": map[string]any{}, - "volumes": map[string]any{}, - "networks": map[string]any{ - "hostnet": map[string]any{ - "external": map[string]any{ - "name": "host", - }, - }, - }, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - {Filename: "base.yml", Config: base}, - {Filename: "override.yml", Config: override}, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.4", - Services: []types.ServiceConfig{ - { - Name: "foo", - Image: "baz", - Environment: types.MappingWithEquals{}, - }, - }, - Networks: map[string]types.NetworkConfig{ - "hostnet": { - Name: "host", - External: types.External{ - External: true, - }, - }, - }, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - }, config) -} - -func TestLoadMultipleServiceCommands(t *testing.T) { - base := map[string]any{ - "version": "3.7", - "services": map[string]any{ - "foo": map[string]any{ - "image": "baz", - "command": "foo bar", - }, - }, - "volumes": map[string]any{}, - "networks": map[string]any{}, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - override := map[string]any{ - "version": "3.7", - "services": map[string]any{ - "foo": map[string]any{ - "image": "baz", - "command": "foo baz", - }, - }, - "volumes": map[string]any{}, - "networks": map[string]any{}, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - {Filename: "base.yml", Config: base}, - {Filename: "override.yml", Config: override}, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.7", - Services: []types.ServiceConfig{ - { - Name: "foo", - Image: "baz", - Command: types.ShellCommand{"foo", "baz"}, - Environment: types.MappingWithEquals{}, - }, - }, - Volumes: map[string]types.VolumeConfig{}, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - Networks: map[string]types.NetworkConfig{}, - }, config) -} - -func TestLoadMultipleServiceVolumes(t *testing.T) { - base := 
map[string]any{ - "version": "3.7", - "services": map[string]any{ - "foo": map[string]any{ - "image": "baz", - "volumes": []any{ - map[string]any{ - "type": "volume", - "source": "sourceVolume", - "target": "/var/app", - }, - }, - }, - }, - "volumes": map[string]any{ - "sourceVolume": map[string]any{}, - }, - "networks": map[string]any{}, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - override := map[string]any{ - "version": "3.7", - "services": map[string]any{ - "foo": map[string]any{ - "image": "baz", - "volumes": []any{ - map[string]any{ - "type": "volume", - "source": "/local", - "target": "/var/app", - }, - }, - }, - }, - "volumes": map[string]any{}, - "networks": map[string]any{}, - "secrets": map[string]any{}, - "configs": map[string]any{}, - } - configDetails := types.ConfigDetails{ - ConfigFiles: []types.ConfigFile{ - {Filename: "base.yml", Config: base}, - {Filename: "override.yml", Config: override}, - }, - } - config, err := Load(configDetails) - assert.NilError(t, err) - assert.DeepEqual(t, &types.Config{ - Filename: "base.yml", - Version: "3.7", - Services: []types.ServiceConfig{ - { - Name: "foo", - Image: "baz", - Environment: types.MappingWithEquals{}, - Volumes: []types.ServiceVolumeConfig{ - { - Type: "volume", - Source: "/local", - Target: "/var/app", - }, - }, - }, - }, - Volumes: map[string]types.VolumeConfig{ - "sourceVolume": {}, - }, - Secrets: map[string]types.SecretConfig{}, - Configs: map[string]types.ConfigObjConfig{}, - Networks: map[string]types.NetworkConfig{}, - }, config) -} - -func TestMergeUlimitsConfig(t *testing.T) { - specials := &specials{ - m: map[reflect.Type]func(dst, src reflect.Value) error{ - reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig, - }, - } - base := map[string]*types.UlimitsConfig{ - "override-single": {Single: 100}, - "override-single-with-soft-hard": {Single: 200}, - "override-soft-hard": {Soft: 300, Hard: 301}, - "override-soft-hard-with-single": {Soft: 400, Hard: 401}, - "dont-override": {Single: 500}, - } - override := map[string]*types.UlimitsConfig{ - "override-single": {Single: 110}, - "override-single-with-soft-hard": {Soft: 210, Hard: 211}, - "override-soft-hard": {Soft: 310, Hard: 311}, - "override-soft-hard-with-single": {Single: 410}, - "add": {Single: 610}, - } - err := mergo.Merge(&base, &override, mergo.WithOverride, mergo.WithTransformers(specials)) - assert.NilError(t, err) - assert.DeepEqual( - t, - base, - map[string]*types.UlimitsConfig{ - "override-single": {Single: 110}, - "override-single-with-soft-hard": {Soft: 210, Hard: 211}, - "override-soft-hard": {Soft: 310, Hard: 311}, - "override-soft-hard-with-single": {Single: 410}, - "dont-override": {Single: 500}, - "add": {Single: 610}, - }, - ) -} - -func TestMergeServiceNetworkConfig(t *testing.T) { - specials := &specials{ - m: map[reflect.Type]func(dst, src reflect.Value) error{ - reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig, - }, - } - base := map[string]*types.ServiceNetworkConfig{ - "override-aliases": { - Aliases: []string{"100", "101"}, - Ipv4Address: "127.0.0.1", - Ipv6Address: "0:0:0:0:0:0:0:1", - }, - "dont-override": { - Aliases: []string{"200", "201"}, - Ipv4Address: "127.0.0.2", - Ipv6Address: "0:0:0:0:0:0:0:2", - }, - } - override := map[string]*types.ServiceNetworkConfig{ - "override-aliases": { - Aliases: []string{"110", "111"}, - Ipv4Address: "127.0.1.1", - Ipv6Address: "0:0:0:0:0:0:1:1", - }, - "add": { - Aliases: []string{"310", "311"}, - Ipv4Address: "127.0.3.1", - Ipv6Address: 
"0:0:0:0:0:0:3:1", - }, - } - err := mergo.Merge(&base, &override, mergo.WithOverride, mergo.WithTransformers(specials)) - assert.NilError(t, err) - assert.DeepEqual( - t, - base, - map[string]*types.ServiceNetworkConfig{ - "override-aliases": { - Aliases: []string{"110", "111"}, - Ipv4Address: "127.0.1.1", - Ipv6Address: "0:0:0:0:0:0:1:1", - }, - "dont-override": { - Aliases: []string{"200", "201"}, - Ipv4Address: "127.0.0.2", - Ipv6Address: "0:0:0:0:0:0:0:2", - }, - "add": { - Aliases: []string{"310", "311"}, - Ipv4Address: "127.0.3.1", - Ipv6Address: "0:0:0:0:0:0:3:1", - }, - }, - ) -} - -// issue #3293 -func TestMergeServiceOverrideReplicasZero(t *testing.T) { - base := types.ServiceConfig{ - Name: "someService", - Deploy: types.DeployConfig{ - Replicas: uint64Ptr(3), - }, - } - override := types.ServiceConfig{ - Name: "someService", - Deploy: types.DeployConfig{ - Replicas: uint64Ptr(0), - }, - } - services, err := mergeServices([]types.ServiceConfig{base}, []types.ServiceConfig{override}) - assert.NilError(t, err) - assert.Equal(t, len(services), 1) - actual := services[0] - assert.DeepEqual( - t, - actual, - types.ServiceConfig{ - Name: "someService", - Deploy: types.DeployConfig{ - Replicas: uint64Ptr(0), - }, - }, - ) -} - -func TestMergeServiceOverrideReplicasNotNil(t *testing.T) { - base := types.ServiceConfig{ - Name: "someService", - Deploy: types.DeployConfig{ - Replicas: uint64Ptr(3), - }, - } - override := types.ServiceConfig{ - Name: "someService", - Deploy: types.DeployConfig{}, - } - services, err := mergeServices([]types.ServiceConfig{base}, []types.ServiceConfig{override}) - assert.NilError(t, err) - assert.Equal(t, len(services), 1) - actual := services[0] - assert.DeepEqual( - t, - actual, - types.ServiceConfig{ - Name: "someService", - Deploy: types.DeployConfig{ - Replicas: uint64Ptr(3), - }, - }, - ) -} diff --git a/cli/compose/loader/testdata/full-example.json.golden b/cli/compose/loader/testdata/full-example.json.golden index 13c82de2fa59..24e797f5ac2a 100644 --- a/cli/compose/loader/testdata/full-example.json.golden +++ b/cli/compose/loader/testdata/full-example.json.golden @@ -2,7 +2,6 @@ "configs": { "config1": { "file": "/foo/config_data", - "external": false, "labels": { "foo": "bar" } @@ -17,10 +16,10 @@ }, "config4": { "name": "foo", - "file": "/foo", - "external": false + "file": "/foo" } }, + "name": "full-example", "networks": { "external-network": { "name": "external-network", @@ -49,20 +48,17 @@ } ] }, - "external": false, "labels": { "foo": "bar" } }, "some-network": { - "ipam": {}, - "external": false + "ipam": {} } }, "secrets": { "secret1": { "file": "/foo/secret_data", - "external": false, "labels": { "foo": "bar" } @@ -77,8 +73,7 @@ }, "secret4": { "name": "bar", - "file": "/foo", - "external": false + "file": "/foo" } }, "services": { @@ -97,8 +92,8 @@ "bar" ], "extra_hosts": [ - "ipv4.example.com:127.0.0.1", - "ipv6.example.com:::1" + "ipv4.example.com=127.0.0.1", + "ipv6.example.com=::1" ], "network": "foo", "target": "foo" @@ -131,11 +126,16 @@ } ], "container_name": "my-web-container", - "credential_spec": {}, - "depends_on": [ - "db", - "redis" - ], + "depends_on": { + "db": { + "condition": "service_started", + "required": true + }, + "redis": { + "condition": "service_started", + "required": true + } + }, "deploy": { "mode": "replicated", "replicas": 6, @@ -226,8 +226,11 @@ "QUX": "qux_from_environment" }, "env_file": [ - "./example1.env", - "./example2.env" + "/foo/example1.env", + { + "path": "/foo/example2.env", + "required": false + 
} ], "expose": [ "3000", @@ -239,9 +242,9 @@ "project_db_1:postgresql" ], "extra_hosts": [ - "somehost:162.242.195.82", - "otherhost:50.31.209.229", - "host.docker.internal:host-gateway" + "host.docker.internal=host-gateway", + "otherhost=50.31.209.229", + "somehost=162.242.195.82" ], "hostname": "foo", "healthcheck": { @@ -323,97 +326,97 @@ { "mode": "ingress", "target": 8000, - "published": 8000, + "published": "8000", "protocol": "tcp" }, { "mode": "ingress", "target": 8080, - "published": 9090, + "published": "9090", "protocol": "tcp" }, { "mode": "ingress", "target": 8081, - "published": 9091, + "published": "9091", "protocol": "tcp" }, { "mode": "ingress", "target": 22, - "published": 49100, + "published": "49100", "protocol": "tcp" }, { "mode": "ingress", "target": 8001, - "published": 8001, + "published": "8001", "protocol": "tcp" }, { "mode": "ingress", "target": 5000, - "published": 5000, + "published": "5000", "protocol": "tcp" }, { "mode": "ingress", "target": 5001, - "published": 5001, + "published": "5001", "protocol": "tcp" }, { "mode": "ingress", "target": 5002, - "published": 5002, + "published": "5002", "protocol": "tcp" }, { "mode": "ingress", "target": 5003, - "published": 5003, + "published": "5003", "protocol": "tcp" }, { "mode": "ingress", "target": 5004, - "published": 5004, + "published": "5004", "protocol": "tcp" }, { "mode": "ingress", "target": 5005, - "published": 5005, + "published": "5005", "protocol": "tcp" }, { "mode": "ingress", "target": 5006, - "published": 5006, + "published": "5006", "protocol": "tcp" }, { "mode": "ingress", "target": 5007, - "published": 5007, + "published": "5007", "protocol": "tcp" }, { "mode": "ingress", "target": 5008, - "published": 5008, + "published": "5008", "protocol": "tcp" }, { "mode": "ingress", "target": 5009, - "published": 5009, + "published": "5009", "protocol": "tcp" }, { "mode": "ingress", "target": 5010, - "published": 5010, + "published": "5010", "protocol": "tcp" } ], @@ -497,7 +500,7 @@ "type": "tmpfs", "target": "/opt", "tmpfs": { - "size": 10000 + "size": "10000" } }, { @@ -509,7 +512,6 @@ "working_dir": "/code" } }, - "version": "3.12", "volumes": { "another-volume": { "name": "user_specified_name", @@ -517,59 +519,10 @@ "driver_opts": { "baz": "1", "foo": "bar" - }, - "external": false + } }, "cluster-volume": { - "driver": "my-csi-driver", - "external": false, - "x-cluster-spec": { - "group": "mygroup", - "access_mode": { - "scope": "single", - "sharing": "none", - "block_volume": {} - }, - "accessibility_requirements": { - "requisite": [ - { - "segments": { - "region": "R1", - "zone": "Z1" - } - }, - { - "segments": { - "region": "R1", - "zone": "Z2" - } - } - ], - "preferred": [ - { - "segments": { - "region": "R1", - "zone": "Z1" - } - } - ] - }, - "capacity_range": { - "required_bytes": "1073741824", - "limit_bytes": "8589934592" - }, - "secrets": [ - { - "key": "mycsisecret", - "secret": "secret1" - }, - { - "key": "mycsisecret2", - "secret": "secret4" - } - ], - "availability": "active" - } + "driver": "my-csi-driver" }, "external-volume": { "name": "external-volume", @@ -589,14 +542,11 @@ "baz": "1", "foo": "bar" }, - "external": false, "labels": { "foo": "bar" } }, - "some-volume": { - "external": false - } + "some-volume": {} }, "x-bar": "baz", "x-foo": "bar", diff --git a/cli/compose/loader/testdata/full-example.yaml.golden b/cli/compose/loader/testdata/full-example.yaml.golden index 83e2b8342f2e..729de99f5de4 100644 --- a/cli/compose/loader/testdata/full-example.yaml.golden +++ 
b/cli/compose/loader/testdata/full-example.yaml.golden @@ -1,4 +1,4 @@ -version: "3.12" +name: full-example services: foo: build: @@ -12,8 +12,8 @@ services: - foo - bar extra_hosts: - - ipv4.example.com:127.0.0.1 - - ipv6.example.com:::1 + - ipv4.example.com=127.0.0.1 + - ipv6.example.com=::1 network: foo target: foo cap_add: @@ -37,8 +37,12 @@ services: mode: 288 container_name: my-web-container depends_on: - - db - - redis + db: + condition: service_started + required: true + redis: + condition: service_started + required: true deploy: mode: replicated replicas: 6 @@ -104,8 +108,9 @@ services: FOO: foo_from_env_file QUX: qux_from_environment env_file: - - ./example1.env - - ./example2.env + - /foo/example1.env + - path: /foo/example2.env + required: false expose: - "3000" - "8000" @@ -114,9 +119,9 @@ services: - project_db_1:mysql - project_db_1:postgresql extra_hosts: - - somehost:162.242.195.82 - - otherhost:50.31.209.229 - - host.docker.internal:host-gateway + - host.docker.internal=host-gateway + - otherhost=50.31.209.229 + - somehost=162.242.195.82 hostname: foo healthcheck: test: @@ -174,67 +179,67 @@ services: protocol: tcp - mode: ingress target: 8000 - published: 8000 + published: "8000" protocol: tcp - mode: ingress target: 8080 - published: 9090 + published: "9090" protocol: tcp - mode: ingress target: 8081 - published: 9091 + published: "9091" protocol: tcp - mode: ingress target: 22 - published: 49100 + published: "49100" protocol: tcp - mode: ingress target: 8001 - published: 8001 + published: "8001" protocol: tcp - mode: ingress target: 5000 - published: 5000 + published: "5000" protocol: tcp - mode: ingress target: 5001 - published: 5001 + published: "5001" protocol: tcp - mode: ingress target: 5002 - published: 5002 + published: "5002" protocol: tcp - mode: ingress target: 5003 - published: 5003 + published: "5003" protocol: tcp - mode: ingress target: 5004 - published: 5004 + published: "5004" protocol: tcp - mode: ingress target: 5005 - published: 5005 + published: "5005" protocol: tcp - mode: ingress target: 5006 - published: 5006 + published: "5006" protocol: tcp - mode: ingress target: 5007 - published: 5007 + published: "5007" protocol: tcp - mode: ingress target: 5008 - published: 5008 + published: "5008" protocol: tcp - mode: ingress target: 5009 - published: 5009 + published: "5009" protocol: tcp - mode: ingress target: 5010 - published: 5010 + published: "5010" protocol: tcp privileged: true read_only: true @@ -291,7 +296,7 @@ services: - type: tmpfs target: /opt tmpfs: - size: 10000 + size: "10000" - type: cluster source: group:mygroup target: /srv diff --git a/cli/compose/loader/types_test.go b/cli/compose/loader/types_test.go index a6498a8f2f2a..f550522b8f13 100644 --- a/cli/compose/loader/types_test.go +++ b/cli/compose/loader/types_test.go @@ -5,6 +5,7 @@ import ( "os" "testing" + "github.com/compose-spec/compose-go/v2/loader" yaml "gopkg.in/yaml.v2" "gotest.tools/v3/assert" "gotest.tools/v3/golden" @@ -22,9 +23,12 @@ func TestMarshallConfig(t *testing.T) { // Make sure the expected can be parsed. 
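The hunk below replaces the old two-step ParseYAML/Load flow with compose-go v2's loader and its functional options. For context, a minimal standalone sketch of the same API, assuming compose-go v2's loader.Load signature and that raw YAML can be passed via ConfigFile.Content (the buildConfigDetailsFromYAML helper referenced below is defined elsewhere in this patch):

    package main

    import (
        "fmt"

        "github.com/compose-spec/compose-go/v2/loader"
        "github.com/compose-spec/compose-go/v2/types"
    )

    func main() {
        yamlData := []byte("name: demo\nservices:\n  web:\n    image: nginx\n")
        project, err := loader.Load(types.ConfigDetails{
            ConfigFiles: []types.ConfigFile{{Filename: "compose.yaml", Content: yamlData}},
            Environment: map[string]string{},
        }, func(options *loader.Options) {
            // Mirror the test setup below: parse and validate only, leaving
            // paths and the environment untouched.
            options.SkipConsistencyCheck = true
            options.SkipNormalization = true
            options.ResolvePaths = false
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(project.Name) // expected to print "demo", taken from the top-level name field
    }
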
yamlData, err := os.ReadFile("testdata/full-example.yaml.golden") assert.NilError(t, err) - dict, err := ParseYAML(yamlData) - assert.NilError(t, err) - _, err = Load(buildConfigDetails(dict, map[string]string{})) + _, err = Load(buildConfigDetailsFromYAML(string(yamlData), map[string]string{}), func(options *loader.Options) { + options.SkipConsistencyCheck = true + options.SkipNormalization = true + options.ResolvePaths = false + options.SkipResolveEnvironment = true + }) assert.NilError(t, err) } @@ -38,8 +42,12 @@ func TestJSONMarshallConfig(t *testing.T) { jsonData, err := os.ReadFile("testdata/full-example.json.golden") assert.NilError(t, err) - dict, err := ParseYAML(jsonData) - assert.NilError(t, err) - _, err = Load(buildConfigDetails(dict, map[string]string{})) + _, err = Load(buildConfigDetailsFromYAML(string(jsonData), map[string]string{}), + func(options *loader.Options) { + options.SkipConsistencyCheck = true + options.SkipNormalization = true + options.ResolvePaths = false + options.SkipResolveEnvironment = true + }) assert.NilError(t, err) } diff --git a/cli/compose/loader/volume.go b/cli/compose/loader/volume.go index f043f4aa57ff..c1cbb9cddb69 100644 --- a/cli/compose/loader/volume.go +++ b/cli/compose/loader/volume.go @@ -5,7 +5,7 @@ import ( "unicode" "unicode/utf8" - "github.com/docker/cli/cli/compose/types" + types "github.com/compose-spec/compose-go/v2/types" "github.com/docker/docker/api/types/mount" "github.com/pkg/errors" ) diff --git a/cli/compose/loader/volume_test.go b/cli/compose/loader/volume_test.go index 54dc200fe8f6..12bd71adf38a 100644 --- a/cli/compose/loader/volume_test.go +++ b/cli/compose/loader/volume_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/docker/cli/cli/compose/types" + "github.com/compose-spec/compose-go/v2/types" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" ) diff --git a/cli/compose/template/template.go b/cli/compose/template/template.go deleted file mode 100644 index dd3acaf28787..000000000000 --- a/cli/compose/template/template.go +++ /dev/null @@ -1,248 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 - -package template - -import ( - "fmt" - "regexp" - "strings" -) - -const ( - delimiter = "\\$" - subst = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?" -) - -var defaultPattern = regexp.MustCompile(fmt.Sprintf( - "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", - delimiter, delimiter, subst, subst, -)) - -// DefaultSubstituteFuncs contains the default SubstituteFunc used by the docker cli -var DefaultSubstituteFuncs = []SubstituteFunc{ - softDefault, - hardDefault, - requiredNonEmpty, - required, -} - -// InvalidTemplateError is returned when a variable template is not in a valid -// format -type InvalidTemplateError struct { - Template string -} - -func (e InvalidTemplateError) Error() string { - return fmt.Sprintf("Invalid template: %#v", e.Template) -} - -// Mapping is a user-supplied function which maps from variable names to values. -// Returns the value as a string and a bool indicating whether -// the value is present, to distinguish between an empty string -// and the absence of a value. -type Mapping func(string) (string, bool) - -// SubstituteFunc is a user-supplied function that apply substitution. -// Returns the value as a string, a bool indicating if the function could apply -// the substitution and an error. 
-type SubstituteFunc func(string, Mapping) (string, bool, error) - -// SubstituteWith subsitute variables in the string with their values. -// It accepts additional substitute function. -func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { - var err error - result := pattern.ReplaceAllStringFunc(template, func(substring string) string { - matches := pattern.FindStringSubmatch(substring) - groups := matchGroups(matches, pattern) - if escaped := groups["escaped"]; escaped != "" { - return escaped - } - - substitution := groups["named"] - if substitution == "" { - substitution = groups["braced"] - } - - if substitution == "" { - err = &InvalidTemplateError{Template: template} - return "" - } - - for _, f := range subsFuncs { - var ( - value string - applied bool - ) - value, applied, err = f(substitution, mapping) - if err != nil { - return "" - } - if !applied { - continue - } - return value - } - - value, _ := mapping(substitution) - return value - }) - - return result, err -} - -// Substitute variables in the string with their values -func Substitute(template string, mapping Mapping) (string, error) { - return SubstituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...) -} - -// ExtractVariables returns a map of all the variables defined in the specified -// composefile (dict representation) and their default value if any. -func ExtractVariables(configDict map[string]any, pattern *regexp.Regexp) map[string]string { - if pattern == nil { - pattern = defaultPattern - } - return recurseExtract(configDict, pattern) -} - -func recurseExtract(value any, pattern *regexp.Regexp) map[string]string { - m := map[string]string{} - - switch value := value.(type) { - case string: - if values, is := extractVariable(value, pattern); is { - for _, v := range values { - m[v.name] = v.value - } - } - case map[string]any: - for _, elem := range value { - submap := recurseExtract(elem, pattern) - for key, value := range submap { - m[key] = value - } - } - - case []any: - for _, elem := range value { - if values, is := extractVariable(elem, pattern); is { - for _, v := range values { - m[v.name] = v.value - } - } - } - } - - return m -} - -type extractedValue struct { - name string - value string -} - -func extractVariable(value any, pattern *regexp.Regexp) ([]extractedValue, bool) { - sValue, ok := value.(string) - if !ok { - return []extractedValue{}, false - } - matches := pattern.FindAllStringSubmatch(sValue, -1) - if len(matches) == 0 { - return []extractedValue{}, false - } - values := []extractedValue{} - for _, match := range matches { - groups := matchGroups(match, pattern) - if escaped := groups["escaped"]; escaped != "" { - continue - } - val := groups["named"] - if val == "" { - val = groups["braced"] - } - name := val - var defaultValue string - switch { - case strings.Contains(val, ":?"): - name, _ = partition(val, ":?") - case strings.Contains(val, "?"): - name, _ = partition(val, "?") - case strings.Contains(val, ":-"): - name, defaultValue = partition(val, ":-") - case strings.Contains(val, "-"): - name, defaultValue = partition(val, "-") - } - values = append(values, extractedValue{name: name, value: defaultValue}) - } - return values, len(values) > 0 -} - -// Soft default (fall back if unset or empty) -func softDefault(substitution string, mapping Mapping) (string, bool, error) { - sep := ":-" - if !strings.Contains(substitution, sep) { - return "", false, nil - } - name, defaultValue := 
partition(substitution, sep) - value, ok := mapping(name) - if !ok || value == "" { - return defaultValue, true, nil - } - return value, true, nil -} - -// Hard default (fall back if-and-only-if empty) -func hardDefault(substitution string, mapping Mapping) (string, bool, error) { - sep := "-" - if !strings.Contains(substitution, sep) { - return "", false, nil - } - name, defaultValue := partition(substitution, sep) - value, ok := mapping(name) - if !ok { - return defaultValue, true, nil - } - return value, true, nil -} - -func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) { - return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" }) -} - -func required(substitution string, mapping Mapping) (string, bool, error) { - return withRequired(substitution, mapping, "?", func(_ string) bool { return true }) -} - -func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) { - if !strings.Contains(substitution, sep) { - return "", false, nil - } - name, errorMessage := partition(substitution, sep) - value, ok := mapping(name) - if !ok || !valid(value) { - return "", true, &InvalidTemplateError{ - Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage), - } - } - return value, true, nil -} - -func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { - groups := make(map[string]string) - for i, name := range pattern.SubexpNames()[1:] { - groups[name] = matches[i+1] - } - return groups -} - -// Split the string at the first occurrence of sep, and return the part before the separator, -// and the part after the separator. -// -// If the separator is not found, return the string itself, followed by an empty string. 
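The substitution semantics documented above (soft ${VAR:-def}, hard ${VAR-def}, and the required ${VAR:?msg}/${VAR?msg} forms) survive this deletion: compose-go v2 ships its own template package with the same behaviour. A hedged sketch against that replacement API, assuming the v2 package keeps a Substitute(string, Mapping) (string, error) signature; verify against the vendored copy:

    package main

    import (
        "fmt"

        "github.com/compose-spec/compose-go/v2/template"
    )

    func main() {
        env := map[string]string{"TAG": ""}
        // A template.Mapping distinguishes "unset" from "empty string".
        mapping := func(name string) (string, bool) {
            v, ok := env[name]
            return v, ok
        }
        // Soft default: falls back when TAG is unset *or* empty.
        out, err := template.Substitute("app:${TAG:-latest}", mapping)
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // app:latest
    }
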
-func partition(s, sep string) (string, string) { - k, v, ok := strings.Cut(s, sep) - if !ok { - return s, "" - } - return k, v -} diff --git a/cli/compose/template/template_test.go b/cli/compose/template/template_test.go deleted file mode 100644 index c29a26de559f..000000000000 --- a/cli/compose/template/template_test.go +++ /dev/null @@ -1,286 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.19 - -package template - -import ( - "fmt" - "testing" - - "gotest.tools/v3/assert" - is "gotest.tools/v3/assert/cmp" -) - -var defaults = map[string]string{ - "FOO": "first", - "BAR": "", -} - -func defaultMapping(name string) (string, bool) { - val, ok := defaults[name] - return val, ok -} - -func TestEscaped(t *testing.T) { - result, err := Substitute("$${foo}", defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("${foo}", result)) -} - -func TestSubstituteNoMatch(t *testing.T) { - result, err := Substitute("foo", defaultMapping) - assert.NilError(t, err) - assert.Equal(t, "foo", result) -} - -func TestInvalid(t *testing.T) { - invalidTemplates := []string{ - "${", - "$}", - "${}", - "${ }", - "${ foo}", - "${foo }", - "${foo!}", - } - - for _, template := range invalidTemplates { - _, err := Substitute(template, defaultMapping) - assert.ErrorContains(t, err, "Invalid template") - } -} - -func TestNoValueNoDefault(t *testing.T) { - for _, template := range []string{"This ${missing} var", "This ${BAR} var"} { - result, err := Substitute(template, defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("This var", result)) - } -} - -func TestValueNoDefault(t *testing.T) { - for _, template := range []string{"This $FOO var", "This ${FOO} var"} { - result, err := Substitute(template, defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("This first var", result)) - } -} - -func TestNoValueWithDefault(t *testing.T) { - for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} { - result, err := Substitute(template, defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("ok def", result)) - } -} - -func TestEmptyValueWithSoftDefault(t *testing.T) { - result, err := Substitute("ok ${BAR:-def}", defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("ok def", result)) -} - -func TestValueWithSoftDefault(t *testing.T) { - result, err := Substitute("ok ${FOO:-def}", defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("ok first", result)) -} - -func TestEmptyValueWithHardDefault(t *testing.T) { - result, err := Substitute("ok ${BAR-def}", defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("ok ", result)) -} - -func TestNonAlphanumericDefault(t *testing.T) { - result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal("ok /non:-alphanumeric", result)) -} - -func TestMandatoryVariableErrors(t *testing.T) { - testCases := []struct { - template string - expectedError string - }{ - { - template: "not ok ${UNSET_VAR:?Mandatory Variable Unset}", - expectedError: "required variable UNSET_VAR is missing a value: Mandatory Variable Unset", - }, - { - template: "not ok ${BAR:?Mandatory Variable Empty}", - expectedError: "required variable BAR is missing a value: Mandatory Variable Empty", - }, - { - template: "not ok ${UNSET_VAR:?}", - expectedError: "required variable UNSET_VAR is missing a value", - }, - { - template: 
"not ok ${UNSET_VAR?Mandatory Variable Unset}", - expectedError: "required variable UNSET_VAR is missing a value: Mandatory Variable Unset", - }, - { - template: "not ok ${UNSET_VAR?}", - expectedError: "required variable UNSET_VAR is missing a value", - }, - } - - for _, tc := range testCases { - _, err := Substitute(tc.template, defaultMapping) - assert.Check(t, is.ErrorContains(err, tc.expectedError)) - assert.Check(t, is.ErrorType(err, &InvalidTemplateError{})) - } -} - -func TestDefaultsForMandatoryVariables(t *testing.T) { - testCases := []struct { - template string - expected string - }{ - { - template: "ok ${FOO:?err}", - expected: "ok first", - }, - { - template: "ok ${FOO?err}", - expected: "ok first", - }, - { - template: "ok ${BAR?err}", - expected: "ok ", - }, - } - - for _, tc := range testCases { - result, err := Substitute(tc.template, defaultMapping) - assert.NilError(t, err) - assert.Check(t, is.Equal(tc.expected, result)) - } -} - -func TestSubstituteWithCustomFunc(t *testing.T) { - errIsMissing := func(substitution string, mapping Mapping) (string, bool, error) { - value, found := mapping(substitution) - if !found { - return "", true, &InvalidTemplateError{ - Template: fmt.Sprintf("required variable %s is missing a value", substitution), - } - } - return value, true, nil - } - - result, err := SubstituteWith("ok ${FOO}", defaultMapping, defaultPattern, errIsMissing) - assert.NilError(t, err) - assert.Check(t, is.Equal("ok first", result)) - - result, err = SubstituteWith("ok ${BAR}", defaultMapping, defaultPattern, errIsMissing) - assert.NilError(t, err) - assert.Check(t, is.Equal("ok ", result)) - - _, err = SubstituteWith("ok ${NOTHERE}", defaultMapping, defaultPattern, errIsMissing) - assert.Check(t, is.ErrorContains(err, "required variable")) -} - -func TestExtractVariables(t *testing.T) { - testCases := []struct { - name string - dict map[string]any - expected map[string]string - }{ - { - name: "empty", - dict: map[string]any{}, - expected: map[string]string{}, - }, - { - name: "no-variables", - dict: map[string]any{ - "foo": "bar", - }, - expected: map[string]string{}, - }, - { - name: "variable-without-curly-braces", - dict: map[string]any{ - "foo": "$bar", - }, - expected: map[string]string{ - "bar": "", - }, - }, - { - name: "variable", - dict: map[string]any{ - "foo": "${bar}", - }, - expected: map[string]string{ - "bar": "", - }, - }, - { - name: "required-variable", - dict: map[string]any{ - "foo": "${bar?:foo}", - }, - expected: map[string]string{ - "bar": "", - }, - }, - { - name: "required-variable2", - dict: map[string]any{ - "foo": "${bar?foo}", - }, - expected: map[string]string{ - "bar": "", - }, - }, - { - name: "default-variable", - dict: map[string]any{ - "foo": "${bar:-foo}", - }, - expected: map[string]string{ - "bar": "foo", - }, - }, - { - name: "default-variable2", - dict: map[string]any{ - "foo": "${bar-foo}", - }, - expected: map[string]string{ - "bar": "foo", - }, - }, - { - name: "multiple-values", - dict: map[string]any{ - "foo": "${bar:-foo}", - "bar": map[string]any{ - "foo": "${fruit:-banana}", - "bar": "vegetable", - }, - "baz": []any{ - "foo", - "$docker:${project:-cli}", - "$toto", - }, - }, - expected: map[string]string{ - "bar": "foo", - "fruit": "banana", - "toto": "", - "docker": "", - "project": "cli", - }, - }, - } - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - actual := ExtractVariables(tc.dict, defaultPattern) - assert.Check(t, is.DeepEqual(actual, tc.expected)) - }) - } -} diff --git 
a/cli/compose/types/types.go b/cli/compose/types/types.go index f6954ff7f0ae..071e6d74ee48 100644 --- a/cli/compose/types/types.go +++ b/cli/compose/types/types.go @@ -4,75 +4,58 @@ package types import ( - "encoding/json" - "fmt" - "strconv" "time" + + compose "github.com/compose-spec/compose-go/v2/types" ) // UnsupportedProperties not yet supported by this implementation of the compose file -var UnsupportedProperties = []string{ - "build", - "cgroupns_mode", - "cgroup_parent", - "devices", - "domainname", - "external_links", - "ipc", - "links", - "mac_address", - "network_mode", - "pid", - "privileged", - "restart", - "security_opt", - "shm_size", - "userns_mode", +var UnsupportedProperties = map[string]string{ + "Build": "build", + // "cgroupns_mode", + "CgroupParent": "cgroup_parent", + "Devices": "devices", + "DomainName": "domainname", + "ExternalLinks": "external_links", + "Ipc": "ipc", + "Links": "links", + "MacAddress": "mac_address", + "NetworkMode": "network_mode", + "Pid": "pid", + "Privileged": "privileged", + "Restart": "restart", + "SecurityOpt": "security_opt", + "ShmSize": "shm_size", + "UserNSMode": "userns_mode", } // DeprecatedProperties that were removed from the v3 format, but their // use should not impact the behaviour of the application. -var DeprecatedProperties = map[string]string{ - "container_name": "Setting the container name is not supported.", - "expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.", +var DeprecatedProperties = map[string]Pair[string, string]{ + "ContainerName": NewPair("container_name", "Setting the container name is not supported."), + "Expose": NewPair("expose", "Exposing ports is unnecessary - services on the same network can access each other's containers on any port."), } // ForbiddenProperties that are not supported in this implementation of the // compose file. 
-var ForbiddenProperties = map[string]string{ - "extends": "Support for `extends` is not implemented yet.", - "volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", - "volumes_from": "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.", - "cpu_quota": "Set resource limits using deploy.resources", - "cpu_shares": "Set resource limits using deploy.resources", - "cpuset": "Set resource limits using deploy.resources", - "mem_limit": "Set resource limits using deploy.resources", - "memswap_limit": "Set resource limits using deploy.resources", -} - -// ConfigFile is a filename and the contents of the file as a Dict -type ConfigFile struct { - Filename string - Config map[string]any -} - -// ConfigDetails are the details about a group of ConfigFiles -type ConfigDetails struct { - Version string - WorkingDir string - ConfigFiles []ConfigFile - Environment map[string]string -} - -// Duration is a thin wrapper around time.Duration with improved JSON marshalling -type Duration time.Duration - -func (d Duration) String() string { - return time.Duration(d).String() +var ForbiddenProperties = map[string]Pair[string, string]{ + "VolumeDriver": NewPair( + "volume_driver", + "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", + ), + "VolumesFrom": NewPair( + "volumes_from", + "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.", + ), + "CPUQuota": NewPair("cpu_quota", "Set resource limits using deploy.resources"), + "CPUShares": NewPair("cpu_shares", "Set resource limits using deploy.resources"), + "CPUSet": NewPair("cpuset", "Set resource limits using deploy.resources"), + "MemLimit": NewPair("mem_limit", "Set resource limits using deploy.resources"), + "MemSwapLimit": NewPair("memswap_limit", "Set resource limits using deploy.resources"), } // ConvertDurationPtr converts a typedefined Duration pointer to a time.Duration pointer with the same value. 
-func ConvertDurationPtr(d *Duration) *time.Duration { +func ConvertDurationPtr(d *compose.Duration) *time.Duration { if d == nil { return nil } @@ -80,420 +63,6 @@ func ConvertDurationPtr(d *Duration) *time.Duration { return &res } -// MarshalJSON makes Duration implement json.Marshaler -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.String()) -} - -// MarshalYAML makes Duration implement yaml.Marshaler -func (d Duration) MarshalYAML() (any, error) { - return d.String(), nil -} - -// LookupEnv provides a lookup function for environment variables -func (cd ConfigDetails) LookupEnv(key string) (string, bool) { - v, ok := cd.Environment[key] - return v, ok -} - -// Config is a full compose file configuration -type Config struct { - Filename string `yaml:"-" json:"-"` - Version string `json:"version"` - Services Services `json:"services"` - Networks map[string]NetworkConfig `yaml:",omitempty" json:"networks,omitempty"` - Volumes map[string]VolumeConfig `yaml:",omitempty" json:"volumes,omitempty"` - Secrets map[string]SecretConfig `yaml:",omitempty" json:"secrets,omitempty"` - Configs map[string]ConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"` - Extras map[string]any `yaml:",inline" json:"-"` -} - -// MarshalJSON makes Config implement json.Marshaler -func (c Config) MarshalJSON() ([]byte, error) { - m := map[string]any{ - "version": c.Version, - "services": c.Services, - } - - if len(c.Networks) > 0 { - m["networks"] = c.Networks - } - if len(c.Volumes) > 0 { - m["volumes"] = c.Volumes - } - if len(c.Secrets) > 0 { - m["secrets"] = c.Secrets - } - if len(c.Configs) > 0 { - m["configs"] = c.Configs - } - for k, v := range c.Extras { - m[k] = v - } - return json.Marshal(m) -} - -// Services is a list of ServiceConfig -type Services []ServiceConfig - -// MarshalYAML makes Services implement yaml.Marshaller -func (s Services) MarshalYAML() (any, error) { - services := map[string]ServiceConfig{} - for _, service := range s { - services[service.Name] = service - } - return services, nil -} - -// MarshalJSON makes Services implement json.Marshaler -func (s Services) MarshalJSON() ([]byte, error) { - data, err := s.MarshalYAML() - if err != nil { - return nil, err - } - return json.MarshalIndent(data, "", " ") -} - -// ServiceConfig is the configuration of one service -type ServiceConfig struct { - Name string `yaml:"-" json:"-"` - - Build BuildConfig `yaml:",omitempty" json:"build,omitempty"` - CapAdd []string `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"` - CapDrop []string `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"` - CgroupNSMode string `mapstructure:"cgroupns_mode" yaml:"cgroupns_mode,omitempty" json:"cgroupns_mode,omitempty"` - CgroupParent string `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"` - Command ShellCommand `yaml:",omitempty" json:"command,omitempty"` - Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"` - ContainerName string `mapstructure:"container_name" yaml:"container_name,omitempty" json:"container_name,omitempty"` - CredentialSpec CredentialSpecConfig `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` - DependsOn []string `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"` - Deploy DeployConfig `yaml:",omitempty" json:"deploy,omitempty"` - Devices []string `yaml:",omitempty" json:"devices,omitempty"` - DNS StringList 
`yaml:",omitempty" json:"dns,omitempty"` - DNSSearch StringList `mapstructure:"dns_search" yaml:"dns_search,omitempty" json:"dns_search,omitempty"` - DomainName string `mapstructure:"domainname" yaml:"domainname,omitempty" json:"domainname,omitempty"` - Entrypoint ShellCommand `yaml:",omitempty" json:"entrypoint,omitempty"` - Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"` - EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"` - Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"` - ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"` - ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` - Hostname string `yaml:",omitempty" json:"hostname,omitempty"` - HealthCheck *HealthCheckConfig `yaml:",omitempty" json:"healthcheck,omitempty"` - Image string `yaml:",omitempty" json:"image,omitempty"` - Init *bool `yaml:",omitempty" json:"init,omitempty"` - Ipc string `yaml:",omitempty" json:"ipc,omitempty"` - Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty" json:"isolation,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - Links []string `yaml:",omitempty" json:"links,omitempty"` - Logging *LoggingConfig `yaml:",omitempty" json:"logging,omitempty"` - MacAddress string `mapstructure:"mac_address" yaml:"mac_address,omitempty" json:"mac_address,omitempty"` - NetworkMode string `mapstructure:"network_mode" yaml:"network_mode,omitempty" json:"network_mode,omitempty"` - Networks map[string]*ServiceNetworkConfig `yaml:",omitempty" json:"networks,omitempty"` - Pid string `yaml:",omitempty" json:"pid,omitempty"` - Ports []ServicePortConfig `yaml:",omitempty" json:"ports,omitempty"` - Privileged bool `yaml:",omitempty" json:"privileged,omitempty"` - ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` - Restart string `yaml:",omitempty" json:"restart,omitempty"` - Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"` - SecurityOpt []string `mapstructure:"security_opt" yaml:"security_opt,omitempty" json:"security_opt,omitempty"` - ShmSize string `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"` - StdinOpen bool `mapstructure:"stdin_open" yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` - StopGracePeriod *Duration `mapstructure:"stop_grace_period" yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"` - StopSignal string `mapstructure:"stop_signal" yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"` - Sysctls Mapping `yaml:",omitempty" json:"sysctls,omitempty"` - Tmpfs StringList `yaml:",omitempty" json:"tmpfs,omitempty"` - Tty bool `mapstructure:"tty" yaml:"tty,omitempty" json:"tty,omitempty"` - Ulimits map[string]*UlimitsConfig `yaml:",omitempty" json:"ulimits,omitempty"` - User string `yaml:",omitempty" json:"user,omitempty"` - UserNSMode string `mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"` - Volumes []ServiceVolumeConfig `yaml:",omitempty" json:"volumes,omitempty"` - WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"` - - Extras map[string]any `yaml:",inline" json:"-"` -} - -// BuildConfig is a type for build -// using the same format at libcompose: https://github.com/docker/libcompose/blob/master/yaml/build.go#L12 -type BuildConfig struct { - 
Context string `yaml:",omitempty" json:"context,omitempty"` - Dockerfile string `yaml:",omitempty" json:"dockerfile,omitempty"` - Args MappingWithEquals `yaml:",omitempty" json:"args,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - CacheFrom StringList `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"` - ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` - Network string `yaml:",omitempty" json:"network,omitempty"` - Target string `yaml:",omitempty" json:"target,omitempty"` -} - -// ShellCommand is a string or list of string args -type ShellCommand []string - -// StringList is a type for fields that can be a string or list of strings -type StringList []string - -// StringOrNumberList is a type for fields that can be a list of strings or -// numbers -type StringOrNumberList []string - -// MappingWithEquals is a mapping type that can be converted from a list of -// key[=value] strings. -// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`. -// For the key without value (`key`), the mapped value is set to nil. -type MappingWithEquals map[string]*string - -// Mapping is a mapping type that can be converted from a list of -// key[=value] strings. -// For the key with an empty value (`key=`), or key without value (`key`), the -// mapped value is set to an empty string `""`. -type Mapping map[string]string - -// Labels is a mapping type for labels -type Labels map[string]string - -// MappingWithColon is a mapping type that can be converted from a list of -// 'key: value' strings -type MappingWithColon map[string]string - -// HostsList is a list of colon-separated host-ip mappings -type HostsList []string - -// LoggingConfig the logging configuration for a service -type LoggingConfig struct { - Driver string `yaml:",omitempty" json:"driver,omitempty"` - Options map[string]string `yaml:",omitempty" json:"options,omitempty"` -} - -// DeployConfig the deployment configuration for a service -type DeployConfig struct { - Mode string `yaml:",omitempty" json:"mode,omitempty"` - Replicas *uint64 `yaml:",omitempty" json:"replicas,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty" json:"update_config,omitempty"` - RollbackConfig *UpdateConfig `mapstructure:"rollback_config" yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"` - Resources Resources `yaml:",omitempty" json:"resources,omitempty"` - RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"` - Placement Placement `yaml:",omitempty" json:"placement,omitempty"` - EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` -} - -// HealthCheckConfig the healthcheck configuration for a service -type HealthCheckConfig struct { - Test HealthCheckTest `yaml:",omitempty" json:"test,omitempty"` - Timeout *Duration `yaml:",omitempty" json:"timeout,omitempty"` - Interval *Duration `yaml:",omitempty" json:"interval,omitempty"` - Retries *uint64 `yaml:",omitempty" json:"retries,omitempty"` - StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"` - StartInterval *Duration `mapstructure:"start_interval" yaml:"start_interval,omitempty" json:"start_interval,omitempty"` - Disable bool `yaml:",omitempty" 
json:"disable,omitempty"` -} - -// HealthCheckTest is the command run to test the health of a service -type HealthCheckTest []string - -// UpdateConfig the service update configuration -type UpdateConfig struct { - Parallelism *uint64 `yaml:",omitempty" json:"parallelism,omitempty"` - Delay Duration `yaml:",omitempty" json:"delay,omitempty"` - FailureAction string `mapstructure:"failure_action" yaml:"failure_action,omitempty" json:"failure_action,omitempty"` - Monitor Duration `yaml:",omitempty" json:"monitor,omitempty"` - MaxFailureRatio float32 `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` - Order string `yaml:",omitempty" json:"order,omitempty"` -} - -// Resources the resource limits and reservations -type Resources struct { - Limits *ResourceLimit `yaml:",omitempty" json:"limits,omitempty"` - Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"` -} - -// ResourceLimit is a resource to be limited -type ResourceLimit struct { - // TODO: types to convert from units and ratios - NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` - MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"` - Pids int64 `mapstructure:"pids" yaml:"pids,omitempty" json:"pids,omitempty"` -} - -// Resource is a resource to be reserved -type Resource struct { - // TODO: types to convert from units and ratios - NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"` - MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"` - GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// only be an integer (e.g: SSD=3) for a service -type GenericResource struct { - DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) 
-type DiscreteGenericResource struct { - Kind string `json:"kind"` - Value int64 `json:"value"` -} - -// UnitBytes is the bytes type -type UnitBytes int64 - -// MarshalYAML makes UnitBytes implement yaml.Marshaller -func (u UnitBytes) MarshalYAML() (any, error) { - return fmt.Sprintf("%d", u), nil -} - -// MarshalJSON makes UnitBytes implement json.Marshaler -func (u UnitBytes) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%d"`, u)), nil -} - -// RestartPolicy the service restart policy -type RestartPolicy struct { - Condition string `yaml:",omitempty" json:"condition,omitempty"` - Delay *Duration `yaml:",omitempty" json:"delay,omitempty"` - MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` - Window *Duration `yaml:",omitempty" json:"window,omitempty"` -} - -// Placement constraints for the service -type Placement struct { - Constraints []string `yaml:",omitempty" json:"constraints,omitempty"` - Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"` - MaxReplicas uint64 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` -} - -// PlacementPreferences is the preferences for a service placement -type PlacementPreferences struct { - Spread string `yaml:",omitempty" json:"spread,omitempty"` -} - -// ServiceNetworkConfig is the network configuration for a service -type ServiceNetworkConfig struct { - Aliases []string `yaml:",omitempty" json:"aliases,omitempty"` - Ipv4Address string `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` - Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` -} - -// ServicePortConfig is the port configuration for a service -type ServicePortConfig struct { - Mode string `yaml:",omitempty" json:"mode,omitempty"` - Target uint32 `yaml:",omitempty" json:"target,omitempty"` - Published uint32 `yaml:",omitempty" json:"published,omitempty"` - Protocol string `yaml:",omitempty" json:"protocol,omitempty"` -} - -// ServiceVolumeConfig are references to a volume used by a service -type ServiceVolumeConfig struct { - Type string `yaml:",omitempty" json:"type,omitempty"` - Source string `yaml:",omitempty" json:"source,omitempty"` - Target string `yaml:",omitempty" json:"target,omitempty"` - ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"` - Consistency string `yaml:",omitempty" json:"consistency,omitempty"` - Bind *ServiceVolumeBind `yaml:",omitempty" json:"bind,omitempty"` - Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"` - Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"` - Cluster *ServiceVolumeCluster `yaml:",omitempty" json:"cluster,omitempty"` -} - -// ServiceVolumeBind are options for a service volume of type bind -type ServiceVolumeBind struct { - Propagation string `yaml:",omitempty" json:"propagation,omitempty"` -} - -// ServiceVolumeVolume are options for a service volume of type volume -type ServiceVolumeVolume struct { - NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"` -} - -// ServiceVolumeTmpfs are options for a service volume of type tmpfs -type ServiceVolumeTmpfs struct { - Size int64 `yaml:",omitempty" json:"size,omitempty"` -} - -// ServiceVolumeCluster are options for a service volume of type cluster. -// Deliberately left blank for future options, but unused now. 
-type ServiceVolumeCluster struct{} - -// FileReferenceConfig for a reference to a swarm file object -type FileReferenceConfig struct { - Source string `yaml:",omitempty" json:"source,omitempty"` - Target string `yaml:",omitempty" json:"target,omitempty"` - UID string `yaml:",omitempty" json:"uid,omitempty"` - GID string `yaml:",omitempty" json:"gid,omitempty"` - Mode *uint32 `yaml:",omitempty" json:"mode,omitempty"` -} - -// ServiceConfigObjConfig is the config obj configuration for a service -type ServiceConfigObjConfig FileReferenceConfig - -// ServiceSecretConfig is the secret configuration for a service -type ServiceSecretConfig FileReferenceConfig - -// UlimitsConfig the ulimit configuration -type UlimitsConfig struct { - Single int `yaml:",omitempty" json:"single,omitempty"` - Soft int `yaml:",omitempty" json:"soft,omitempty"` - Hard int `yaml:",omitempty" json:"hard,omitempty"` -} - -// MarshalYAML makes UlimitsConfig implement yaml.Marshaller -func (u *UlimitsConfig) MarshalYAML() (any, error) { - if u.Single != 0 { - return u.Single, nil - } - // Return as a value to avoid re-entering this method and use the default implementation - return *u, nil -} - -// MarshalJSON makes UlimitsConfig implement json.Marshaller -func (u *UlimitsConfig) MarshalJSON() ([]byte, error) { - if u.Single != 0 { - return json.Marshal(u.Single) - } - // Pass as a value to avoid re-entering this method and use the default implementation - return json.Marshal(*u) -} - -// NetworkConfig for a network -type NetworkConfig struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - Driver string `yaml:",omitempty" json:"driver,omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` - Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"` - External External `yaml:",omitempty" json:"external,omitempty"` - Internal bool `yaml:",omitempty" json:"internal,omitempty"` - Attachable bool `yaml:",omitempty" json:"attachable,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - Extras map[string]any `yaml:",inline" json:"-"` -} - -// IPAMConfig for a network -type IPAMConfig struct { - Driver string `yaml:",omitempty" json:"driver,omitempty"` - Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"` -} - -// IPAMPool for a network -type IPAMPool struct { - Subnet string `yaml:",omitempty" json:"subnet,omitempty"` -} - -// VolumeConfig for a volume -type VolumeConfig struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - Driver string `yaml:",omitempty" json:"driver,omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` - External External `yaml:",omitempty" json:"external,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - Extras map[string]any `yaml:",inline" json:"-"` - Spec *ClusterVolumeSpec `mapstructure:"x-cluster-spec" yaml:"x-cluster-spec,omitempty" json:"x-cluster-spec,omitempty"` -} - // ClusterVolumeSpec defines all the configuration and options specific to a // cluster (CSI) volume. type ClusterVolumeSpec struct { @@ -534,13 +103,13 @@ type TopologyRequirement struct { // Topology defines a particular topology group type Topology struct { - Segments Mapping `yaml:",omitempty" json:"segments,omitempty"` + Segments compose.Mapping `yaml:",omitempty" json:"segments,omitempty"` } // CapacityRange defines the minimum and maximum size of a volume. 
type CapacityRange struct { - RequiredBytes UnitBytes `mapstructure:"required_bytes" yaml:"required_bytes,omitempty" json:"required_bytes,omitempty"` - LimitBytes UnitBytes `mapstructure:"limit_bytes" yaml:"limit_bytes,omitempty" json:"limit_bytes,omitempty"` + RequiredBytes compose.UnitBytes `mapstructure:"required_bytes" yaml:"required_bytes,omitempty" json:"required_bytes,omitempty"` + LimitBytes compose.UnitBytes `mapstructure:"limit_bytes" yaml:"limit_bytes,omitempty" json:"limit_bytes,omitempty"` } // VolumeSecret defines a secret that needs to be passed to the CSI plugin when @@ -550,51 +119,19 @@ type VolumeSecret struct { Secret string `yaml:",omitempty" json:"secret,omitempty"` } -// External identifies a Volume or Network as a reference to a resource that is -// not managed, and should already exist. -// External.name is deprecated and replaced by Volume.name -type External struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - External bool `yaml:",omitempty" json:"external,omitempty"` +type Pair[T, U any] struct { + key T + value U } -// MarshalYAML makes External implement yaml.Marshaller -func (e External) MarshalYAML() (any, error) { - if e.Name == "" { - return e.External, nil - } - return External{Name: e.Name}, nil +func (p Pair[T, U]) Key() T { + return p.key } -// MarshalJSON makes External implement json.Marshaller -func (e External) MarshalJSON() ([]byte, error) { - if e.Name == "" { - return []byte(strconv.FormatBool(e.External)), nil - } - return []byte(fmt.Sprintf(`{"name": %q}`, e.Name)), nil +func (p Pair[T, U]) Value() U { + return p.value } -// CredentialSpecConfig for credential spec on Windows -type CredentialSpecConfig struct { - Config string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40 - File string `yaml:",omitempty" json:"file,omitempty"` - Registry string `yaml:",omitempty" json:"registry,omitempty"` +func NewPair[T, U any](key T, value U) Pair[T, U] { + return Pair[T, U]{key, value} } - -// FileObjectConfig is a config type for a file used by a service -type FileObjectConfig struct { - Name string `yaml:",omitempty" json:"name,omitempty"` - File string `yaml:",omitempty" json:"file,omitempty"` - External External `yaml:",omitempty" json:"external,omitempty"` - Labels Labels `yaml:",omitempty" json:"labels,omitempty"` - Extras map[string]any `yaml:",inline" json:"-"` - Driver string `yaml:",omitempty" json:"driver,omitempty"` - DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` - TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"` -} - -// SecretConfig for a secret -type SecretConfig FileObjectConfig - -// ConfigObjConfig is the config for the swarm "Config" object -type ConfigObjConfig FileObjectConfig diff --git a/e2e/stack/testdata/full-stack.yml b/e2e/stack/testdata/full-stack.yml index 2a4855291ceb..f14a6feb190d 100644 --- a/e2e/stack/testdata/full-stack.yml +++ b/e2e/stack/testdata/full-stack.yml @@ -1,5 +1,4 @@ version: '3.3' - services: one: image: registry:5000/alpine:frozen diff --git a/vendor.mod b/vendor.mod index ab1e8c22d8e1..dcc6c18b70b1 100644 --- a/vendor.mod +++ b/vendor.mod @@ -1,13 +1,11 @@ module github.com/docker/cli -// 'vendor.mod' enables use of 'go mod vendor' to managed 'vendor/' directory. -// There is no 'go.mod' file, as that would imply opting in for all the rules -// around SemVer, which this repo cannot abide by as it uses CalVer. 
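The property tables above now key on Go struct field names and carry the compose-file key plus its guidance message through the new Pair helper. A minimal consumption sketch using only the accessors defined in this patch; the reportForbidden helper and its usedFields argument are hypothetical (assumed to be collected elsewhere, e.g. by reflecting over a service struct):

    package main

    import (
        "fmt"

        "github.com/docker/cli/cli/compose/types"
    )

    // reportForbidden prints one diagnostic per forbidden compose key whose
    // corresponding ServiceConfig field was set.
    func reportForbidden(usedFields map[string]bool) {
        for field, p := range types.ForbiddenProperties {
            if usedFields[field] {
                fmt.Printf("%s: %s\n", p.Key(), p.Value())
            }
        }
    }

    func main() {
        // "VolumesFrom" is one of the keys in the table above, so this
        // prints: volumes_from: <guidance message from the table>
        reportForbidden(map[string]bool{"VolumesFrom": true})
    }
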
+go 1.21 -go 1.19 +toolchain go1.21.6 require ( - dario.cat/mergo v1.0.0 + github.com/compose-spec/compose-go/v2 v2.0.0-rc.7 github.com/containerd/containerd v1.7.12 github.com/creack/pty v1.1.21 github.com/distribution/reference v0.5.0 @@ -21,7 +19,6 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/mattn/go-runewidth v0.0.15 - github.com/mitchellh/mapstructure v1.5.0 github.com/moby/patternmatcher v0.6.0 github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 github.com/moby/sys/sequential v0.5.0 @@ -37,9 +34,10 @@ require ( github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d github.com/xeipuuv/gojsonschema v1.2.0 + golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 golang.org/x/sync v0.6.0 golang.org/x/sys v0.16.0 - golang.org/x/term v0.15.0 + golang.org/x/term v0.16.0 golang.org/x/text v0.14.0 gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.5.1 @@ -63,8 +61,12 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/compress v1.17.4 // indirect + github.com/mattn/go-shellwords v1.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/sys/symlink v0.2.0 // indirect github.com/moby/sys/user v0.1.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect @@ -79,12 +81,13 @@ require ( go.opentelemetry.io/otel v1.19.0 // indirect go.opentelemetry.io/otel/metric v1.19.0 // indirect go.opentelemetry.io/otel/trace v1.19.0 // indirect - golang.org/x/crypto v0.17.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/net v0.20.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.16.0 // indirect + golang.org/x/tools v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/vendor.sum b/vendor.sum index 73df4b7cbdd4..6d5a691e8b3a 100644 --- a/vendor.sum +++ b/vendor.sum @@ -1,6 +1,5 @@ -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= @@ -27,15 +26,19 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod 
h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/cloudflare/cfssl v1.6.4 h1:NMOvfrEjFfC63K3SGXgAnFdsgkmiq4kATme5BfcqrO8= +github.com/cloudflare/cfssl v1.6.4/go.mod h1:8b3CQMxfWPAeom3zBnGJ6sd+G1NkL5TXqmDXacb+1J0= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/compose-spec/compose-go/v2 v2.0.0-rc.7 h1:koFIK+JwplWu1m/DscSO6MJw7hodaEHOaKQZPUSL4OY= +github.com/compose-spec/compose-go/v2 v2.0.0-rc.7/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc= github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -109,6 +112,7 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY= +github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -120,6 +124,7 @@ github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -132,6 +137,7 @@ github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVt github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= 
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk= +github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= @@ -154,6 +160,8 @@ github.com/magiconair/properties v1.5.3 h1:C8fxWnhYyME3n0klPOhVM7PtYUB3eV1W3DeFm github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -161,9 +169,13 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 h1:mjLf2jYrqtIS4LvLzg0gNyJR4rMXS4X5Bg1A4hOhVMs= @@ -254,11 +266,13 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a h1:tlJ7tGUHvcvL1v3yR6NcCc9nOqh2L+CG6HWrYQtwzQ0= github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a/go.mod h1:Y94A6rPp2OwNfP/7vmf8O2xx2IykP8pPXQ1DLouGnEw= github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d h1:wvQZpqy8p0D/FUia6ipKDhXrzPzBVJE4PZyPc5+5Ay0= 
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d/go.mod h1:xKQhd7snlzKFuUi1taTGWjpRE8iFTA06DeacYi3CVFQ= github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b h1:FsyNrX12e5BkplJq7wKOLk0+C6LZ+KGXvuEcKUYm5ss= +github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -269,7 +283,9 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc h1:zkGwegkOW709y0oiAraH/3D8njopUR/pARHv4tZZ6pw= +github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc/go.mod h1:FM4U1E3NzlNMRnSUTU3P1UdukWhYGifqEsjk9fn7BCk= github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0= +github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50= go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0= @@ -278,13 +294,17 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1: go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= @@ -295,8 +315,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= @@ -308,8 +330,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -334,8 +356,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -347,14 +369,15 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod 
h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -371,6 +394,7 @@ gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM= @@ -389,5 +413,6 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg= tags.cncf.io/container-device-interface v0.6.2/go.mod h1:Shusyhjs1A5Na/kqPVLL0KqnHQHuunol9LFeUNkuGVE= diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml deleted file mode 100644 index a8bc979e02ef..000000000000 --- a/vendor/dario.cat/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "dario.cat/mergo" \ No newline at end of file diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore deleted file mode 100644 index 529c3412ba95..000000000000 --- a/vendor/dario.cat/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test 
- -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml deleted file mode 100644 index d324c43ba4df..000000000000 --- a/vendor/dario.cat/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... -after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907a09..000000000000 --- a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. 
The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md deleted file mode 100644 index 0a1ff9f94d85..000000000000 --- a/vendor/dario.cat/mergo/CONTRIBUTING.md +++ /dev/null @@ -1,112 +0,0 @@ - -# Contributing to mergo - -First off, thanks for taking the time to contribute! ❤️ - -All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 - -> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: -> - Star the project -> - Tweet about it -> - Refer this project in your project's readme -> - Mention the project at local meetups and tell your friends/colleagues - - -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) - -## Code of Conduct - -This project and everyone participating in it is governed by the -[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md). -By participating, you are expected to uphold this code. Please report unacceptable behavior -to <>. - - -## I Have a Question - -> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). - -Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. - -If you then still feel the need to ask a question and need clarification, we recommend the following: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). -- Provide as much context as you can about what you're running into. -- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. - -We will then take care of the issue as soon as possible. - -## I Want To Contribute - -> ### Legal Notice -> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. - -### Reporting Bugs - - -#### Before Submitting a Bug Report - -A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. - -- Make sure that you are using the latest version. -- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). -- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug). -- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. -- Collect information about the bug: -- Stack trace (Traceback) -- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) -- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. -- Possibly your input and the output -- Can you reliably reproduce the issue? And can you also reproduce it with older versions? - - -#### How Do I Submit a Good Bug Report? - -> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . - - -We use GitHub issues to track bugs and errors. If you run into an issue with the project: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) -- Explain the behavior you would expect and the actual behavior. -- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. -- Provide the information you collected in the previous section. - -Once it's filed: - -- The project team will label the issue accordingly. -- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. -- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. - -### Suggesting Enhancements - -This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**.
Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. - - -#### Before Submitting an Enhancement - -- Make sure that you are using the latest version. -- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. -- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. -- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. - - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. -- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. -- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. -- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. - - -## Attribution -This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! 
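All of the vendored mergo material removed below falls out of the dependency swap recorded in vendor.mod: loading and merging of compose files is now delegated to compose-go v2 rather than reimplemented in this repository. As orientation only, a minimal sketch of loading a Compose file with the newly vendored library; it assumes compose-go v2's public loader.Load API and an invented inline file, and is not the CLI's actual call site (that lives in cli/command/stack/loader):

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/loader"
	"github.com/compose-spec/compose-go/v2/types"
)

func main() {
	src := []byte("services:\n  web:\n    image: nginx:alpine\n")

	// ConfigDetails carries the raw file content plus the environment
	// used for ${VARIABLE} interpolation; merging multiple files is
	// handled inside compose-go, not by the caller.
	details := types.ConfigDetails{
		WorkingDir:  ".",
		ConfigFiles: []types.ConfigFile{{Filename: "compose.yaml", Content: src}},
		Environment: map[string]string{},
	}

	// compose-go v2 requires a project name when one cannot be
	// derived from the file or its directory.
	project, err := loader.Load(details, func(o *loader.Options) {
		o.SetProjectName("demo", true)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(project.Name, project.Services["web"].Image)
}
```

In compose-go v2, Project.Services is a map keyed by service name, which is why the lookup above is a map index rather than the slice scan that v1-era code used.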
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md deleted file mode 100644 index 7d0cf9f32afe..000000000000 --- a/vendor/dario.cat/mergo/README.md +++ /dev/null @@ -1,248 +0,0 @@ -# Mergo - -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Test status][1]][2] -[![OpenSSF Scorecard][21]][22] -[![OpenSSF Best Practices][19]][20] -[![Coverage status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA status][13]][14] - -[![GoDoc][3]][4] -[![Become my sponsor][15]][16] -[![Tidelift][17]][18] - -[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master -[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://img.shields.io/github/sponsors/imdario -[16]: https://github.com/sponsors/imdario -[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo -[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo -[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge -[20]: https://bestpractices.coreinfrastructure.org/projects/7177 -[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge -[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important notes - -#### 1.0.0 - -In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. - -#### 0.3.9 - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. 
- -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - -Buy Me a Coffee at ko-fi.com -Donate using Liberapay -Become my sponsor - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) - -## Install - - go get dario.cat/mergo - - // use in your .go code - import ( - "dario.cat/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. 
Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "dario.cat/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "dario.cat/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md deleted file mode 100644 index a5de61f77ba7..000000000000 --- a/vendor/dario.cat/mergo/SECURITY.md +++ /dev/null @@ -1,14 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | -| ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | - -## Security contact information - -To report a security vulnerability, please use the -[Tidelift security contact](https://tidelift.com/security). -Tidelift will coordinate the fix and disclosure. diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go deleted file mode 100644 index 7d96ec0546d8..000000000000 --- a/vendor/dario.cat/mergo/doc.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -# Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -# Important notes - -1.0.0 - -In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`. - -0.3.9 - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -# Install - -Do your usual installation procedure: - - go get dario.cat/mergo - - // use in your .go code - import ( - "dario.cat/mergo" - ) - -# Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ...
- } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "dario.cat/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -# Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "dario.cat/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -# Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -# About - -Written by Dario Castañé: https://da.rio.hn - -# License - -BSD 3-Clause license, as Go language. -*/ -package mergo diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go deleted file mode 100644 index b50d5c2a4e7c..000000000000 --- a/vendor/dario.cat/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) 
-} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go deleted file mode 100644 index 0ef9b2138c15..000000000000 --- a/vendor/dario.cat/mergo/merge.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Transformers Transformers - Overwrite bool - ShouldNotDereference bool - AppendSlice bool - TypeCheck bool - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - - if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite && dst.CanSet() { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - - if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { - continue - } - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { - continue - } - } 
- - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - - // Ensure that all keys in dst are deleted if they are not in src. - if overwriteWithEmptySrc { - for _, key := range dst.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - dst.SetMapIndex(key, reflect.Value{}) - } - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if !config.ShouldNotDereference { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else { - if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { - dst.Set(src) - } - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. 
-// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty -// (i.e. a non-nil pointer is never considered empty). -func WithoutDereference(config *Config) { - config.ShouldNotDereference = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/dario.cat/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go deleted file mode 100644 index 0a721e2d8586..000000000000 --- a/vendor/dario.cat/mergo/mergo.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. 
-var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs, maps, and slices are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerArgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - typ reflect.Type - next *visit - ptr uintptr -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value, shouldDereference bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - if shouldDereference { - return isEmptyValue(v.Elem(), shouldDereference) - } - return false - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/vendor/github.com/compose-spec/compose-go/v2/LICENSE b/vendor/github.com/compose-spec/compose-go/v2/LICENSE new file mode 100644 index 000000000000..9c8e20ab85c1 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/compose-spec/compose-go/v2/NOTICE b/vendor/github.com/compose-spec/compose-go/v2/NOTICE new file mode 100644 index 000000000000..9c2755477e7d --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/NOTICE @@ -0,0 +1,2 @@ +The Compose Specification +Copyright 2020 The Compose Specification Authors diff --git a/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go b/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go new file mode 100644 index 000000000000..592e6f067dbf --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go @@ -0,0 +1,29 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package consts + +const ( + ComposeProjectName = "COMPOSE_PROJECT_NAME" + ComposePathSeparator = "COMPOSE_PATH_SEPARATOR" + ComposeFilePath = "COMPOSE_FILE" + ComposeDisableDefaultEnvFile = "COMPOSE_DISABLE_ENV_FILE" + ComposeProfiles = "COMPOSE_PROFILES" +) + +const Extensions = "#extensions" // Using # prefix, we prevent risk to conflict with an actual yaml key + +type ComposeFileKey struct{} diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE b/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE new file mode 100644 index 000000000000..9390caf66007 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go b/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go new file mode 100644 index 000000000000..cc472a60a3ad --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go @@ -0,0 +1,76 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package dotenv + +import ( + "bytes" + "fmt" + "os" + "path/filepath" +) + +func GetEnvFromFile(currentEnv map[string]string, filenames []string) (map[string]string, error) { + envMap := make(map[string]string) + + for _, dotEnvFile := range filenames { + abs, err := filepath.Abs(dotEnvFile) + if err != nil { + return envMap, err + } + dotEnvFile = abs + + s, err := os.Stat(dotEnvFile) + if os.IsNotExist(err) { + return envMap, fmt.Errorf("Couldn't find env file: %s", dotEnvFile) + } + if err != nil { + return envMap, err + } + + if s.IsDir() { + if len(filenames) == 0 { + return envMap, nil + } + return envMap, fmt.Errorf("%s is a directory", dotEnvFile) + } + + b, err := os.ReadFile(dotEnvFile) + if os.IsNotExist(err) { + return nil, fmt.Errorf("Couldn't read env file: %s", dotEnvFile) + } + if err != nil { + return envMap, err + } + + env, err := ParseWithLookup(bytes.NewReader(b), func(k string) (string, bool) { + v, ok := currentEnv[k] + if ok { + return v, true + } + v, ok = envMap[k] + return v, ok + }) + if err != nil { + return envMap, fmt.Errorf("failed to read %s: %w", dotEnvFile, err) + } + for k, v := range env { + envMap[k] = v + } + } + + return envMap, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go b/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go new file mode 100644 index 000000000000..e6635ce33b07 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go @@ -0,0 +1,175 @@ +// Package dotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package dotenv + +import ( + "bytes" + "io" + "os" + "regexp" + "strings" + + "github.com/compose-spec/compose-go/v2/template" +) + +var utf8BOM = []byte("\uFEFF") + +var startsWithDigitRegex = regexp.MustCompile(`^\s*\d.*`) // Keys starting with numbers are ignored + +// LookupFn represents a lookup function to resolve variables from +type LookupFn func(string) (string, bool) + +var noLookupFn = func(s string) (string, bool) { + return "", false +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (map[string]string, error) { + return ParseWithLookup(r, nil) +} + +// ParseWithLookup reads an env file from io.Reader, returning a map of keys and values. +func ParseWithLookup(r io.Reader, lookupFn LookupFn) (map[string]string, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + // seek past the UTF-8 BOM if it exists (particularly on Windows, some + // editors tend to add it, and it'll cause parsing to fail) + data = bytes.TrimPrefix(data, utf8BOM) + + return UnmarshalBytesWithLookup(data, lookupFn) +} + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main). +// +// If you call Load without any args it will default to loading .env in the current path. 
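+//
+// A minimal call site might look like this (an illustrative sketch; the
+// log package is assumed to be imported by the caller):
+//
+//	if err := godotenv.Load(); err != nil {
+//		log.Fatal("error loading .env file")
+//	}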
+// +// You can otherwise tell it which files to load (there can be more than one) like: +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) error { + return load(false, filenames...) +} + +func load(overload bool, filenames ...string) error { + filenames = filenamesOrDefault(filenames) + for _, filename := range filenames { + err := loadFile(filename, overload) + if err != nil { + return err + } + } + return nil +} + +// ReadWithLookup gets all env vars from the files and/or lookup function and return values as +// a map rather than automatically writing values into env +func ReadWithLookup(lookupFn LookupFn, filenames ...string) (map[string]string, error) { + filenames = filenamesOrDefault(filenames) + envMap := make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename, lookupFn) + + if individualErr != nil { + return envMap, individualErr + } + + for key, value := range individualEnvMap { + if startsWithDigitRegex.MatchString(key) { + continue + } + envMap[key] = value + } + } + + return envMap, nil +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (map[string]string, error) { + return ReadWithLookup(nil, filenames...) +} + +// UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values. +func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) { + return UnmarshalWithLookup(string(src), lookupFn) +} + +// UnmarshalWithLookup parses env file from string, returning a map of keys and values. 
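+// For example (an illustrative sketch; the map contents shown are what the
+// parser produces for this input):
+//
+//	env, err := UnmarshalWithLookup("GREETING=hello ${NAME}", func(k string) (string, bool) {
+//		if k == "NAME" {
+//			return "world", true
+//		}
+//		return "", false
+//	})
+//	// on success, env["GREETING"] == "hello world"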
+func UnmarshalWithLookup(src string, lookupFn LookupFn) (map[string]string, error) { + out := make(map[string]string) + err := newParser().parse(src, out, lookupFn) + return out, err +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename, nil) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + _ = os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string, lookupFn LookupFn) (map[string]string, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + return ParseWithLookup(file, lookupFn) +} + +func expandVariables(value string, envMap map[string]string, lookupFn LookupFn) (string, error) { + retVal, err := template.Substitute(value, func(k string) (string, bool) { + if v, ok := lookupFn(k); ok { + return v, true + } + v, ok := envMap[k] + return v, ok + }) + if err != nil { + return value, err + } + return retVal, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go b/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go new file mode 100644 index 000000000000..861cd953d6ba --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go @@ -0,0 +1,282 @@ +package dotenv + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "unicode" +) + +const ( + charComment = '#' + prefixSingleQuote = '\'' + prefixDoubleQuote = '"' +) + +var ( + escapeSeqRegex = regexp.MustCompile(`(\\(?:[abcfnrtv$"\\]|0\d{0,3}))`) + exportRegex = regexp.MustCompile(`^export\s+`) +) + +type parser struct { + line int +} + +func newParser() *parser { + return &parser{ + line: 1, + } +} + +func (p *parser) parse(src string, out map[string]string, lookupFn LookupFn) error { + cutset := src + if lookupFn == nil { + lookupFn = noLookupFn + } + for { + cutset = p.getStatementStart(cutset) + if cutset == "" { + // reached end of file + break + } + + key, left, inherited, err := p.locateKeyName(cutset) + if err != nil { + return err + } + if strings.Contains(key, " ") { + return fmt.Errorf("line %d: key cannot contain a space", p.line) + } + + if inherited { + value, ok := lookupFn(key) + if ok { + out[key] = value + } + cutset = left + continue + } + + value, left, err := p.extractVarValue(left, out, lookupFn) + if err != nil { + return err + } + + out[key] = value + cutset = left + } + + return nil +} + +// getStatementPosition returns position of statement begin. +// +// It skips any comment line or non-whitespace character. 
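+// (That is, it skips leading whitespace and comment-only lines, and returns
+// the input starting at the next statement, or "" when none is left.)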
+func (p *parser) getStatementStart(src string) string { + pos := p.indexOfNonSpaceChar(src) + if pos == -1 { + return "" + } + + src = src[pos:] + if src[0] != charComment { + return src + } + + // skip comment section + pos = strings.IndexFunc(src, isCharFunc('\n')) + if pos == -1 { + return "" + } + return p.getStatementStart(src[pos:]) +} + +// locateKeyName locates and parses key name and returns rest of slice +func (p *parser) locateKeyName(src string) (string, string, bool, error) { + var key string + var inherited bool + // trim "export" and space at beginning + if exportRegex.MatchString(src) { + // we use a `strings.trim` to preserve the pointer to the same underlying memory. + // a regexp replace would copy the string. + src = strings.TrimLeftFunc(strings.TrimPrefix(src, "export"), isSpace) + } + + // locate key name end and validate it in single loop + offset := 0 +loop: + for i, rune := range src { + if isSpace(rune) { + continue + } + + switch rune { + case '=', ':', '\n': + // library also supports yaml-style value declaration + key = string(src[0:i]) + offset = i + 1 + inherited = rune == '\n' + break loop + case '_', '.', '-', '[', ']': + default: + // variable name should match [A-Za-z0-9_.-] + if unicode.IsLetter(rune) || unicode.IsNumber(rune) { + continue + } + + return "", "", inherited, fmt.Errorf( + `line %d: unexpected character %q in variable name %q`, + p.line, string(rune), strings.Split(src, "\n")[0]) + } + } + + if src == "" { + return "", "", inherited, errors.New("zero length string") + } + + // trim whitespace + key = strings.TrimRightFunc(key, unicode.IsSpace) + cutset := strings.TrimLeftFunc(src[offset:], isSpace) + return key, cutset, inherited, nil +} + +// extractVarValue extracts variable value and returns rest of slice +func (p *parser) extractVarValue(src string, envMap map[string]string, lookupFn LookupFn) (string, string, error) { + quote, isQuoted := hasQuotePrefix(src) + if !isQuoted { + // unquoted value - read until new line + value, rest, _ := strings.Cut(src, "\n") + p.line++ + + // Remove inline comments on unquoted lines + value, _, _ = strings.Cut(value, " #") + value = strings.TrimRightFunc(value, unicode.IsSpace) + retVal, err := expandVariables(string(value), envMap, lookupFn) + return retVal, rest, err + } + + previousCharIsEscape := false + // lookup quoted string terminator + var chars []byte + for i := 1; i < len(src); i++ { + char := src[i] + if char == '\n' { + p.line++ + } + if char != quote { + if !previousCharIsEscape && char == '\\' { + previousCharIsEscape = true + continue + } + if previousCharIsEscape { + previousCharIsEscape = false + chars = append(chars, '\\') + } + chars = append(chars, char) + continue + } + + // skip escaped quote symbol (\" or \', depends on quote) + if previousCharIsEscape { + previousCharIsEscape = false + chars = append(chars, char) + continue + } + + // trim quotes + value := string(chars) + if quote == prefixDoubleQuote { + // expand standard shell escape sequences & then interpolate + // variables on the result + retVal, err := expandVariables(expandEscapes(value), envMap, lookupFn) + if err != nil { + return "", "", err + } + value = retVal + } + + return value, src[i+1:], nil + } + + // return formatted error if quoted string is not terminated + valEndIndex := strings.IndexFunc(src, isCharFunc('\n')) + if valEndIndex == -1 { + valEndIndex = len(src) + } + + return "", "", fmt.Errorf("line %d: unterminated quoted value %s", p.line, src[:valEndIndex]) +} + +func expandEscapes(str string) 
string { + out := escapeSeqRegex.ReplaceAllStringFunc(str, func(match string) string { + if match == `\$` { + // `\$` is not a Go escape sequence, the expansion parser uses + // the special `$$` syntax + // both `FOO=\$bar` and `FOO=$$bar` are valid in an env file and + // will result in FOO w/ literal value of "$bar" (no interpolation) + return "$$" + } + + if strings.HasPrefix(match, `\0`) { + // octal escape sequences in Go are not prefixed with `\0`, so + // rewrite the prefix, e.g. `\0123` -> `\123` -> literal value "S" + match = strings.Replace(match, `\0`, `\`, 1) + } + + // use Go to unquote (unescape) the literal + // see https://go.dev/ref/spec#Rune_literals + // + // NOTE: Go supports ADDITIONAL escapes like `\x` & `\u` & `\U`! + // These are NOT supported, which is why we use a regex to find + // only matches we support and then use `UnquoteChar` instead of a + // `Unquote` on the entire value + v, _, _, err := strconv.UnquoteChar(match, '"') + if err != nil { + return match + } + return string(v) + }) + return out +} + +func (p *parser) indexOfNonSpaceChar(src string) int { + return strings.IndexFunc(src, func(r rune) bool { + if r == '\n' { + p.line++ + } + return !unicode.IsSpace(r) + }) +} + +// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character +func hasQuotePrefix(src string) (byte, bool) { + if src == "" { + return 0, false + } + + switch quote := src[0]; quote { + case prefixDoubleQuote, prefixSingleQuote: + return quote, true // isQuoted + default: + return 0, false + } +} + +func isCharFunc(char rune) func(rune) bool { + return func(v rune) bool { + return v == char + } +} + +// isSpace reports whether the rune is a space character but not line break character +// +// this differs from unicode.IsSpace, which also applies line break as space +func isSpace(r rune) bool { + switch r { + case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0: + return true + } + return false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go b/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go new file mode 100644 index 000000000000..a54407007ece --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package errdefs
+
+import "errors"
+
+var (
+	// ErrNotFound is returned when an object is not found
+	ErrNotFound = errors.New("not found")
+
+	// ErrInvalid is returned when a compose project is invalid
+	ErrInvalid = errors.New("invalid compose project")
+
+	// ErrUnsupported is returned when a compose project uses an unsupported attribute
+	ErrUnsupported = errors.New("unsupported attribute")
+
+	// ErrIncompatible is returned when a compose project uses an incompatible attribute
+	ErrIncompatible = errors.New("incompatible attribute")
+)
+
+// IsNotFoundError returns true if the unwrapped error is ErrNotFound
+func IsNotFoundError(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsInvalidError returns true if the unwrapped error is ErrInvalid
+func IsInvalidError(err error) bool {
+	return errors.Is(err, ErrInvalid)
+}
+
+// IsUnsupportedError returns true if the unwrapped error is ErrUnsupported
+func IsUnsupportedError(err error) bool {
+	return errors.Is(err, ErrUnsupported)
+}
+
+// IsIncompatibleError returns true if the unwrapped error is ErrIncompatible
+func IsIncompatibleError(err error) bool {
+	return errors.Is(err, ErrIncompatible)
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/format/volume.go b/vendor/github.com/compose-spec/compose-go/v2/format/volume.go
new file mode 100644
index 000000000000..0083a62cb2be
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/format/volume.go
@@ -0,0 +1,184 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package format + +import ( + "errors" + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "github.com/compose-spec/compose-go/v2/types" +) + +const endOfSpec = rune(0) + +// ParseVolume parses a volume spec without any knowledge of the target platform +func ParseVolume(spec string) (types.ServiceVolumeConfig, error) { + volume := types.ServiceVolumeConfig{} + + switch len(spec) { + case 0: + return volume, errors.New("invalid empty volume spec") + case 1, 2: + volume.Target = spec + volume.Type = types.VolumeTypeVolume + return volume, nil + } + + var buffer []rune + for _, char := range spec + string(endOfSpec) { + switch { + case isWindowsDrive(buffer, char): + buffer = append(buffer, char) + case char == ':' || char == endOfSpec: + if err := populateFieldFromBuffer(char, buffer, &volume); err != nil { + populateType(&volume) + return volume, fmt.Errorf("invalid spec: %s: %w", spec, err) + } + buffer = nil + default: + buffer = append(buffer, char) + } + } + + populateType(&volume) + return volume, nil +} + +func isWindowsDrive(buffer []rune, char rune) bool { + return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0]) +} + +func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error { + strBuffer := string(buffer) + switch { + case len(buffer) == 0: + return errors.New("empty section between colons") + // Anonymous volume + case volume.Source == "" && char == endOfSpec: + volume.Target = strBuffer + return nil + case volume.Source == "": + volume.Source = strBuffer + return nil + case volume.Target == "": + volume.Target = strBuffer + return nil + case char == ':': + return errors.New("too many colons") + } + for _, option := range strings.Split(strBuffer, ",") { + switch option { + case "ro": + volume.ReadOnly = true + case "rw": + volume.ReadOnly = false + case "nocopy": + volume.Volume = &types.ServiceVolumeVolume{NoCopy: true} + default: + if isBindOption(option) { + setBindOption(volume, option) + } + // ignore unknown options + } + } + return nil +} + +var Propagations = []string{ + types.PropagationRPrivate, + types.PropagationPrivate, + types.PropagationRShared, + types.PropagationShared, + types.PropagationRSlave, + types.PropagationSlave, +} + +type setBindOptionFunc func(bind *types.ServiceVolumeBind, option string) + +var bindOptions = map[string]setBindOptionFunc{ + types.PropagationRPrivate: setBindPropagation, + types.PropagationPrivate: setBindPropagation, + types.PropagationRShared: setBindPropagation, + types.PropagationShared: setBindPropagation, + types.PropagationRSlave: setBindPropagation, + types.PropagationSlave: setBindPropagation, + types.SELinuxShared: setBindSELinux, + types.SELinuxPrivate: setBindSELinux, +} + +func setBindPropagation(bind *types.ServiceVolumeBind, option string) { + bind.Propagation = option +} + +func setBindSELinux(bind *types.ServiceVolumeBind, option string) { + bind.SELinux = option +} + +func isBindOption(option string) bool { + _, ok := bindOptions[option] + + return ok +} + +func setBindOption(volume *types.ServiceVolumeConfig, option string) { + if volume.Bind == nil { + volume.Bind = &types.ServiceVolumeBind{} + } + + bindOptions[option](volume.Bind, option) +} + +func populateType(volume *types.ServiceVolumeConfig) { + if isFilePath(volume.Source) { + volume.Type = types.VolumeTypeBind + if volume.Bind == nil { + volume.Bind = &types.ServiceVolumeBind{} + } + // For backward compatibility with docker-compose legacy, using short notation involves + // bind will create missing 
host path + volume.Bind.CreateHostPath = true + } else { + volume.Type = types.VolumeTypeVolume + if volume.Volume == nil { + volume.Volume = &types.ServiceVolumeVolume{} + } + } +} + +func isFilePath(source string) bool { + if source == "" { + return false + } + switch source[0] { + case '.', '/', '~': + return true + } + + // windows named pipes + if strings.HasPrefix(source, `\\`) { + return true + } + + first, nextIndex := utf8.DecodeRuneInString(source) + if len(source) <= nextIndex { + return false + } + return isWindowsDrive([]rune{first}, rune(source[nextIndex])) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go b/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go new file mode 100644 index 000000000000..41123370def2 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go @@ -0,0 +1,111 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package graph + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/utils" + "golang.org/x/exp/slices" +) + +// graph represents project as service dependencies +type graph[T any] struct { + vertices map[string]*vertex[T] +} + +// vertex represents a service in the dependencies structure +type vertex[T any] struct { + key string + service *T + children map[string]*vertex[T] + parents map[string]*vertex[T] +} + +func (g *graph[T]) addVertex(name string, service T) { + g.vertices[name] = &vertex[T]{ + key: name, + service: &service, + parents: map[string]*vertex[T]{}, + children: map[string]*vertex[T]{}, + } +} + +func (g *graph[T]) addEdge(src, dest string) { + g.vertices[src].children[dest] = g.vertices[dest] + g.vertices[dest].parents[src] = g.vertices[src] +} + +func (g *graph[T]) roots() []*vertex[T] { + var res []*vertex[T] + for _, v := range g.vertices { + if len(v.parents) == 0 { + res = append(res, v) + } + } + return res +} + +func (g *graph[T]) leaves() []*vertex[T] { + var res []*vertex[T] + for _, v := range g.vertices { + if len(v.children) == 0 { + res = append(res, v) + } + } + + return res +} + +func (g *graph[T]) checkCycle() error { + // iterate on vertices in a name-order to render a predicable error message + // this is required by tests and enforce command reproducibility by user, which otherwise could be confusing + names := utils.MapKeys(g.vertices) + for _, name := range names { + err := searchCycle([]string{name}, g.vertices[name]) + if err != nil { + return err + } + } + return nil +} + +func searchCycle[T any](path []string, v *vertex[T]) error { + names := utils.MapKeys(v.children) + for _, name := range names { + if i := slices.Index(path, name); i > 0 { + return fmt.Errorf("dependency cycle detected: %s", strings.Join(path[i:], " -> ")) + } + ch := v.children[name] + err := searchCycle(append(path, name), ch) + if err != nil { + return err + } + } + return nil +} + +// descendents return all descendents for a vertex, might contain duplicates +func (v *vertex[T]) descendents() []string { + var vx 
[]string + for _, n := range v.children { + vx = append(vx, n.key) + vx = append(vx, n.descendents()...) + } + return vx +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/graph/services.go b/vendor/github.com/compose-spec/compose-go/v2/graph/services.go new file mode 100644 index 000000000000..44b36a3f317b --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/graph/services.go @@ -0,0 +1,80 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package graph + +import ( + "context" + "fmt" + + "github.com/compose-spec/compose-go/v2/types" +) + +// InDependencyOrder walk the service graph an invoke VisitorFn in respect to dependency order +func InDependencyOrder(ctx context.Context, project *types.Project, fn VisitorFn[types.ServiceConfig], options ...func(*Options)) error { + _, err := CollectInDependencyOrder[any](ctx, project, func(ctx context.Context, s string, config types.ServiceConfig) (any, error) { + return nil, fn(ctx, s, config) + }, options...) + return err +} + +// CollectInDependencyOrder walk the service graph an invoke CollectorFn in respect to dependency order, then return result for each call +func CollectInDependencyOrder[T any](ctx context.Context, project *types.Project, fn CollectorFn[types.ServiceConfig, T], options ...func(*Options)) (map[string]T, error) { + graph, err := newGraph(project) + if err != nil { + return nil, err + } + t := newTraversal(fn) + for _, option := range options { + option(t.Options) + } + err = walk(ctx, graph, t) + return t.results, err +} + +// newGraph creates a service graph from project +func newGraph(project *types.Project) (*graph[types.ServiceConfig], error) { + g := &graph[types.ServiceConfig]{ + vertices: map[string]*vertex[types.ServiceConfig]{}, + } + + for name, s := range project.Services { + g.addVertex(name, s) + } + + for name, s := range project.Services { + src := g.vertices[name] + for dep, condition := range s.DependsOn { + dest, ok := g.vertices[dep] + if !ok { + if condition.Required { + if ds, exists := project.DisabledServices[dep]; exists { + return nil, fmt.Errorf("service %q is required by %q but is disabled. Can be enabled by profiles %s", dep, name, ds.Profiles) + } + return nil, fmt.Errorf("service %q depends on unknown service %q", name, dep) + } + delete(s.DependsOn, name) + project.Services[name] = s + continue + } + src.children[dep] = dest + dest.parents[name] = src + } + } + + err := g.checkCycle() + return g, err +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go b/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go new file mode 100644 index 000000000000..de85d1fceca1 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go @@ -0,0 +1,211 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package graph + +import ( + "context" + "sync" + + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" +) + +// CollectorFn executes on each graph vertex based on visit order and return associated value +type CollectorFn[S any, T any] func(context.Context, string, S) (T, error) + +// VisitorFn executes on each graph nodes based on visit order +type VisitorFn[S any] func(context.Context, string, S) error + +type traversal[S any, T any] struct { + *Options + visitor CollectorFn[S, T] + + mu sync.Mutex + status map[string]int + results map[string]T +} + +type Options struct { + // inverse reverse the traversal direction + inverse bool + // maxConcurrency limit the concurrent execution of visitorFn while walking the graph + maxConcurrency int + // after marks a set of node as starting points walking the graph + after []string +} + +const ( + vertexEntered = iota + vertexVisited +) + +func newTraversal[S, T any](fn CollectorFn[S, T]) *traversal[S, T] { + return &traversal[S, T]{ + Options: &Options{}, + status: map[string]int{}, + results: map[string]T{}, + visitor: fn, + } +} + +// WithMaxConcurrency configure traversal to limit concurrency walking graph nodes +func WithMaxConcurrency(max int) func(*Options) { + return func(o *Options) { + o.maxConcurrency = max + } +} + +// InReverseOrder configure traversal to walk the graph in reverse dependency order +func InReverseOrder(o *Options) { + o.inverse = true +} + +// WithRootNodesAndDown creates a graphTraversal to start from selected nodes +func WithRootNodesAndDown(nodes []string) func(*Options) { + return func(o *Options) { + o.after = nodes + } +} + +func walk[S, T any](ctx context.Context, g *graph[S], t *traversal[S, T]) error { + expect := len(g.vertices) + if expect == 0 { + return nil + } + // nodeCh need to allow n=expect writers while reader goroutine could have returned after ctx.Done + nodeCh := make(chan *vertex[S], expect) + defer close(nodeCh) + + eg, ctx := errgroup.WithContext(ctx) + if t.maxConcurrency > 0 { + eg.SetLimit(t.maxConcurrency + 1) + } + + eg.Go(func() error { + for { + select { + case <-ctx.Done(): + return nil + case node := <-nodeCh: + expect-- + if expect == 0 { + return nil + } + + for _, adj := range t.adjacentNodes(node) { + t.visit(ctx, eg, adj, nodeCh) + } + } + } + }) + + // select nodes to start walking the graph based on traversal.direction + for _, node := range t.extremityNodes(g) { + t.visit(ctx, eg, node, nodeCh) + } + + return eg.Wait() +} + +func (t *traversal[S, T]) visit(ctx context.Context, eg *errgroup.Group, node *vertex[S], nodeCh chan *vertex[S]) { + if !t.ready(node) { + // don't visit this service yet as dependencies haven't been visited + return + } + if !t.enter(node) { + // another worker already acquired this node + return + } + eg.Go(func() error { + var ( + err error + result T + ) + if !t.skip(node) { + result, err = t.visitor(ctx, node.key, *node.service) + } + t.done(node, result) + nodeCh <- node + return err + }) +} + +func (t *traversal[S, T]) extremityNodes(g *graph[S]) []*vertex[S] { + if t.inverse { + return g.roots() + } + return g.leaves() +} + +func (t 
*traversal[S, T]) adjacentNodes(v *vertex[S]) map[string]*vertex[S] { + if t.inverse { + return v.children + } + return v.parents +} + +func (t *traversal[S, T]) ready(v *vertex[S]) bool { + t.mu.Lock() + defer t.mu.Unlock() + + depends := v.children + if t.inverse { + depends = v.parents + } + for name := range depends { + if t.status[name] != vertexVisited { + return false + } + } + return true +} + +func (t *traversal[S, T]) enter(v *vertex[S]) bool { + t.mu.Lock() + defer t.mu.Unlock() + + if _, ok := t.status[v.key]; ok { + return false + } + t.status[v.key] = vertexEntered + return true +} + +func (t *traversal[S, T]) done(v *vertex[S], result T) { + t.mu.Lock() + defer t.mu.Unlock() + t.status[v.key] = vertexVisited + t.results[v.key] = result +} + +func (t *traversal[S, T]) skip(node *vertex[S]) bool { + if len(t.after) == 0 { + return false + } + if slices.Contains(t.after, node.key) { + return false + } + + // is none of our starting node is a descendent, skip visit + ancestors := node.descendents() + for _, name := range t.after { + if slices.Contains(ancestors, name) { + return false + } + } + return true +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go b/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go new file mode 100644 index 000000000000..b56e0afeb987 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go @@ -0,0 +1,137 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package interpolation + +import ( + "errors" + "fmt" + "os" + + "github.com/compose-spec/compose-go/v2/template" + "github.com/compose-spec/compose-go/v2/tree" +) + +// Options supported by Interpolate +type Options struct { + // LookupValue from a key + LookupValue LookupValue + // TypeCastMapping maps key paths to functions to cast to a type + TypeCastMapping map[tree.Path]Cast + // Substitution function to use + Substitute func(string, template.Mapping) (string, error) +} + +// LookupValue is a function which maps from variable names to values. +// Returns the value as a string and a bool indicating whether +// the value is present, to distinguish between an empty string +// and the absence of a value. 
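+//
+// A map-backed lookup is one simple implementation (an illustrative sketch):
+//
+//	vars := map[string]string{"TAG": "latest"}
+//	lookup := LookupValue(func(key string) (string, bool) {
+//		value, ok := vars[key]
+//		return value, ok
+//	})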
+type LookupValue func(key string) (string, bool) + +// Cast a value to a new type, or return an error if the value can't be cast +type Cast func(value string) (interface{}, error) + +// Interpolate replaces variables in a string with the values from a mapping +func Interpolate(config map[string]interface{}, opts Options) (map[string]interface{}, error) { + if opts.LookupValue == nil { + opts.LookupValue = os.LookupEnv + } + if opts.TypeCastMapping == nil { + opts.TypeCastMapping = make(map[tree.Path]Cast) + } + if opts.Substitute == nil { + opts.Substitute = template.Substitute + } + + out := map[string]interface{}{} + + for key, value := range config { + interpolatedValue, err := recursiveInterpolate(value, tree.NewPath(key), opts) + if err != nil { + return out, err + } + out[key] = interpolatedValue + } + + return out, nil +} + +func recursiveInterpolate(value interface{}, path tree.Path, opts Options) (interface{}, error) { + switch value := value.(type) { + case string: + newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue)) + if err != nil { + return value, newPathError(path, err) + } + caster, ok := opts.getCasterForPath(path) + if !ok { + return newValue, nil + } + casted, err := caster(newValue) + if err != nil { + return casted, newPathError(path, fmt.Errorf("failed to cast to expected type: %w", err)) + } + return casted, nil + + case map[string]interface{}: + out := map[string]interface{}{} + for key, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts) + if err != nil { + return nil, err + } + out[key] = interpolatedElem + } + return out, nil + + case []interface{}: + out := make([]interface{}, len(value)) + for i, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, path.Next(tree.PathMatchList), opts) + if err != nil { + return nil, err + } + out[i] = interpolatedElem + } + return out, nil + + default: + return value, nil + } +} + +func newPathError(path tree.Path, err error) error { + var ite *template.InvalidTemplateError + switch { + case err == nil: + return nil + case errors.As(err, &ite): + return fmt.Errorf( + "invalid interpolation format for %s.\nYou may need to escape any $ with another $.\n%s", + path, ite.Template) + default: + return fmt.Errorf("error while interpolating %s: %w", path, err) + } +} + +func (o Options) getCasterForPath(path tree.Path) (Cast, bool) { + for pattern, caster := range o.TypeCastMapping { + if path.Matches(pattern) { + return caster, true + } + } + return nil, false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env b/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env new file mode 100644 index 000000000000..61716e93b577 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env @@ -0,0 +1,10 @@ +# passed through +FOO=foo_from_env_file +ENV.WITH.DOT=ok +ENV_WITH_UNDERSCORE=ok + +# overridden in example2.env +BAR=bar_from_env_file + +# overridden in full-example.yml +BAZ=baz_from_env_file diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env b/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env new file mode 100644 index 000000000000..f47d1e614568 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env @@ -0,0 +1,4 @@ +BAR=bar_from_env_file_2 + +# overridden in configDetails.Environment +QUX=quz_from_env_file_2 diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go 
b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go new file mode 100644 index 000000000000..07bf32ba74b2 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go @@ -0,0 +1,214 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/compose-spec/compose-go/v2/consts" + "github.com/compose-spec/compose-go/v2/override" + "github.com/compose-spec/compose-go/v2/types" +) + +func ApplyExtends(ctx context.Context, dict map[string]any, opts *Options, tracker *cycleTracker, post ...PostProcessor) error { + a, ok := dict["services"] + if !ok { + return nil + } + services, ok := a.(map[string]any) + if !ok { + return fmt.Errorf("services must be a mapping") + } + for name := range services { + merged, err := applyServiceExtends(ctx, name, services, opts, tracker, post...) + if err != nil { + return err + } + services[name] = merged + } + dict["services"] = services + return nil +} + +func applyServiceExtends(ctx context.Context, name string, services map[string]any, opts *Options, tracker *cycleTracker, post ...PostProcessor) (any, error) { + s := services[name] + if s == nil { + return nil, nil + } + service, ok := s.(map[string]any) + if !ok { + return nil, fmt.Errorf("services.%s must be a mapping", name) + } + extends, ok := service["extends"] + if !ok { + return s, nil + } + filename := ctx.Value(consts.ComposeFileKey{}).(string) + var ( + err error + ref string + file any + ) + switch v := extends.(type) { + case map[string]any: + ref = v["service"].(string) + file = v["file"] + opts.ProcessEvent("extends", v) + case string: + ref = v + opts.ProcessEvent("extends", map[string]any{"service": ref}) + } + + var base any + if file != nil { + filename = file.(string) + services, err = getExtendsBaseFromFile(ctx, ref, filename, opts, tracker) + if err != nil { + return nil, err + } + } else { + _, ok := services[ref] + if !ok { + return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, filename) + } + } + + tracker, err = tracker.Add(filename, name) + if err != nil { + return nil, err + } + + // recursively apply `extends` + base, err = applyServiceExtends(ctx, ref, services, opts, tracker, post...) 
+ if err != nil { + return nil, err + } + + if base == nil { + return service, nil + } + source := deepClone(base).(map[string]any) + + err = validateExtendSource(source, ref) + if err != nil { + return nil, err + } + + for _, processor := range post { + processor.Apply(map[string]any{ + "services": map[string]any{ + name: source, + }, + }) + } + merged, err := override.ExtendService(source, service) + if err != nil { + return nil, err + } + delete(merged, "extends") + services[name] = merged + return merged, nil +} + +// validateExtendSource checks that the source for `extends` doesn't refer to another container/service +func validateExtendSource(source map[string]any, ref string) error { + forbidden := []string{"links", "volumes_from", "depends_on"} + for _, key := range forbidden { + if _, ok := source[key]; ok { + return fmt.Errorf("service %q can't be used with `extends` as it declares `%s`", ref, key) + } + } + + sharedNamespace := []string{"network_mode", "ipc", "pid", "net", "cgroup", "userns_mode", "uts"} + for _, key := range sharedNamespace { + if v, ok := source[key]; ok { + val := v.(string) + if strings.HasPrefix(val, types.ContainerPrefix) { + return fmt.Errorf("service %q can't be used with `extends` as it shares `%s` with another container", ref, key) + } + if strings.HasPrefix(val, types.ServicePrefix) { + return fmt.Errorf("service %q can't be used with `extends` as it shares `%s` with another service", ref, key) + } + } + } + return nil +} + +func getExtendsBaseFromFile(ctx context.Context, name string, path string, opts *Options, ct *cycleTracker) (map[string]any, error) { + for _, loader := range opts.ResourceLoaders { + if !loader.Accept(path) { + continue + } + local, err := loader.Load(ctx, path) + if err != nil { + return nil, err + } + localdir := filepath.Dir(local) + relworkingdir := loader.Dir(path) + + extendsOpts := opts.clone() + // replace localResourceLoader with a new flavour, using extended file base path + extendsOpts.ResourceLoaders = append(opts.RemoteResourceLoaders(), localResourceLoader{ + WorkingDir: localdir, + }) + extendsOpts.ResolvePaths = true + extendsOpts.SkipNormalization = true + extendsOpts.SkipConsistencyCheck = true + extendsOpts.SkipInclude = true + extendsOpts.SkipExtends = true // we manage extends recursively based on raw service definition + extendsOpts.SkipValidation = true // we validate the merge result + extendsOpts.SkipDefaultValues = true + source, err := loadYamlModel(ctx, types.ConfigDetails{ + WorkingDir: relworkingdir, + ConfigFiles: []types.ConfigFile{ + {Filename: local}, + }, + }, extendsOpts, ct, nil) + if err != nil { + return nil, err + } + services := source["services"].(map[string]any) + _, ok := services[name] + if !ok { + return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, path) + } + return services, nil + } + return nil, fmt.Errorf("cannot read %s", path) +} + +func deepClone(value any) any { + switch v := value.(type) { + case []any: + cp := make([]any, len(v)) + for i, e := range v { + cp[i] = deepClone(e) + } + return cp + case map[string]any: + cp := make(map[string]any, len(v)) + for k, e := range v { + cp[k] = deepClone(e) + } + return cp + default: + return value + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go b/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go new file mode 100644 index 000000000000..7a6e88d817ee --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go @@ -0,0 +1,36 @@ +/* + Copyright 2020 The
Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +// fixEmptyNotNull is a workaround for https://github.com/xeipuuv/gojsonschema/issues/141 +// as go-yaml `[]` will load as a `[]any(nil)`, which is not the same as an empty array +func fixEmptyNotNull(value any) interface{} { + switch v := value.(type) { + case []any: + if v == nil { + return []any{} + } + for i, e := range v { + v[i] = fixEmptyNotNull(e) + } + case map[string]any: + for k, e := range v { + v[k] = fixEmptyNotNull(e) + } + } + return value +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml b/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml new file mode 100644 index 000000000000..25dfbce6438f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml @@ -0,0 +1,455 @@ +name: full_example_project_name +services: + + bar: + build: + dockerfile_inline: | + FROM alpine + RUN echo "hello" > /world.txt + + foo: + annotations: + - com.example.foo=bar + build: + context: ./dir + dockerfile: Dockerfile + args: + foo: bar + ssh: + - default + target: foo + network: foo + cache_from: + - foo + - bar + labels: [FOO=BAR] + additional_contexts: + foo: ./bar + secrets: + - source: secret1 + target: /run/secrets/secret1 + - source: secret2 + target: my_secret + uid: '103' + gid: '103' + mode: 0440 + tags: + - foo:v1.0.0 + - docker.io/username/foo:my-other-tag + - ${COMPOSE_PROJECT_NAME}:1.0.0 + platforms: + - linux/amd64 + - linux/arm64 + + + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + + cgroup_parent: m-executor-abcd + + # String or list + command: bundle exec thin -p 3000 + # command: ["bundle", "exec", "thin", "-p", "3000"] + + configs: + - config1 + - source: config2 + target: /my_config + uid: '103' + gid: '103' + mode: 0440 + + container_name: my-web-container + + depends_on: + - db + - redis + + deploy: + mode: replicated + replicas: 6 + labels: [FOO=BAR] + rollback_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + order: start-first + update_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + order: start-first + resources: + limits: + cpus: '0.001' + memory: 50M + reservations: + cpus: '0.0001' + memory: 20M + generic_resources: + - discrete_resource_spec: + kind: 'gpu' + value: 2 + - discrete_resource_spec: + kind: 'ssd' + value: 1 + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + placement: + constraints: [node=foo] + max_replicas_per_node: 5 + preferences: + - spread: node.labels.az + endpoint_mode: dnsrr + + device_cgroup_rules: + - "c 1:3 mr" + - "a 7:* rmw" + + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + + # String or list + # dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + + # String or list + # dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + + domainname: foo.com + + # String or list + # entrypoint: 
/code/entrypoint.sh -p 3000 + entrypoint: ["/code/entrypoint.sh", "-p", "3000"] + + # String or list + # env_file: .env + env_file: + - ./example1.env + - path: ./example2.env + required: false + + # Mapping or list + # Mapping values can be strings, numbers or null + # Booleans are not allowed - must be quoted + environment: + BAZ: baz_from_service_def + QUX: + # environment: + # - RACK_ENV=development + # - SHOW=true + # - SESSION_SECRET + + # Items can be strings or numbers + expose: + - "3000" + - 8000 + + external_links: + - redis_1 + - project_db_1:mysql + - project_db_1:postgresql + + # Mapping or list + # Mapping values must be strings + # extra_hosts: + # somehost: "162.242.195.82" + # otherhost: "50.31.209.229" + extra_hosts: + - "otherhost:50.31.209.229" + - "somehost:162.242.195.82" + + hostname: foo + + healthcheck: + test: echo "hello world" + interval: 10s + timeout: 1s + retries: 5 + start_period: 15s + start_interval: 5s + + # Any valid image reference - repo, tag, id, sha + image: redis + # image: ubuntu:14.04 + # image: tutum/influxdb + # image: example-registry.com:4000/postgresql + # image: a4bc65fd + # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + + ipc: host + + uts: host + + # Mapping or list + # Mapping values can be strings, numbers or null + labels: + com.example.description: "Accounting webapp" + com.example.number: 42 + com.example.empty-label: + # labels: + # - "com.example.description=Accounting webapp" + # - "com.example.number=42" + # - "com.example.empty-label" + + links: + - db + - db:database + - redis + + logging: + driver: syslog + options: + syslog-address: "tcp://192.168.0.42:123" + + mac_address: 02:42:ac:11:65:43 + + # network_mode: "bridge" + # network_mode: "host" + # network_mode: "none" + # Use the network mode of an arbitrary container from another service + # network_mode: "service:db" + # Use the network mode of another container, specified by name or id + # network_mode: "container:some-container" + network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" + + networks: + some-network: + aliases: + - alias1 + - alias3 + other-network: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + mac_address: 02:42:72:98:65:08 + other-other-network: + + pid: "host" + + ports: + - 3000 + - "3001-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - "49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" + + privileged: true + + read_only: true + + restart: always + + secrets: + - source: secret1 + target: /run/secrets/secret1 + - source: secret2 + target: my_secret + uid: '103' + gid: '103' + mode: 0440 + + security_opt: + - label=level:s0:c100,c200 + - label=type:svirt_apache_t + + stdin_open: true + + stop_grace_period: 20s + + stop_signal: SIGUSR1 + storage_opt: + size: "20G" + sysctls: + net.core.somaxconn: 1024 + net.ipv4.tcp_syncookies: 0 + + # String or list + # tmpfs: /run + tmpfs: + - /run + - /tmp + + tty: true + + ulimits: + # Single number or mapping with soft + hard limits + nproc: 65535 + nofile: + soft: 20000 + hard: 40000 + + user: someone + + volumes: + # Just specify a path and let the Engine create a volume + - /var/lib/anonymous + # Specify an absolute path mapping + - /opt/data:/var/lib/data + # Path on the host, relative to the Compose file + - .:/code + - ./static:/var/www/html + # User-relative path + - ~/configs:/etc/configs:ro + # Named volume + - datavolume:/var/lib/volume + - type: bind + source: ./opt + target: 
/opt/cached + consistency: cached + - type: tmpfs + target: /opt/tmpfs + tmpfs: + size: 10000 + + working_dir: /code + x-bar: baz + x-foo: bar + +networks: + # Entries can be null, which specifies simply that a network + # called "{project name}_some-network" should be created and + # use the default driver + some-network: + + other-network: + driver: overlay + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + ipam: + driver: overlay + # driver_opts: + # # Values can be strings or numbers + # com.docker.network.enable_ipv6: "true" + # com.docker.network.numeric_value: 1 + config: + - subnet: 172.28.0.0/16 + ip_range: 172.28.5.0/24 + gateway: 172.28.5.254 + aux_addresses: + host1: 172.28.1.5 + host2: 172.28.1.6 + host3: 172.28.1.7 + - subnet: 2001:3984:3989::/64 + gateway: 2001:3984:3989::1 + + labels: + foo: bar + + external-network: + # Specifies that a pre-existing network called "external-network" + # can be referred to within this file as "external-network" + external: true + + other-external-network: + # Specifies that a pre-existing network called "my-cool-network" + # can be referred to within this file as "other-external-network" + external: + name: my-cool-network + x-bar: baz + x-foo: bar + +volumes: + # Entries can be null, which specifies simply that a volume + # called "{project name}_some-volume" should be created and + # use the default driver + some-volume: + + other-volume: + driver: flocker + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + labels: + foo: bar + + another-volume: + name: "user_specified_name" + driver: vsphere + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + external-volume: + # Specifies that a pre-existing volume called "external-volume" + # can be referred to within this file as "external-volume" + external: true + + other-external-volume: + # Specifies that a pre-existing volume called "my-cool-volume" + # can be referred to within this file as "other-external-volume" + # This example uses the deprecated "volume.external.name" (replaced by "volume.name") + external: + name: my-cool-volume + + external-volume3: + # Specifies that a pre-existing volume called "this-is-volume3" + # can be referred to within this file as "external-volume3" + name: this-is-volume3 + external: true + x-bar: baz + x-foo: bar + +configs: + config1: + file: ./config_data + labels: + foo: bar + config2: + external: + name: my_config + config3: + external: true + config4: + name: foo + file: ~/config_data + x-bar: baz + x-foo: bar + +secrets: + secret1: + file: ./secret_data + labels: + foo: bar + secret2: + external: + name: my_secret + secret3: + external: true + secret4: + name: bar + environment: BAR + x-bar: baz + x-foo: bar + secret5: + file: /abs/secret_data +x-bar: baz +x-foo: bar +x-nested: + bar: baz + foo: bar diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/include.go b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go new file mode 100644 index 000000000000..cc50734ded0c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go @@ -0,0 +1,173 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "context" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/compose-spec/compose-go/v2/dotenv" + interp "github.com/compose-spec/compose-go/v2/interpolation" + "github.com/compose-spec/compose-go/v2/types" +) + +// loadIncludeConfig parses the `include` config from raw YAML +func loadIncludeConfig(source any) ([]types.IncludeConfig, error) { + if source == nil { + return nil, nil + } + configs := source.([]any) + for i, config := range configs { + if v, ok := config.(string); ok { + configs[i] = map[string]any{ + "path": v, + } + } + } + var requires []types.IncludeConfig + err := Transform(source, &requires) + return requires, err +} + +func ApplyInclude(ctx context.Context, configDetails types.ConfigDetails, model map[string]any, options *Options, included []string) error { + includeConfig, err := loadIncludeConfig(model["include"]) + if err != nil { + return err + } + for _, r := range includeConfig { + for _, listener := range options.Listeners { + listener("include", map[string]any{ + "path": r.Path, + "workingdir": configDetails.WorkingDir, + }) + } + + for i, p := range r.Path { + for _, loader := range options.ResourceLoaders { + if loader.Accept(p) { + path, err := loader.Load(ctx, p) + if err != nil { + return err + } + p = path + break + } + } + r.Path[i] = absPath(configDetails.WorkingDir, p) + } + + mainFile := r.Path[0] + for _, f := range included { + if f == mainFile { + included = append(included, mainFile) + return fmt.Errorf("include cycle detected:\n%s\n include %s", included[0], strings.Join(included[1:], "\n include ")) + } + } + + if r.ProjectDirectory == "" { + r.ProjectDirectory = filepath.Dir(mainFile) + } + + loadOptions := options.clone() + loadOptions.ResolvePaths = true + loadOptions.SkipNormalization = true + loadOptions.SkipConsistencyCheck = true + loadOptions.ResourceLoaders = append(loadOptions.RemoteResourceLoaders(), localResourceLoader{ + WorkingDir: r.ProjectDirectory, + }) + + if len(r.EnvFile) == 0 { + f := filepath.Join(r.ProjectDirectory, ".env") + if s, err := os.Stat(f); err == nil && !s.IsDir() { + r.EnvFile = types.StringList{f} + } + } + + envFromFile, err := dotenv.GetEnvFromFile(configDetails.Environment, r.EnvFile) + if err != nil { + return err + } + + config := types.ConfigDetails{ + WorkingDir: r.ProjectDirectory, + ConfigFiles: types.ToConfigFiles(r.Path), + Environment: configDetails.Environment.Clone().Merge(envFromFile), + } + loadOptions.Interpolate = &interp.Options{ + Substitute: options.Interpolate.Substitute, + LookupValue: config.LookupEnv, + TypeCastMapping: options.Interpolate.TypeCastMapping, + } + imported, err := loadYamlModel(ctx, config, loadOptions, &cycleTracker{}, included) + if err != nil { + return err + } + err = importResources(imported, model) + if err != nil { + return err + } + } + delete(model, "include") + return nil +} + +// importResources imports into target all resources defined by source, and reports an error on conflict +func importResources(source map[string]any, target map[string]any) error { + if err := importResource(source, target, "services");
err != nil { + return err + } + if err := importResource(source, target, "volumes"); err != nil { + return err + } + if err := importResource(source, target, "networks"); err != nil { + return err + } + if err := importResource(source, target, "secrets"); err != nil { + return err + } + if err := importResource(source, target, "configs"); err != nil { + return err + } + return nil +} + +func importResource(source map[string]any, target map[string]any, key string) error { + from := source[key] + if from != nil { + var to map[string]any + if v, ok := target[key]; ok { + to = v.(map[string]any) + } else { + to = map[string]any{} + } + for name, a := range from.(map[string]any) { + if conflict, ok := to[name]; ok { + if reflect.DeepEqual(a, conflict) { + continue + } + return fmt.Errorf("%s.%s conflicts with imported resource", key, name) + } + to[name] = a + } + target[key] = to + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go b/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go new file mode 100644 index 000000000000..a1cef1ecc1db --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go @@ -0,0 +1,117 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "fmt" + "strconv" + "strings" + + interp "github.com/compose-spec/compose-go/v2/interpolation" + "github.com/compose-spec/compose-go/v2/tree" + "github.com/sirupsen/logrus" +) + +var interpolateTypeCastMapping = map[tree.Path]interp.Cast{ + servicePath("configs", tree.PathMatchList, "mode"): toInt, + servicePath("cpu_count"): toInt64, + servicePath("cpu_percent"): toFloat, + servicePath("cpu_period"): toInt64, + servicePath("cpu_quota"): toInt64, + servicePath("cpu_rt_period"): toInt64, + servicePath("cpu_rt_runtime"): toInt64, + servicePath("cpus"): toFloat32, + servicePath("cpu_shares"): toInt64, + servicePath("init"): toBoolean, + servicePath("deploy", "replicas"): toInt, + servicePath("deploy", "update_config", "parallelism"): toInt, + servicePath("deploy", "update_config", "max_failure_ratio"): toFloat, + servicePath("deploy", "rollback_config", "parallelism"): toInt, + servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat, + servicePath("deploy", "restart_policy", "max_attempts"): toInt, + servicePath("deploy", "placement", "max_replicas_per_node"): toInt, + servicePath("healthcheck", "retries"): toInt, + servicePath("healthcheck", "disable"): toBoolean, + servicePath("oom_kill_disable"): toBoolean, + servicePath("oom_score_adj"): toInt64, + servicePath("pids_limit"): toInt64, + servicePath("ports", tree.PathMatchList, "target"): toInt, + servicePath("privileged"): toBoolean, + servicePath("read_only"): toBoolean, + servicePath("scale"): toInt, + servicePath("secrets", tree.PathMatchList, "mode"): toInt, + servicePath("stdin_open"): toBoolean, + servicePath("tty"): toBoolean, + servicePath("ulimits", tree.PathMatchAll): toInt, + servicePath("ulimits", tree.PathMatchAll, "hard"): toInt, + servicePath("ulimits", tree.PathMatchAll, "soft"): toInt, + servicePath("volumes", tree.PathMatchList, "read_only"): toBoolean, + servicePath("volumes", tree.PathMatchList, "volume", "nocopy"): toBoolean, + iPath("networks", tree.PathMatchAll, "external"): toBoolean, + iPath("networks", tree.PathMatchAll, "internal"): toBoolean, + iPath("networks", tree.PathMatchAll, "attachable"): toBoolean, + iPath("networks", tree.PathMatchAll, "enable_ipv6"): toBoolean, + iPath("volumes", tree.PathMatchAll, "external"): toBoolean, + iPath("secrets", tree.PathMatchAll, "external"): toBoolean, + iPath("configs", tree.PathMatchAll, "external"): toBoolean, +} + +func iPath(parts ...string) tree.Path { + return tree.NewPath(parts...) +} + +func servicePath(parts ...string) tree.Path { + return iPath(append([]string{"services", tree.PathMatchAll}, parts...)...) 
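+	// e.g. servicePath("deploy", "replicas") yields the pattern "services.*.deploy.replicas"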
+} + +func toInt(value string) (interface{}, error) { + return strconv.Atoi(value) +} + +func toInt64(value string) (interface{}, error) { + return strconv.ParseInt(value, 10, 64) +} + +func toFloat(value string) (interface{}, error) { + return strconv.ParseFloat(value, 64) +} + +func toFloat32(value string) (interface{}, error) { + f, err := strconv.ParseFloat(value, 32) + if err != nil { + return nil, err + } + return float32(f), nil +} + +// should match http://yaml.org/type/bool.html +func toBoolean(value string) (interface{}, error) { + switch strings.ToLower(value) { + case "true": + return true, nil + case "false": + return false, nil + case "y", "yes", "on": + logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `true`", value) + return true, nil + case "n", "no", "off": + logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `false`", value) + return false, nil + default: + return nil, fmt.Errorf("invalid boolean: %s", value) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go new file mode 100644 index 000000000000..2201e975744c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go @@ -0,0 +1,787 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/v2/consts" + interp "github.com/compose-spec/compose-go/v2/interpolation" + "github.com/compose-spec/compose-go/v2/override" + "github.com/compose-spec/compose-go/v2/paths" + "github.com/compose-spec/compose-go/v2/schema" + "github.com/compose-spec/compose-go/v2/template" + "github.com/compose-spec/compose-go/v2/transform" + "github.com/compose-spec/compose-go/v2/tree" + "github.com/compose-spec/compose-go/v2/types" + "github.com/compose-spec/compose-go/v2/validation" + "github.com/mitchellh/mapstructure" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" +) + +// Options supported by Load +type Options struct { + // Skip schema validation + SkipValidation bool + // Skip interpolation + SkipInterpolation bool + // Skip normalization + SkipNormalization bool + // Resolve paths + ResolvePaths bool + // Convert Windows paths + ConvertWindowsPaths bool + // Skip consistency check + SkipConsistencyCheck bool + // Skip extends + SkipExtends bool + // SkipInclude will ignore `include` and only load model from file(s) set by ConfigDetails + SkipInclude bool + // SkipResolveEnvironment will ignore computing `environment` for services + SkipResolveEnvironment bool + // SkipDefaultValues will ignore missing required attributes + SkipDefaultValues bool + // Interpolation options + Interpolate *interp.Options + // Discard 'env_file' entries after resolving to 'environment' section + discardEnvFiles bool + // Set project name + projectName string + // Indicates whether the projectName was imperatively set or guessed from path + projectNameImperativelySet bool + // Profiles set profiles to enable + Profiles []string + // ResourceLoaders manages support for remote resources + ResourceLoaders []ResourceLoader + // KnownExtensions manages x-* attribute we know and the corresponding go structs + KnownExtensions map[string]any + // Metadata for telemetry + Listeners []Listener +} + +type Listener = func(event string, metadata map[string]any) + +// Invoke all listeners for an event +func (o *Options) ProcessEvent(event string, metadata map[string]any) { + for _, l := range o.Listeners { + l(event, metadata) + } +} + +// ResourceLoader is a pluggable remote resource resolver +type ResourceLoader interface { + // Accept returns `true` if the resource reference matches ResourceLoader supported protocol(s) + Accept(path string) bool + // Load returns the path to a local copy of remote resource identified by `path`.
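+	// (For a remote loader this typically means fetching the resource to a +	// local cache and returning that path; the built-in localResourceLoader +	// simply resolves `path` against its working directory.)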
+ Load(ctx context.Context, path string) (string, error) + // Dir computes path to resource's parent folder, made relative if possible + Dir(path string) string +} + +// RemoteResourceLoaders excludes localResourceLoader from ResourceLoaders +func (o Options) RemoteResourceLoaders() []ResourceLoader { + var loaders []ResourceLoader + for i, loader := range o.ResourceLoaders { + if _, ok := loader.(localResourceLoader); ok { + if i != len(o.ResourceLoaders)-1 { + logrus.Warning("misconfiguration of ResourceLoaders: localResourceLoader should be last") + } + continue + } + loaders = append(loaders, loader) + } + return loaders +} + +type localResourceLoader struct { + WorkingDir string +} + +func (l localResourceLoader) abs(p string) string { + if filepath.IsAbs(p) { + return p + } + return filepath.Join(l.WorkingDir, p) +} + +func (l localResourceLoader) Accept(p string) bool { + _, err := os.Stat(l.abs(p)) + return err == nil +} + +func (l localResourceLoader) Load(_ context.Context, p string) (string, error) { + return l.abs(p), nil +} + +func (l localResourceLoader) Dir(path string) string { + path = l.abs(filepath.Dir(path)) + rel, err := filepath.Rel(l.WorkingDir, path) + if err != nil { + return path + } + return rel +} + +func (o *Options) clone() *Options { + return &Options{ + SkipValidation: o.SkipValidation, + SkipInterpolation: o.SkipInterpolation, + SkipNormalization: o.SkipNormalization, + ResolvePaths: o.ResolvePaths, + ConvertWindowsPaths: o.ConvertWindowsPaths, + SkipConsistencyCheck: o.SkipConsistencyCheck, + SkipExtends: o.SkipExtends, + SkipInclude: o.SkipInclude, + Interpolate: o.Interpolate, + discardEnvFiles: o.discardEnvFiles, + projectName: o.projectName, + projectNameImperativelySet: o.projectNameImperativelySet, + Profiles: o.Profiles, + ResourceLoaders: o.ResourceLoaders, + KnownExtensions: o.KnownExtensions, + Listeners: o.Listeners, + } +} + +func (o *Options) SetProjectName(name string, imperativelySet bool) { + o.projectName = name + o.projectNameImperativelySet = imperativelySet +} + +func (o Options) GetProjectName() (string, bool) { + return o.projectName, o.projectNameImperativelySet +} + +// serviceRef identifies a reference to a service. It's used to detect cyclic +// references in "extends". +type serviceRef struct { + filename string + service string +} + +type cycleTracker struct { + loaded []serviceRef +} + +func (ct *cycleTracker) Add(filename, service string) (*cycleTracker, error) { + toAdd := serviceRef{filename: filename, service: service} + for _, loaded := range ct.loaded { + if toAdd == loaded { + // Create an error message of the form: + // Circular reference: + // service-a in docker-compose.yml + // extends service-b in docker-compose.yml + // extends service-a in docker-compose.yml + errLines := []string{ + "Circular reference:", + fmt.Sprintf(" %s in %s", ct.loaded[0].service, ct.loaded[0].filename), + } + for _, service := range append(ct.loaded[1:], toAdd) { + errLines = append(errLines, fmt.Sprintf(" extends %s in %s", service.service, service.filename)) + } + + return nil, errors.New(strings.Join(errLines, "\n")) + } + } + + var branch []serviceRef + branch = append(branch, ct.loaded...)
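+	// copy the visited chain into a fresh slice so sibling branches of the +	// recursion don't append into a shared backing array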
+ branch = append(branch, toAdd) + return &cycleTracker{ + loaded: branch, + }, nil +} + +// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to +// the `environment` section +func WithDiscardEnvFiles(opts *Options) { + opts.discardEnvFiles = true +} + +// WithSkipValidation sets the Options to skip validation when loading sections +func WithSkipValidation(opts *Options) { + opts.SkipValidation = true +} + +// WithProfiles sets profiles to be activated +func WithProfiles(profiles []string) func(*Options) { + return func(opts *Options) { + opts.Profiles = profiles + } +} + +// ParseYAML reads the bytes from a file, parses the bytes into a mapping +// structure, and returns it. +func ParseYAML(source []byte) (map[string]interface{}, error) { + r := bytes.NewReader(source) + decoder := yaml.NewDecoder(r) + m, _, err := parseYAML(decoder) + return m, err +} + +// PostProcessor is used to tweak compose model based on metadata extracted during yaml Unmarshal phase +// that can hardly be implemented using go-yaml and mapstructure +type PostProcessor interface { + yaml.Unmarshaler + + // Apply changes to compose model based on recorded metadata + Apply(interface{}) error +} + +func parseYAML(decoder *yaml.Decoder) (map[string]interface{}, PostProcessor, error) { + var cfg interface{} + processor := ResetProcessor{target: &cfg} + + if err := decoder.Decode(&processor); err != nil { + return nil, nil, err + } + stringMap, ok := cfg.(map[string]interface{}) + if ok { + converted, err := convertToStringKeysRecursive(stringMap, "") + if err != nil { + return nil, nil, err + } + return converted.(map[string]interface{}), &processor, nil + } + cfgMap, ok := cfg.(map[interface{}]interface{}) + if !ok { + return nil, nil, errors.New("Top-level object must be a mapping") + } + converted, err := convertToStringKeysRecursive(cfgMap, "") + if err != nil { + return nil, nil, err + } + return converted.(map[string]interface{}), &processor, nil +} + +// Load reads a ConfigDetails and returns a fully loaded configuration. +// Deprecated: use LoadWithContext. +func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) { + return LoadWithContext(context.Background(), configDetails, options...)
+} + +// LoadWithContext reads a ConfigDetails and returns a fully loaded configuration +func LoadWithContext(ctx context.Context, configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) { + if len(configDetails.ConfigFiles) < 1 { + return nil, errors.New("No files specified") + } + + opts := &Options{ + Interpolate: &interp.Options{ + Substitute: template.Substitute, + LookupValue: configDetails.LookupEnv, + TypeCastMapping: interpolateTypeCastMapping, + }, + ResolvePaths: true, + } + + for _, op := range options { + op(opts) + } + opts.ResourceLoaders = append(opts.ResourceLoaders, localResourceLoader{configDetails.WorkingDir}) + + projectName, err := projectName(configDetails, opts) + if err != nil { + return nil, err + } + opts.projectName = projectName + + // TODO(milas): this should probably ALWAYS be set (overriding any existing) + if _, ok := configDetails.Environment[consts.ComposeProjectName]; !ok && projectName != "" { + if configDetails.Environment == nil { + configDetails.Environment = map[string]string{} + } + configDetails.Environment[consts.ComposeProjectName] = projectName + } + + return load(ctx, configDetails, opts, nil) +} + +func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Options, ct *cycleTracker, included []string) (map[string]interface{}, error) { + var ( + dict = map[string]interface{}{} + err error + ) + for _, file := range config.ConfigFiles { + fctx := context.WithValue(ctx, consts.ComposeFileKey{}, file.Filename) + if len(file.Content) == 0 && file.Config == nil { + content, err := os.ReadFile(file.Filename) + if err != nil { + return nil, err + } + file.Content = content + } + + processRawYaml := func(raw interface{}, processors ...PostProcessor) error { + converted, err := convertToStringKeysRecursive(raw, "") + if err != nil { + return err + } + cfg, ok := converted.(map[string]interface{}) + if !ok { + return errors.New("Top-level object must be a mapping") + } + + if opts.Interpolate != nil && !opts.SkipInterpolation { + cfg, err = interp.Interpolate(cfg, *opts.Interpolate) + if err != nil { + return err + } + } + + fixEmptyNotNull(cfg) + + if !opts.SkipExtends { + err = ApplyExtends(fctx, cfg, opts, ct, processors...)
+ if err != nil { + return err + } + } + + for _, processor := range processors { + if err := processor.Apply(dict); err != nil { + return err + } + } + + if !opts.SkipInclude { + included = append(included, config.ConfigFiles[0].Filename) + err = ApplyInclude(ctx, config, cfg, opts, included) + if err != nil { + return err + } + } + + dict, err = override.Merge(dict, cfg) + if err != nil { + return err + } + + dict, err = override.EnforceUnicity(dict) + if err != nil { + return err + } + + if !opts.SkipValidation { + if err := schema.Validate(dict); err != nil { + return fmt.Errorf("validating %s: %w", file.Filename, err) + } + } + + return err + } + + if file.Config == nil { + r := bytes.NewReader(file.Content) + decoder := yaml.NewDecoder(r) + for { + var raw interface{} + processor := &ResetProcessor{target: &raw} + err := decoder.Decode(processor) + if err != nil && errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + if err := processRawYaml(raw, processor); err != nil { + return nil, err + } + } + } else { + if err := processRawYaml(file.Config); err != nil { + return nil, err + } + } + } + + dict, err = transform.Canonical(dict) + if err != nil { + return nil, err + } + + // Canonical transformation can reveal duplicates, typically as ports can be a range and conflict with an override + dict, err = override.EnforceUnicity(dict) + if err != nil { + return nil, err + } + + if !opts.SkipDefaultValues { + dict, err = transform.SetDefaultValues(dict) + if err != nil { + return nil, err + } + } + + if !opts.SkipValidation { + if err := validation.Validate(dict); err != nil { + return nil, err + } + } + + if opts.ResolvePaths { + var remotes []paths.RemoteResource + for _, loader := range opts.RemoteResourceLoaders() { + remotes = append(remotes, loader.Accept) + } + err = paths.ResolveRelativePaths(dict, config.WorkingDir, remotes) + if err != nil { + return nil, err + } + } + + return dict, nil +} + +func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options, loaded []string) (*types.Project, error) { + mainFile := configDetails.ConfigFiles[0].Filename + for _, f := range loaded { + if f == mainFile { + loaded = append(loaded, mainFile) + return nil, fmt.Errorf("include cycle detected:\n%s\n include %s", loaded[0], strings.Join(loaded[1:], "\n include ")) + } + } + loaded = append(loaded, mainFile) + + dict, err := loadYamlModel(ctx, configDetails, opts, &cycleTracker{}, nil) + if err != nil { + return nil, err + } + + if len(dict) == 0 { + return nil, errors.New("empty compose file") + } + + project := &types.Project{ + Name: opts.projectName, + WorkingDir: configDetails.WorkingDir, + Environment: configDetails.Environment, + } + delete(dict, "name") // project name set by yaml must be identified by caller as opts.projectName + + dict, err = processExtensions(dict, tree.NewPath(), opts.KnownExtensions) + if err != nil { + return nil, err + } + + err = Transform(dict, project) + if err != nil { + return nil, err + } + + if !opts.SkipNormalization { + err := Normalize(project) + if err != nil { + return nil, err + } + } + + if opts.ConvertWindowsPaths { + for i, service := range project.Services { + for j, volume := range service.Volumes { + service.Volumes[j] = convertVolumePath(volume) + } + project.Services[i] = service + } + } + + if !opts.SkipConsistencyCheck { + err := checkConsistency(project) + if err != nil { + return nil, err + } + } + + if project, err = project.WithProfiles(opts.Profiles); err != nil { + return nil, err + } + + 
if !opts.SkipResolveEnvironment { + project, err = project.WithServicesEnvironmentResolved(opts.discardEnvFiles) + if err != nil { + return nil, err + } + } + + return project, nil +} + +func InvalidProjectNameErr(v string) error { + return fmt.Errorf( + "invalid project name %q: must consist only of lowercase alphanumeric characters, hyphens, and underscores as well as start with a letter or number", + v, + ) +} + +// projectName determines the canonical name to use for the project considering +// the loader Options as well as `name` fields in Compose YAML files (which +// also support interpolation). +// +// TODO(milas): restructure loading so that we don't need to re-parse the YAML +// here, as it's both wasteful and makes this code error-prone. +func projectName(details types.ConfigDetails, opts *Options) (string, error) { + projectName, projectNameImperativelySet := opts.GetProjectName() + + // if user did NOT provide a name explicitly, then see if one is defined + // in any of the config files + if !projectNameImperativelySet { + var pjNameFromConfigFile string + for _, configFile := range details.ConfigFiles { + content := configFile.Content + if content == nil { + // This can be hit when Filename is set but Content is not. One + // example is when using ToConfigFiles(). + d, err := os.ReadFile(configFile.Filename) + if err != nil { + return "", fmt.Errorf("failed to read file %q: %w", configFile.Filename, err) + } + content = d + } + yml, err := ParseYAML(content) + if err != nil { + // HACK: the way that loading is currently structured, this is + // a duplicative parse just for the `name`. If it fails, we + // give up but don't return the error, knowing that it'll get + // caught downstream for us + return "", nil + } + if val, ok := yml["name"]; ok && val != "" { + sVal, ok := val.(string) + if !ok { + // HACK: see above - this is a temporary parsed version + // that hasn't been schema-validated, but we don't want + // to be the ones to actually report that, so give up, + // knowing that it'll get caught downstream for us + return "", nil + } + pjNameFromConfigFile = sVal + } + } + if !opts.SkipInterpolation { + interpolated, err := interp.Interpolate( + map[string]interface{}{"name": pjNameFromConfigFile}, + *opts.Interpolate, + ) + if err != nil { + return "", err + } + pjNameFromConfigFile = interpolated["name"].(string) + } + pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile) + if pjNameFromConfigFile != "" { + projectName = pjNameFromConfigFile + } + } + + if projectName == "" { + return "", errors.New("project name must not be empty") + } + + if NormalizeProjectName(projectName) != projectName { + return "", InvalidProjectNameErr(projectName) + } + + return projectName, nil +} + +func NormalizeProjectName(s string) string { + r := regexp.MustCompile("[a-z0-9_-]") + s = strings.ToLower(s) + s = strings.Join(r.FindAllString(s, -1), "") + return strings.TrimLeft(s, "_-") +} + +var userDefinedKeys = []tree.Path{ + "services", + "volumes", + "networks", + "secrets", + "configs", +} + +func processExtensions(dict map[string]any, p tree.Path, extensions map[string]any) (map[string]interface{}, error) { + extras := map[string]any{} + var err error + for key, value := range dict { + skip := false + for _, uk := range userDefinedKeys { + if uk.Matches(p) { + skip = true + break + } + } + if !skip && strings.HasPrefix(key, "x-") { + extras[key] = value + delete(dict, key) + continue + } + switch v := value.(type) { + case map[string]interface{}: + dict[key], err =
processExtensions(v, p.Next(key), extensions) + if err != nil { + return nil, err + } + case []interface{}: + for i, e := range v { + if m, ok := e.(map[string]interface{}); ok { + v[i], err = processExtensions(m, p.Next(strconv.Itoa(i)), extensions) + if err != nil { + return nil, err + } + } + } + } + } + for name, val := range extras { + if typ, ok := extensions[name]; ok { + target := reflect.New(reflect.TypeOf(typ)).Elem().Interface() + err = Transform(val, &target) + if err != nil { + return nil, err + } + extras[name] = target + } + } + if len(extras) > 0 { + dict[consts.Extensions] = extras + } + return dict, nil +} + +// Transform converts the source into the target struct with compose types transformer +// and the specified transformers if any. +func Transform(source interface{}, target interface{}) error { + data := mapstructure.Metadata{} + config := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + nameServices, + decoderHook, + cast), + Result: target, + TagName: "yaml", + Metadata: &data, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + return decoder.Decode(source) +} + +// nameServices creates an implicit `name` key for convenient access to services +func nameServices(from reflect.Value, to reflect.Value) (interface{}, error) { + if to.Type() == reflect.TypeOf(types.Services{}) { + nameK := reflect.ValueOf("name") + iter := from.MapRange() + for iter.Next() { + name := iter.Key() + elem := iter.Value() + elem.Elem().SetMapIndex(nameK, name) + } + } + return from.Interface(), nil +} + +// keys need to be converted to strings for jsonschema +func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { + if mapping, ok := value.(map[string]interface{}); ok { + for key, entry := range mapping { + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = key + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, key) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + mapping[key] = convertedEntry + } + return mapping, nil + } + if mapping, ok := value.(map[interface{}]interface{}); ok { + dict := make(map[string]interface{}) + for key, entry := range mapping { + str, ok := key.(string) + if !ok { + return nil, formatInvalidKeyError(keyPrefix, key) + } + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = str + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + dict[str] = convertedEntry + } + return dict, nil + } + if list, ok := value.([]interface{}); ok { + var convertedList []interface{} + for index, entry := range list { + newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + convertedList = append(convertedList, convertedEntry) + } + return convertedList, nil + } + return value, nil +} + +func formatInvalidKeyError(keyPrefix string, key interface{}) error { + var location string + if keyPrefix == "" { + location = "at top level" + } else { + location = fmt.Sprintf("in %s", keyPrefix) + } + return fmt.Errorf("Non-string key %s: %#v", location, key) +} + +// Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with +// the Engine.
Volume paths are expected to be linux style /c/my/path/shiny/ +func convertVolumePath(volume types.ServiceVolumeConfig) types.ServiceVolumeConfig { + volumeName := strings.ToLower(filepath.VolumeName(volume.Source)) + if len(volumeName) != 2 { + return volume + } + + convertedSource := fmt.Sprintf("/%c%s", volumeName[0], volume.Source[len(volumeName):]) + convertedSource = strings.ReplaceAll(convertedSource, "\\", "/") + + volume.Source = convertedSource + return volume +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go b/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go new file mode 100644 index 000000000000..e5b902ab2918 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go @@ -0,0 +1,79 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "reflect" + "strconv" +) + +// comparable to yaml.Unmarshaler, decoder allows a type to define its own custom logic to convert a value +// see https://github.com/mitchellh/mapstructure/pull/294 +type decoder interface { + DecodeMapstructure(interface{}) error +} + +// see https://github.com/mitchellh/mapstructure/issues/115#issuecomment-735287466 +// adapted to support types derived from built-in types, as DecodeMapstructure would not be able to mutate the internal +// value, so we need to invoke DecodeMapstructure defined on the pointer to the type +func decoderHook(from reflect.Value, to reflect.Value) (interface{}, error) { + // If the destination implements the decoder interface + u, ok := to.Interface().(decoder) + if !ok { + // for non-struct types we need to invoke func (*type) DecodeMapstructure() + if to.CanAddr() { + pto := to.Addr() + u, ok = pto.Interface().(decoder) + } + if !ok { + return from.Interface(), nil + } + } + // If it is nil and a pointer, create and assign the target value first + if to.Type().Kind() == reflect.Ptr && to.IsNil() { + to.Set(reflect.New(to.Type().Elem())) + u = to.Interface().(decoder) + } + // Call the custom DecodeMapstructure method + if err := u.DecodeMapstructure(from.Interface()); err != nil { + return to.Interface(), err + } + return to.Interface(), nil +} + +func cast(from reflect.Value, to reflect.Value) (interface{}, error) { + switch from.Type().Kind() { + case reflect.String: + switch to.Kind() { + case reflect.Bool: + return toBoolean(from.String()) + case reflect.Int: + return toInt(from.String()) + case reflect.Int64: + return toInt64(from.String()) + case reflect.Float32: + return toFloat32(from.String()) + case reflect.Float64: + return toFloat(from.String()) + } + case reflect.Int: + if to.Kind() == reflect.String { + return strconv.FormatInt(from.Int(), 10), nil + } + } + return from.Interface(), nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go b/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go new file mode 100644 index 000000000000..e1f0beebfa03 --- /dev/null +++
b/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go @@ -0,0 +1,283 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/errdefs" + "github.com/compose-spec/compose-go/v2/types" + "github.com/sirupsen/logrus" +) + +// Normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults +func Normalize(project *types.Project) error { + if project.Networks == nil { + project.Networks = make(map[string]types.NetworkConfig) + } + + // If not declared explicitly, Compose model involves an implicit "default" network + if _, ok := project.Networks["default"]; !ok { + project.Networks["default"] = types.NetworkConfig{} + } + + for name, s := range project.Services { + if len(s.Networks) == 0 && s.NetworkMode == "" { + // Services without explicit network attachment are implicitly exposed on the default network + s.Networks = map[string]*types.ServiceNetworkConfig{"default": nil} + } + + if s.PullPolicy == types.PullPolicyIfNotPresent { + s.PullPolicy = types.PullPolicyMissing + } + + fn := func(s string) (string, bool) { + v, ok := project.Environment[s] + return v, ok + } + + if s.Build != nil { + if s.Build.Context == "" { + s.Build.Context = "."
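+	// no build context declared: default to "." (resolved against the project working directory)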
+ } + if s.Build.Dockerfile == "" && s.Build.DockerfileInline == "" { + s.Build.Dockerfile = "Dockerfile" + } + s.Build.Args = s.Build.Args.Resolve(fn) + } + s.Environment = s.Environment.Resolve(fn) + + for _, link := range s.Links { + parts := strings.Split(link, ":") + if len(parts) == 2 { + link = parts[0] + } + s.DependsOn = setIfMissing(s.DependsOn, link, types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Restart: true, + Required: true, + }) + } + + for _, namespace := range []string{s.NetworkMode, s.Ipc, s.Pid, s.Uts, s.Cgroup} { + if strings.HasPrefix(namespace, types.ServicePrefix) { + name := namespace[len(types.ServicePrefix):] + s.DependsOn = setIfMissing(s.DependsOn, name, types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Restart: true, + Required: true, + }) + } + } + + for _, vol := range s.VolumesFrom { + if !strings.HasPrefix(vol, types.ContainerPrefix) { + spec := strings.Split(vol, ":") + s.DependsOn = setIfMissing(s.DependsOn, spec[0], types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Restart: false, + Required: true, + }) + } + } + + err := relocateLogDriver(&s) + if err != nil { + return err + } + + err = relocateLogOpt(&s) + if err != nil { + return err + } + + err = relocateDockerfile(&s) + if err != nil { + return err + } + + inferImplicitDependencies(&s) + + project.Services[name] = s + } + + setNameFromKey(project) + + return nil +} + +// IsServiceDependency checks whether the relation set by ref refers to a service +func IsServiceDependency(ref string) (string, bool) { + if strings.HasPrefix( + ref, + types.ServicePrefix, + ) { + return ref[len(types.ServicePrefix):], true + } + return "", false +} + +func inferImplicitDependencies(service *types.ServiceConfig) { + var dependencies []string + + maybeReferences := []string{ + service.NetworkMode, + service.Ipc, + service.Pid, + service.Uts, + service.Cgroup, + } + for _, ref := range maybeReferences { + if dep, ok := IsServiceDependency(ref); ok { + dependencies = append(dependencies, dep) + } + } + + for _, vol := range service.VolumesFrom { + spec := strings.Split(vol, ":") + if len(spec) == 0 { + continue + } + if spec[0] == "container" { + continue + } + dependencies = append(dependencies, spec[0]) + } + + for _, link := range service.Links { + dependencies = append(dependencies, strings.Split(link, ":")[0]) + } + + if len(dependencies) > 0 && service.DependsOn == nil { + service.DependsOn = make(types.DependsOnConfig) + } + + for _, d := range dependencies { + if _, ok := service.DependsOn[d]; !ok { + service.DependsOn[d] = types.ServiceDependency{ + Condition: types.ServiceConditionStarted, + Required: true, + } + } + } +} + +// setIfMissing adds a ServiceDependency for service if not already defined +func setIfMissing(d types.DependsOnConfig, service string, dep types.ServiceDependency) types.DependsOnConfig { + if d == nil { + d = types.DependsOnConfig{} + } + if _, ok := d[service]; !ok { + d[service] = dep + } + return d +} + +// Resources with no explicit name are actually named by their key in the map +func setNameFromKey(project *types.Project) { + for key, n := range project.Networks { + if n.Name == "" { + if n.External { + n.Name = key + } else { + n.Name = fmt.Sprintf("%s_%s", project.Name, key) + } + project.Networks[key] = n + } + } + + for key, v := range project.Volumes { + if v.Name == "" { + if v.External { + v.Name = key + } else { + v.Name = fmt.Sprintf("%s_%s", project.Name, key) + } + project.Volumes[key] = v + } + } + + for key, c :=
range project.Configs { + if c.Name == "" { + if c.External { + c.Name = key + } else { + c.Name = fmt.Sprintf("%s_%s", project.Name, key) + } + project.Configs[key] = c + } + } + + for key, s := range project.Secrets { + if s.Name == "" { + if s.External { + s.Name = key + } else { + s.Name = fmt.Sprintf("%s_%s", project.Name, key) + } + project.Secrets[key] = s + } + } +} + +func relocateLogOpt(s *types.ServiceConfig) error { + if len(s.LogOpt) != 0 { + logrus.Warn("`log_opt` is deprecated. Use the `logging` element") + if s.Logging == nil { + s.Logging = &types.LoggingConfig{} + } + for k, v := range s.LogOpt { + if _, ok := s.Logging.Options[k]; !ok { + s.Logging.Options[k] = v + } else { + return fmt.Errorf("can't use both 'log_opt' (deprecated) and 'logging.options': %w", errdefs.ErrInvalid) + } + } + } + return nil +} + +func relocateLogDriver(s *types.ServiceConfig) error { + if s.LogDriver != "" { + logrus.Warn("`log_driver` is deprecated. Use the `logging` element") + if s.Logging == nil { + s.Logging = &types.LoggingConfig{} + } + if s.Logging.Driver == "" { + s.Logging.Driver = s.LogDriver + } else { + return fmt.Errorf("can't use both 'log_driver' (deprecated) and 'logging.driver': %w", errdefs.ErrInvalid) + } + } + return nil +} + +func relocateDockerfile(s *types.ServiceConfig) error { + if s.Dockerfile != "" { + logrus.Warn("`dockerfile` is deprecated. Use the `build` element") + if s.Build == nil { + s.Build = &types.BuildConfig{} + } + if s.Build.Dockerfile == "" { + s.Build.Dockerfile = s.Dockerfile + } else { + return fmt.Errorf("can't use both 'dockerfile' (deprecated) and 'build.dockerfile': %w", errdefs.ErrInvalid) + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go b/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go new file mode 100644 index 000000000000..102ff036e066 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go @@ -0,0 +1,74 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package loader + +import ( + "os" + "path/filepath" + "strings" + + "github.com/compose-spec/compose-go/v2/types" +) + +// ResolveRelativePaths resolves relative paths based on project WorkingDirectory +func ResolveRelativePaths(project *types.Project) error { + absWorkingDir, err := filepath.Abs(project.WorkingDir) + if err != nil { + return err + } + project.WorkingDir = absWorkingDir + + absComposeFiles, err := absComposeFiles(project.ComposeFiles) + if err != nil { + return err + } + project.ComposeFiles = absComposeFiles + return nil +} + +func absPath(workingDir string, filePath string) string { + if strings.HasPrefix(filePath, "~") { + home, _ := os.UserHomeDir() + return filepath.Join(home, filePath[1:]) + } + if filepath.IsAbs(filePath) { + return filePath + } + return filepath.Join(workingDir, filePath) +} + +func absComposeFiles(composeFiles []string) ([]string, error) { + for i, composeFile := range composeFiles { + absComposefile, err := filepath.Abs(composeFile) + if err != nil { + return nil, err + } + composeFiles[i] = absComposefile + } + return composeFiles, nil +} + +func resolvePaths(basePath string, in types.StringList) types.StringList { + if in == nil { + return nil + } + ret := make(types.StringList, len(in)) + for i := range in { + ret[i] = absPath(basePath, in[i]) + } + return ret +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go b/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go new file mode 100644 index 000000000000..3c615eff7af6 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go @@ -0,0 +1,126 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
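The `absPath` helper in the hunk above applies three rules in order: expand a leading `~`, keep absolute paths untouched, and anchor everything else to the working directory. A standalone sketch of that decision order (a simplified copy for illustration, not the vendored function itself):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// absPath mirrors the resolution order of the vendored helper:
// expand a leading "~", keep absolute paths, anchor the rest to workingDir.
func absPath(workingDir, filePath string) string {
	if strings.HasPrefix(filePath, "~") {
		home, _ := os.UserHomeDir()
		return filepath.Join(home, filePath[1:])
	}
	if filepath.IsAbs(filePath) {
		return filePath
	}
	return filepath.Join(workingDir, filePath)
}

func main() {
	for _, p := range []string{"./env/.env", "/etc/app.env", "~/app.env"} {
		fmt.Println(p, "->", absPath("/projects/demo", p))
	}
}
```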
+*/ + +package loader + +import ( + "fmt" + "strconv" + + "github.com/compose-spec/compose-go/v2/tree" + "gopkg.in/yaml.v3" +) + +type ResetProcessor struct { + target interface{} + paths []tree.Path +} + +// UnmarshalYAML implement yaml.Unmarshaler +func (p *ResetProcessor) UnmarshalYAML(value *yaml.Node) error { + resolved, err := p.resolveReset(value, tree.NewPath()) + if err != nil { + return err + } + return resolved.Decode(p.target) +} + +// resolveReset detects `!reset` tag being set on yaml nodes and record position in the yaml tree +func (p *ResetProcessor) resolveReset(node *yaml.Node, path tree.Path) (*yaml.Node, error) { + if node.Tag == "!reset" { + p.paths = append(p.paths, path) + return nil, nil + } + if node.Tag == "!override" { + p.paths = append(p.paths, path) + return node, nil + } + switch node.Kind { + case yaml.SequenceNode: + var nodes []*yaml.Node + for idx, v := range node.Content { + next := path.Next(strconv.Itoa(idx)) + resolved, err := p.resolveReset(v, next) + if err != nil { + return nil, err + } + if resolved != nil { + nodes = append(nodes, resolved) + } + } + node.Content = nodes + case yaml.MappingNode: + var key string + var nodes []*yaml.Node + for idx, v := range node.Content { + if idx%2 == 0 { + key = v.Value + } else { + resolved, err := p.resolveReset(v, path.Next(key)) + if err != nil { + return nil, err + } + if resolved != nil { + nodes = append(nodes, node.Content[idx-1], resolved) + } + } + } + node.Content = nodes + } + return node, nil +} + +// Apply finds the go attributes matching recorded paths and reset them to zero value +func (p *ResetProcessor) Apply(target any) error { + return p.applyNullOverrides(target, tree.NewPath()) +} + +// applyNullOverrides set val to Zero if it matches any of the recorded paths +func (p *ResetProcessor) applyNullOverrides(target any, path tree.Path) error { + switch v := target.(type) { + case map[string]any: + KEYS: + for k, e := range v { + next := path.Next(k) + for _, pattern := range p.paths { + if next.Matches(pattern) { + delete(v, k) + continue KEYS + } + } + err := p.applyNullOverrides(e, next) + if err != nil { + return err + } + } + case []any: + ITER: + for i, e := range v { + next := path.Next(fmt.Sprintf("[%d]", i)) + for _, pattern := range p.paths { + if next.Matches(pattern) { + continue ITER + // TODO(ndeloof) support removal from sequence + } + } + err := p.applyNullOverrides(e, next) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go b/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go new file mode 100644 index 000000000000..7d77609d4f1f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go @@ -0,0 +1,146 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
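The `ResetProcessor` in reset.go above relies on yaml.v3 preserving custom tags such as `!reset` on the node tree. A minimal sketch of the same tag walk, printing dotted paths instead of recording `tree.Path` values:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// findResetTags walks a YAML node tree and prints the paths tagged !reset,
// mirroring (in miniature) what ResetProcessor.resolveReset records.
func findResetTags(node *yaml.Node, path string) {
	if node.Tag == "!reset" {
		fmt.Println("reset at", path)
		return
	}
	switch node.Kind {
	case yaml.DocumentNode:
		for _, c := range node.Content {
			findResetTags(c, path)
		}
	case yaml.MappingNode:
		// mapping content alternates key, value, key, value, ...
		for i := 0; i+1 < len(node.Content); i += 2 {
			findResetTags(node.Content[i+1], path+"."+node.Content[i].Value)
		}
	case yaml.SequenceNode:
		for i, c := range node.Content {
			findResetTags(c, fmt.Sprintf("%s[%d]", path, i))
		}
	}
}

func main() {
	src := []byte("services:\n  app:\n    ports: !reset []\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	findResetTags(&doc, "") // prints: reset at .services.app.ports
}
```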
+*/
+
+package loader
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "strings"
+
+    "github.com/compose-spec/compose-go/v2/errdefs"
+    "github.com/compose-spec/compose-go/v2/graph"
+    "github.com/compose-spec/compose-go/v2/types"
+)
+
+// checkConsistency validates that a compose model is consistent
+func checkConsistency(project *types.Project) error {
+    for _, s := range project.Services {
+        if s.Build == nil && s.Image == "" {
+            return fmt.Errorf("service %q has neither an image nor a build context specified: %w", s.Name, errdefs.ErrInvalid)
+        }
+
+        if s.Build != nil {
+            if s.Build.DockerfileInline != "" && s.Build.Dockerfile != "" {
+                return fmt.Errorf("service %q declares mutually exclusive dockerfile and dockerfile_inline: %w", s.Name, errdefs.ErrInvalid)
+            }
+
+            if len(s.Build.Platforms) > 0 && s.Platform != "" {
+                var found bool
+                for _, platform := range s.Build.Platforms {
+                    if platform == s.Platform {
+                        found = true
+                        break
+                    }
+                }
+                if !found {
+                    return fmt.Errorf("service.build.platforms MUST include service.platform %q: %w", s.Platform, errdefs.ErrInvalid)
+                }
+            }
+        }
+
+        if s.NetworkMode != "" && len(s.Networks) > 0 {
+            return fmt.Errorf("service %s declares mutually exclusive `network_mode` and `networks`: %w", s.Name, errdefs.ErrInvalid)
+        }
+        for network := range s.Networks {
+            if _, ok := project.Networks[network]; !ok {
+                return fmt.Errorf("service %q refers to undefined network %s: %w", s.Name, network, errdefs.ErrInvalid)
+            }
+        }
+
+        if s.HealthCheck != nil && len(s.HealthCheck.Test) > 0 {
+            switch s.HealthCheck.Test[0] {
+            case "CMD", "CMD-SHELL", "NONE":
+            default:
+                return errors.New(`healthcheck.test must start either by "CMD", "CMD-SHELL" or "NONE"`)
+            }
+        }
+
+        for dependedService := range s.DependsOn {
+            if _, err := project.GetService(dependedService); err != nil {
+                return fmt.Errorf("service %q depends on undefined service %s: %w", s.Name, dependedService, errdefs.ErrInvalid)
+            }
+        }
+        // Check there isn't a cycle in depends_on declarations
+        if err := graph.InDependencyOrder(context.Background(), project, func(ctx context.Context, s string, config types.ServiceConfig) error {
+            return nil
+        }); err != nil {
+            return err
+        }
+
+        if strings.HasPrefix(s.NetworkMode, types.ServicePrefix) {
+            serviceName := s.NetworkMode[len(types.ServicePrefix):]
+            if _, err := project.GetServices(serviceName); err != nil {
+                return fmt.Errorf("service %q not found for network_mode 'service:%s'", serviceName, serviceName)
+            }
+        }
+
+        for _, volume := range s.Volumes {
+            if volume.Type == types.VolumeTypeVolume && volume.Source != "" { // non anonymous volumes
+                if _, ok := project.Volumes[volume.Source]; !ok {
+                    return fmt.Errorf("service %q refers to undefined volume %s: %w", s.Name, volume.Source, errdefs.ErrInvalid)
+                }
+            }
+        }
+        if s.Build != nil {
+            for _, secret := range s.Build.Secrets {
+                if _, ok := project.Secrets[secret.Source]; !ok {
+                    return fmt.Errorf("service %q refers to undefined build secret %s: %w", s.Name, secret.Source, errdefs.ErrInvalid)
+                }
+            }
+        }
+        for _, config := range s.Configs {
+            if _, ok := project.Configs[config.Source]; !ok {
+                return fmt.Errorf("service %q refers to undefined config %s: %w", s.Name, config.Source, errdefs.ErrInvalid)
+            }
+        }
+
+        for _, secret := range s.Secrets {
+            if _, ok := project.Secrets[secret.Source]; !ok {
+                return fmt.Errorf("service %q refers to undefined secret %s: %w", s.Name, secret.Source, errdefs.ErrInvalid)
+            }
+        }
+
+        if s.Scale != nil && s.Deploy != nil {
+            if s.Deploy.Replicas != nil && *s.Scale != *s.Deploy.Replicas {
+                return fmt.Errorf("services.%s: can't set distinct values on 'scale' and 'deploy.replicas': %w",
+                    s.Name, errdefs.ErrInvalid)
+            }
+            s.Deploy.Replicas = s.Scale
+        }
+
+        if s.GetScale() > 1 && s.ContainerName != "" {
+            attr := "scale"
+            if s.Scale == nil {
+                attr = "deploy.replicas"
+            }
+            return fmt.Errorf("services.%s: can't set container_name and %s as container name must be unique: %w",
+                s.Name, attr, errdefs.ErrInvalid)
+        }
+    }
+
+    for name, secret := range project.Secrets {
+        if secret.External {
+            continue
+        }
+        if secret.File == "" && secret.Environment == "" {
+            return fmt.Errorf("secret %q must declare either `file` or `environment`: %w", name, errdefs.ErrInvalid)
+        }
+    }
+
+    return nil
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/extends.go b/vendor/github.com/compose-spec/compose-go/v2/override/extends.go
new file mode 100644
index 000000000000..f47912ddffc0
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/override/extends.go
@@ -0,0 +1,27 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package override
+
+import "github.com/compose-spec/compose-go/v2/tree"
+
+func ExtendService(base, override map[string]any) (map[string]any, error) {
+    yaml, err := mergeYaml(base, override, tree.NewPath("services.x"))
+    if err != nil {
+        return nil, err
+    }
+    return yaml.(map[string]any), nil
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/merge.go b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go
new file mode 100644
index 000000000000..cadf26290211
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go
@@ -0,0 +1,253 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
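The consistency pass in validate.go above surfaces through the loader as ordinary errors. A small sketch (same assumptions as the earlier loader example) that trips the first rule, a service declaring neither `image` nor `build`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/compose-spec/compose-go/v2/loader"
	"github.com/compose-spec/compose-go/v2/types"
)

func main() {
	src := []byte("services:\n  app:\n    restart: always\n") // neither image nor build
	_, err := loader.LoadWithContext(context.Background(), types.ConfigDetails{
		ConfigFiles: []types.ConfigFile{{Filename: "compose.yaml", Content: src}},
		Environment: types.Mapping{},
	}, func(o *loader.Options) {
		o.SetProjectName("demo", true)
	})
	// service "app" has neither an image nor a build context specified: ...
	fmt.Println(err)
}
```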
+*/ + +package override + +import ( + "cmp" + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/tree" + "golang.org/x/exp/slices" +) + +// Merge applies overrides to a config model +func Merge(right, left map[string]any) (map[string]any, error) { + merged, err := mergeYaml(right, left, tree.NewPath()) + if err != nil { + return nil, err + } + return merged.(map[string]any), nil +} + +type merger func(any, any, tree.Path) (any, error) + +// mergeSpecials defines the custom rules applied by compose when merging yaml trees +var mergeSpecials = map[tree.Path]merger{} + +func init() { + mergeSpecials["networks.*.ipam.config"] = mergeIPAMConfig + mergeSpecials["services.*.annotations"] = mergeToSequence + mergeSpecials["services.*.build"] = mergeBuild + mergeSpecials["services.*.build.args"] = mergeToSequence + mergeSpecials["services.*.build.additional_contexts"] = mergeToSequence + mergeSpecials["services.*.build.labels"] = mergeToSequence + mergeSpecials["services.*.command"] = override + mergeSpecials["services.*.depends_on"] = mergeDependsOn + mergeSpecials["services.*.deploy.labels"] = mergeToSequence + mergeSpecials["services.*.dns"] = mergeToSequence + mergeSpecials["services.*.dns_opt"] = mergeToSequence + mergeSpecials["services.*.dns_search"] = mergeToSequence + mergeSpecials["services.*.entrypoint"] = override + mergeSpecials["services.*.env_file"] = mergeToSequence + mergeSpecials["services.*.environment"] = mergeToSequence + mergeSpecials["services.*.extra_hosts"] = mergeToSequence + mergeSpecials["services.*.healthcheck.test"] = override + mergeSpecials["services.*.labels"] = mergeToSequence + mergeSpecials["services.*.logging"] = mergeLogging + mergeSpecials["services.*.networks"] = mergeNetworks + mergeSpecials["services.*.sysctls"] = mergeToSequence + mergeSpecials["services.*.tmpfs"] = mergeToSequence + mergeSpecials["services.*.ulimits.*"] = mergeUlimit +} + +// mergeYaml merges map[string]any yaml trees handling special rules +func mergeYaml(e any, o any, p tree.Path) (any, error) { + for pattern, merger := range mergeSpecials { + if p.Matches(pattern) { + merged, err := merger(e, o, p) + if err != nil { + return nil, err + } + return merged, nil + } + } + if o == nil { + return e, nil + } + switch value := e.(type) { + case map[string]any: + other, ok := o.(map[string]any) + if !ok { + return nil, fmt.Errorf("cannot override %s", p) + } + return mergeMappings(value, other, p) + case []any: + other, ok := o.([]any) + if !ok { + return nil, fmt.Errorf("cannot override %s", p) + } + return append(value, other...), nil + default: + return o, nil + } +} + +func mergeMappings(mapping map[string]any, other map[string]any, p tree.Path) (map[string]any, error) { + for k, v := range other { + e, ok := mapping[k] + if !ok || strings.HasPrefix(k, "x-") { + mapping[k] = v + continue + } + next := p.Next(k) + merged, err := mergeYaml(e, v, next) + if err != nil { + return nil, err + } + mapping[k] = merged + } + return mapping, nil +} + +// logging driver options are merged only when both compose file define the same driver +func mergeLogging(c any, o any, p tree.Path) (any, error) { + config := c.(map[string]any) + other := o.(map[string]any) + // we override logging config if source and override have the same driver set, or none + d, ok1 := other["driver"] + o, ok2 := config["driver"] + if d == o || !ok1 || !ok2 { + return mergeMappings(config, other, p) + } + return other, nil +} + +func mergeBuild(c any, o any, path tree.Path) (any, error) { + toBuild := func(c any) 
map[string]any { + switch v := c.(type) { + case string: + return map[string]any{ + "context": v, + } + case map[string]any: + return v + } + return nil + } + return mergeMappings(toBuild(c), toBuild(o), path) +} + +func mergeDependsOn(c any, o any, path tree.Path) (any, error) { + right := convertIntoMapping(c, map[string]any{ + "condition": "service_started", + "required": true, + }) + left := convertIntoMapping(o, map[string]any{ + "condition": "service_started", + "required": true, + }) + return mergeMappings(right, left, path) +} + +func mergeNetworks(c any, o any, path tree.Path) (any, error) { + right := convertIntoMapping(c, nil) + left := convertIntoMapping(o, nil) + return mergeMappings(right, left, path) +} + +func mergeToSequence(c any, o any, _ tree.Path) (any, error) { + right := convertIntoSequence(c) + left := convertIntoSequence(o) + return append(right, left...), nil +} + +func convertIntoSequence(value any) []any { + switch v := value.(type) { + case map[string]any: + seq := make([]any, len(v)) + i := 0 + for k, v := range v { + if v == nil { + seq[i] = k + } else { + seq[i] = fmt.Sprintf("%s=%v", k, v) + } + i++ + } + slices.SortFunc(seq, func(a, b any) int { + return cmp.Compare(a.(string), b.(string)) + }) + return seq + case []any: + return v + case string: + return []any{v} + } + return nil +} + +func mergeUlimit(_ any, o any, p tree.Path) (any, error) { + over, ismapping := o.(map[string]any) + if base, ok := o.(map[string]any); ok && ismapping { + return mergeMappings(base, over, p) + } + return o, nil +} + +func mergeIPAMConfig(c any, o any, path tree.Path) (any, error) { + var ipamConfigs []any + for _, original := range c.([]any) { + right := convertIntoMapping(original, nil) + for _, override := range o.([]any) { + left := convertIntoMapping(override, nil) + if left["subnet"] != right["subnet"] { + // check if left is already in ipamConfigs, add it if not and continue with the next config + if !slices.ContainsFunc(ipamConfigs, func(a any) bool { + return a.(map[string]any)["subnet"] == left["subnet"] + }) { + ipamConfigs = append(ipamConfigs, left) + continue + } + } + merged, err := mergeMappings(right, left, path) + if err != nil { + return nil, err + } + // find index of potential previous config with the same subnet in ipamConfigs + indexIfExist := slices.IndexFunc(ipamConfigs, func(a any) bool { + return a.(map[string]any)["subnet"] == merged["subnet"] + }) + // if a previous config is already in ipamConfigs, replace it + if indexIfExist >= 0 { + ipamConfigs[indexIfExist] = merged + } else { + // or add the new config to ipamConfigs + ipamConfigs = append(ipamConfigs, merged) + } + } + } + return ipamConfigs, nil +} + +func convertIntoMapping(a any, defaultValue any) map[string]any { + switch v := a.(type) { + case map[string]any: + return v + case []any: + converted := map[string]any{} + for _, s := range v { + converted[s.(string)] = defaultValue + } + return converted + } + return nil +} + +func override(_ any, other any, _ tree.Path) (any, error) { + return other, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go b/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go new file mode 100644 index 000000000000..d8f29b5956d4 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go @@ -0,0 +1,204 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
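The merge rules registered in merge.go above can be exercised directly through the exported `Merge`. A small sketch showing the two headline behaviours — `command` is replaced wholesale while `environment` entries are appended — with both input maps purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/override"
)

func main() {
	base := map[string]any{
		"services": map[string]any{
			"app": map[string]any{
				"command":     "server --debug",
				"environment": []any{"LOG=debug"},
			},
		},
	}
	overlay := map[string]any{
		"services": map[string]any{
			"app": map[string]any{
				"command":     "server",
				"environment": []any{"PORT=8080"},
			},
		},
	}
	merged, err := override.Merge(base, overlay)
	if err != nil {
		panic(err)
	}
	app := merged["services"].(map[string]any)["app"].(map[string]any)
	fmt.Println(app["command"])     // server (the "services.*.command" rule overrides)
	fmt.Println(app["environment"]) // [LOG=debug PORT=8080] (sequences are appended)
}
```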
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package override + +import ( + "fmt" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/v2/format" + "github.com/compose-spec/compose-go/v2/tree" +) + +type indexer func(any, tree.Path) (string, error) + +// mergeSpecials defines the custom rules applied by compose when merging yaml trees +var unique = map[tree.Path]indexer{} + +func init() { + unique["networks.*.labels"] = keyValueIndexer + unique["networks.*.ipam.options"] = keyValueIndexer + unique["services.*.annotations"] = keyValueIndexer + unique["services.*.build.args"] = keyValueIndexer + unique["services.*.build.additional_contexts"] = keyValueIndexer + unique["services.*.build.extra_hosts"] = keyValueIndexer + unique["services.*.build.platform"] = keyValueIndexer + unique["services.*.build.tags"] = keyValueIndexer + unique["services.*.build.labels"] = keyValueIndexer + unique["services.*.cap_add"] = keyValueIndexer + unique["services.*.cap_drop"] = keyValueIndexer + unique["services.*.configs"] = mountIndexer("") + unique["services.*.deploy.labels"] = keyValueIndexer + unique["services.*.dns"] = keyValueIndexer + unique["services.*.dns_opt"] = keyValueIndexer + unique["services.*.dns_search"] = keyValueIndexer + unique["services.*.environment"] = keyValueIndexer + unique["services.*.env_file"] = envFileIndexer + unique["services.*.expose"] = exposeIndexer + unique["services.*.extra_hosts"] = keyValueIndexer + unique["services.*.labels"] = keyValueIndexer + unique["services.*.links"] = keyValueIndexer + unique["services.*.networks.*.aliases"] = keyValueIndexer + unique["services.*.networks.*.link_local_ips"] = keyValueIndexer + unique["services.*.ports"] = portIndexer + unique["services.*.profiles"] = keyValueIndexer + unique["services.*.secrets"] = mountIndexer("/run/secrets") + unique["services.*.sysctls"] = keyValueIndexer + unique["services.*.tmpfs"] = keyValueIndexer + unique["services.*.volumes"] = volumeIndexer +} + +// EnforceUnicity removes redefinition of elements declared in a sequence +func EnforceUnicity(value map[string]any) (map[string]any, error) { + uniq, err := enforceUnicity(value, tree.NewPath()) + if err != nil { + return nil, err + } + return uniq.(map[string]any), nil +} + +func enforceUnicity(value any, p tree.Path) (any, error) { + switch v := value.(type) { + case map[string]any: + for k, e := range v { + u, err := enforceUnicity(e, p.Next(k)) + if err != nil { + return nil, err + } + v[k] = u + } + return v, nil + case []any: + for pattern, indexer := range unique { + if p.Matches(pattern) { + seq := []any{} + keys := map[string]int{} + for i, entry := range v { + key, err := indexer(entry, p.Next(fmt.Sprintf("[%d]", i))) + if err != nil { + return nil, err + } + if j, ok := keys[key]; ok { + seq[j] = entry + } else { + seq = append(seq, entry) + keys[key] = len(seq) - 1 + } + } + return seq, nil + } + } + } + return value, nil +} + +func keyValueIndexer(y any, _ tree.Path) (string, error) { + value := y.(string) + key, _, found := strings.Cut(value, "=") + if !found { + return value, nil + } + return key, nil +} + +func volumeIndexer(y any, p tree.Path) (string, error) { + switch 
value := y.(type) { + case map[string]any: + target, ok := value["target"].(string) + if !ok { + return "", fmt.Errorf("service volume %s is missing a mount target", p) + } + return target, nil + case string: + volume, err := format.ParseVolume(value) + if err != nil { + return "", err + } + return volume.Target, nil + } + return "", nil +} + +func exposeIndexer(a any, path tree.Path) (string, error) { + switch v := a.(type) { + case string: + return v, nil + case int: + return strconv.Itoa(v), nil + default: + return "", fmt.Errorf("%s: unsupported expose value %s", path, a) + } +} + +func mountIndexer(defaultPath string) indexer { + return func(a any, path tree.Path) (string, error) { + switch v := a.(type) { + case string: + return fmt.Sprintf("%s/%s", defaultPath, v), nil + case map[string]any: + t, ok := v["target"] + if ok { + return t.(string), nil + } + return fmt.Sprintf("%s/%s", defaultPath, v["source"]), nil + default: + return "", fmt.Errorf("%s: unsupported expose value %s", path, a) + } + } +} + +func portIndexer(y any, p tree.Path) (string, error) { + switch value := y.(type) { + case int: + return strconv.Itoa(value), nil + case map[string]any: + target, ok := value["target"] + if !ok { + return "", fmt.Errorf("service ports %s is missing a target port", p) + } + published, ok := value["published"] + if !ok { + // try to parse it as an int + if pub, ok := value["published"]; ok { + published = fmt.Sprintf("%d", pub) + } + } + host, ok := value["host_ip"] + if !ok { + host = "0.0.0.0" + } + protocol, ok := value["protocol"] + if !ok { + protocol = "tcp" + } + return fmt.Sprintf("%s:%s:%d/%s", host, published, target, protocol), nil + case string: + return value, nil + } + return "", nil +} + +func envFileIndexer(y any, _ tree.Path) (string, error) { + switch value := y.(type) { + case string: + return value, nil + case map[string]any: + return value["path"].(string), nil + } + return "", nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/context.go b/vendor/github.com/compose-spec/compose-go/v2/paths/context.go new file mode 100644 index 000000000000..b4585dc4e0ea --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/context.go @@ -0,0 +1,44 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +import "strings" + +func (r *relativePathsResolver) absContextPath(value any) (any, error) { + v := value.(string) + if strings.Contains(v, "://") { // `docker-image://` or any builder specific context type + return v, nil + } + if isRemoteContext(v) { + return v, nil + } + return r.absPath(v) +} + +// isRemoteContext returns true if the value is a Git reference or HTTP(S) URL. +// +// Any other value is assumed to be a local filesystem path and returns false. 
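Returning to the unicity pass in uncity.go above: after files are merged, `EnforceUnicity` collapses sequence entries that share an index key, keeping the last definition in the position of the first. A small sketch with an illustrative model:

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/override"
)

func main() {
	model := map[string]any{
		"services": map[string]any{
			"app": map[string]any{
				// two entries share the key "LOG"; the later one must win
				"environment": []any{"LOG=info", "PORT=8080", "LOG=debug"},
			},
		},
	}
	uniq, err := override.EnforceUnicity(model)
	if err != nil {
		panic(err)
	}
	app := uniq["services"].(map[string]any)["app"].(map[string]any)
	fmt.Println(app["environment"]) // [LOG=debug PORT=8080]
}
```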
+// +// See: https://github.com/moby/buildkit/blob/18fc875d9bfd6e065cd8211abc639434ba65aa56/frontend/dockerui/context.go#L76-L79 +func isRemoteContext(maybeURL string) bool { + for _, prefix := range []string{"https://", "http://", "git://", "ssh://", "github.com/", "git@"} { + if strings.HasPrefix(maybeURL, prefix) { + return true + } + } + return false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go b/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go new file mode 100644 index 000000000000..aa61a9f9aa75 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go @@ -0,0 +1,25 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +func (r *relativePathsResolver) absExtendsPath(value any) (any, error) { + v := value.(string) + if r.isRemoteResource(v) { + return v, nil + } + return r.absPath(v) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/home.go b/vendor/github.com/compose-spec/compose-go/v2/paths/home.go new file mode 100644 index 000000000000..a5579262be09 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/home.go @@ -0,0 +1,37 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +import ( + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" +) + +func ExpandUser(p string) string { + if strings.HasPrefix(p, "~") { + home, err := os.UserHomeDir() + if err != nil { + logrus.Warn("cannot expand '~', because the environment lacks HOME") + return p + } + return filepath.Join(home, p[1:]) + } + return p +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go b/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go new file mode 100644 index 000000000000..ecfa0e9b6d2f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go @@ -0,0 +1,161 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
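Of the path helpers above, only `ExpandUser` is exported; `isRemoteContext` is internal, but its prefix list is easy to restate. A short sketch showing both behaviours (the loop re-derives the remote check rather than calling the vendored function):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/compose-spec/compose-go/v2/paths"
)

func main() {
	// Exported tilde expansion used throughout the resolvers.
	fmt.Println(paths.ExpandUser("~/compose/data"))

	// Restating isRemoteContext's prefix list: build contexts matching one
	// of these prefixes are left untouched by relative-path resolution.
	prefixes := []string{"https://", "http://", "git://", "ssh://", "github.com/", "git@"}
	for _, ctx := range []string{"https://example.com/app.git", "git@github.com:org/repo.git", "./app"} {
		remote := false
		for _, prefix := range prefixes {
			if strings.HasPrefix(ctx, prefix) {
				remote = true
				break
			}
		}
		fmt.Printf("%s remote=%v\n", ctx, remote)
	}
}
```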
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +import ( + "errors" + "fmt" + "path/filepath" + + "github.com/compose-spec/compose-go/v2/tree" + "github.com/compose-spec/compose-go/v2/types" +) + +type resolver func(any) (any, error) + +// ResolveRelativePaths make relative paths absolute +func ResolveRelativePaths(project map[string]any, base string, remotes []RemoteResource) error { + r := relativePathsResolver{ + workingDir: base, + remotes: remotes, + } + r.resolvers = map[tree.Path]resolver{ + "services.*.build.context": r.absContextPath, + "services.*.build.additional_contexts.*": r.absContextPath, + "services.*.env_file.*.path": r.absPath, + "services.*.extends.file": r.absExtendsPath, + "services.*.develop.watch.*.path": r.absPath, + "services.*.volumes.*": r.absVolumeMount, + "configs.*.file": r.maybeUnixPath, + "secrets.*.file": r.maybeUnixPath, + "include.path": r.absPath, + "include.project_directory": r.absPath, + "include.env_file": r.absPath, + "volumes.*": r.volumeDriverOpts, + } + _, err := r.resolveRelativePaths(project, tree.NewPath()) + return err +} + +type RemoteResource func(path string) bool + +type relativePathsResolver struct { + workingDir string + remotes []RemoteResource + resolvers map[tree.Path]resolver +} + +func (r *relativePathsResolver) isRemoteResource(path string) bool { + for _, remote := range r.remotes { + if remote(path) { + return true + } + } + return false +} + +func (r *relativePathsResolver) resolveRelativePaths(value any, p tree.Path) (any, error) { + for pattern, resolver := range r.resolvers { + if p.Matches(pattern) { + return resolver(value) + } + } + switch v := value.(type) { + case map[string]any: + for k, e := range v { + resolved, err := r.resolveRelativePaths(e, p.Next(k)) + if err != nil { + return nil, err + } + v[k] = resolved + } + case []any: + for i, e := range v { + resolved, err := r.resolveRelativePaths(e, p.Next("[]")) + if err != nil { + return nil, err + } + v[i] = resolved + } + } + return value, nil +} + +func (r *relativePathsResolver) absPath(value any) (any, error) { + switch v := value.(type) { + case []any: + for i, s := range v { + abs, err := r.absPath(s) + if err != nil { + return nil, err + } + v[i] = abs + } + return v, nil + case string: + v = ExpandUser(v) + if filepath.IsAbs(v) { + return v, nil + } + if v != "" { + return filepath.Join(r.workingDir, v), nil + } + return v, nil + } + return nil, fmt.Errorf("unexpected type %T", value) +} + +func (r *relativePathsResolver) absVolumeMount(a any) (any, error) { + vol := a.(map[string]any) + if vol["type"] != types.VolumeTypeBind { + return vol, nil + } + src, ok := vol["source"] + if !ok { + return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`) + } + abs, err := r.maybeUnixPath(src.(string)) + if err != nil { + return nil, err + } + vol["source"] = abs + return vol, nil +} + +func (r *relativePathsResolver) volumeDriverOpts(a any) (any, error) { + if a == nil { + return nil, nil + } + vol := a.(map[string]any) + if vol["driver"] != "local" { + return vol, nil + } + do, ok := vol["driver_opts"] + if !ok { + return vol, nil + } + opts := do.(map[string]any) + if dev, ok := opts["device"]; opts["o"] == "bind" && ok { + // This is actually a bind mount + path, err := r.maybeUnixPath(dev) + if err != nil { + return nil, err + } + opts["device"] = path + } + return vol, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/unix.go 
b/vendor/github.com/compose-spec/compose-go/v2/paths/unix.go new file mode 100644 index 000000000000..dc1c68b62a24 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/unix.go @@ -0,0 +1,40 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +import ( + "path" + "path/filepath" +) + +func (r *relativePathsResolver) maybeUnixPath(a any) (any, error) { + p := a.(string) + p = ExpandUser(p) + // Check if source is an absolute path (either Unix or Windows), to + // handle a Windows client with a Unix daemon or vice-versa. + // + // Note that this is not required for Docker for Windows when specifying + // a local Windows path, because Docker for Windows translates the Windows + // path into a valid path within the VM. + if !path.IsAbs(p) && !isWindowsAbs(p) { + if filepath.IsAbs(p) { + return p, nil + } + return filepath.Join(r.workingDir, p), nil + } + return p, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go b/vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go new file mode 100644 index 000000000000..746aefd15e35 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go @@ -0,0 +1,82 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// https://github.com/golang/go/blob/master/LICENSE + +// This file contains utilities to check for Windows absolute paths on Linux. +// The code in this file was largely copied from the Golang filepath package +// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65 + +func isSlash(c uint8) bool { + return c == '\\' || c == '/' +} + +// isAbs reports whether the path is a Windows absolute path. +func isWindowsAbs(path string) (b bool) { + l := volumeNameLen(path) + if l == 0 { + return false + } + path = path[l:] + if path == "" { + return false + } + return isSlash(path[0]) +} + +// volumeNameLen returns length of the leading volume name on Windows. +// It returns 0 elsewhere. +// nolint: gocyclo +func volumeNameLen(path string) int { + if len(path) < 2 { + return 0 + } + // with drive letter + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + return 2 + } + // is it UNC? 
https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx + if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) && + !isSlash(path[2]) && path[2] != '.' { + // first, leading `\\` and next shouldn't be `\`. its server name. + for n := 3; n < l-1; n++ { + // second, next '\' shouldn't be repeated. + if isSlash(path[n]) { + n++ + // third, following something characters. its share name. + if !isSlash(path[n]) { + if path[n] == '.' { + break + } + for ; n < l; n++ { + if isSlash(path[n]) { + break + } + } + return n + } + break + } + } + } + return 0 +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json new file mode 100644 index 000000000000..1b12822d21c9 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json @@ -0,0 +1,907 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema#", + "id": "compose_spec.json", + "type": "object", + "title": "Compose Specification", + "description": "The Compose file is a YAML file defining a multi-containers based application.", + + "properties": { + "version": { + "type": "string", + "description": "declared for backward compatibility, ignored." + }, + + "name": { + "type": "string", + "pattern": "^[a-z0-9][a-z0-9_-]*$", + "description": "define the Compose project name, until user defines one explicitly." + }, + + "include": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/include" + }, + "description": "compose sub-projects to be included." + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + }, + + "secrets": { + "id": "#/properties/secrets", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false + }, + + "configs": { + "id": "#/properties/configs", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/config" + } + }, + "additionalProperties": false + } + }, + + "patternProperties": {"^x-": {}}, + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "develop": {"$ref": "#/definitions/development"}, + "deploy": {"$ref": "#/definitions/deployment"}, + "annotations": {"$ref": "#/definitions/list_or_dict"}, + "attach": {"type": "boolean"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "dockerfile_inline": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"}, + "ssh": {"$ref": "#/definitions/list_or_dict"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "cache_from": {"type": "array", "items": {"type": "string"}}, + "cache_to": {"type": "array", "items": {"type": "string"}}, + "no_cache": {"type": "boolean"}, + "additional_contexts": {"$ref": "#/definitions/list_or_dict"}, + "network": 
{"type": "string"}, + "pull": {"type": "boolean"}, + "target": {"type": "string"}, + "shm_size": {"type": ["integer", "string"]}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "isolation": {"type": "string"}, + "privileged": {"type": "boolean"}, + "secrets": {"$ref": "#/definitions/service_config_or_secret"}, + "tags": {"type": "array", "items": {"type": "string"}}, + "ulimits": {"$ref": "#/definitions/ulimits"}, + "platforms": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "blkio_config": { + "type": "object", + "properties": { + "device_read_bps": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_read_iops": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_write_bps": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_write_iops": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "weight": {"type": "integer"}, + "weight_device": { + "type": "array", + "items": {"$ref": "#/definitions/blkio_weight"} + } + }, + "additionalProperties": false + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup": {"type": "string", "enum": ["host", "private"]}, + "cgroup_parent": {"type": "string"}, + "command": {"$ref": "#/definitions/command"}, + "configs": {"$ref": "#/definitions/service_config_or_secret"}, + "container_name": {"type": "string"}, + "cpu_count": {"type": "integer", "minimum": 0}, + "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100}, + "cpu_shares": {"type": ["number", "string"]}, + "cpu_quota": {"type": ["number", "string"]}, + "cpu_period": {"type": ["number", "string"]}, + "cpu_rt_period": {"type": ["number", "string"]}, + "cpu_rt_runtime": {"type": ["number", "string"]}, + "cpus": {"type": ["number", "string"]}, + "cpuset": {"type": "string"}, + "credential_spec": { + "type": "object", + "properties": { + "config": {"type": "string"}, + "file": {"type": "string"}, + "registry": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "depends_on": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "type": "object", + "additionalProperties": false, + "properties": { + "restart": {"type": "boolean"}, + "required": { + "type": "boolean", + "default": true + }, + "condition": { + "type": "string", + "enum": ["service_started", "service_healthy", "service_completed_successfully"] + } + }, + "required": ["condition"] + } + } + } + ] + }, + "device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": {"$ref": "#/definitions/command"}, + "env_file": {"$ref": "#/definitions/env_file"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + "extends": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + + 
"properties": { + "service": {"type": "string"}, + "file": {"type": "string"} + }, + "required": ["service"], + "additionalProperties": false + } + ] + }, + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "group_add": { + "type": "array", + "items": { + "type": ["string", "number"] + }, + "uniqueItems": true + }, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "init": {"type": "boolean"}, + "ipc": {"type": "string"}, + "isolation": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "mac_address": {"type": "string"}, + "mem_limit": {"type": ["number", "string"]}, + "mem_reservation": {"type": ["string", "integer"]}, + "mem_swappiness": {"type": "integer"}, + "memswap_limit": {"type": ["number", "string"]}, + "network_mode": {"type": "string"}, + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"}, + "link_local_ips": {"$ref": "#/definitions/list_of_strings"}, + "mac_address": {"type": "string"}, + "priority": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "oom_kill_disable": {"type": "boolean"}, + "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000}, + "pid": {"type": ["string", "null"]}, + "pids_limit": {"type": ["number", "string"]}, + "platform": {"type": "string"}, + "ports": { + "type": "array", + "items": { + "oneOf": [ + {"type": "number", "format": "ports"}, + {"type": "string", "format": "ports"}, + { + "type": "object", + "properties": { + "mode": {"type": "string"}, + "host_ip": {"type": "string"}, + "target": {"type": "integer"}, + "published": {"type": ["string", "integer"]}, + "protocol": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "uniqueItems": true + }, + "privileged": {"type": "boolean"}, + "profiles": {"$ref": "#/definitions/list_of_strings"}, + "pull_policy": {"type": "string", "enum": [ + "always", "never", "if_not_present", "build", "missing" + ]}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "runtime": { + "type": "string" + }, + "scale": { + "type": "integer" + }, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "secrets": {"$ref": "#/definitions/service_config_or_secret"}, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "storage_opt": {"type": "object"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": {"$ref": "#/definitions/ulimits"}, + 
"user": {"type": "string"}, + "uts": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "required": ["type"], + "properties": { + "type": {"type": "string"}, + "source": {"type": "string"}, + "target": {"type": "string"}, + "read_only": {"type": "boolean"}, + "consistency": {"type": "string"}, + "bind": { + "type": "object", + "properties": { + "propagation": {"type": "string"}, + "create_host_path": {"type": "boolean"}, + "selinux": {"type": "string", "enum": ["z", "Z"]} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "volume": { + "type": "object", + "properties": { + "nocopy": {"type": "boolean"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "tmpfs": { + "type": "object", + "properties": { + "size": { + "oneOf": [ + {"type": "integer", "minimum": 0}, + {"type": "string"} + ] + }, + "mode": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "uniqueItems": true + }, + "volumes_from": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + "working_dir": {"type": "string"} + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string", "format": "duration"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string", "format": "duration"}, + "start_period": {"type": "string", "format": "duration"}, + "start_interval": {"type": "string", "format": "duration"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "development": { + "id": "#/definitions/development", + "type": ["object", "null"], + "properties": { + "watch": { + "type": "array", + "items": { + "type": "object", + "properties": { + "ignore": {"type": "array", "items": {"type": "string"}}, + "path": {"type": "string"}, + "action": {"type": "string", "enum": ["rebuild", "sync", "sync+restart"]}, + "target": {"type": "string"} + } + }, + "required": ["path", "action"], + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "endpoint_mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "rollback_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"}, + "order": {"type": "string", "enum": [ + "start-first", "stop-first" + ]} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"}, + "order": {"type": "string", "enum": [ + "start-first", "stop-first" + ]} 
+ }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpus": {"type": ["number", "string"]}, + "memory": {"type": "string"}, + "pids": {"type": "integer"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "reservations": { + "type": "object", + "properties": { + "cpus": {"type": ["number", "string"]}, + "memory": {"type": "string"}, + "generic_resources": {"$ref": "#/definitions/generic_resources"}, + "devices": {"$ref": "#/definitions/devices"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}}, + "preferences": { + "type": "array", + "items": { + "type": "object", + "properties": { + "spread": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "max_replicas_per_node": {"type": "integer"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "generic_resources": { + "id": "#/definitions/generic_resources", + "type": "array", + "items": { + "type": "object", + "properties": { + "discrete_resource_spec": { + "type": "object", + "properties": { + "kind": {"type": "string"}, + "value": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + + "devices": { + "id": "#/definitions/devices", + "type": "array", + "items": { + "type": "object", + "properties": { + "capabilities": {"$ref": "#/definitions/list_of_strings"}, + "count": {"type": ["string", "integer"]}, + "device_ids": {"$ref": "#/definitions/list_of_strings"}, + "driver":{"type": "string"}, + "options":{"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + + "include": { + "id": "#/definitions/include", + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "path": {"$ref": "#/definitions/string_or_list"}, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "project_directory": {"type": "string"} + }, + "additionalProperties": false + } + ] + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "name": {"type": "string"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string", "format": "subnet_ip_address"}, + "ip_range": {"type": "string"}, + "gateway": {"type": "string"}, + "aux_addresses": { + "type": "object", + "additionalProperties": false, + "patternProperties": {"^.+$": {"type": "string"}} + } + }, + "additionalProperties": false, + 
"patternProperties": {"^x-": {}} + } + }, + "options": { + "type": "object", + "additionalProperties": false, + "patternProperties": {"^.+$": {"type": "string"}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": { + "deprecated": true, + "type": "string" + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "internal": {"type": "boolean"}, + "enable_ipv6": {"type": "boolean"}, + "attachable": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "name": {"type": "string"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": { + "deprecated": true, + "type": "string" + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "secret": { + "id": "#/definitions/secret", + "type": "object", + "properties": { + "name": {"type": "string"}, + "environment": {"type": "string"}, + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "template_driver": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "config": { + "id": "#/definitions/config", + "type": "object", + "properties": { + "name": {"type": "string"}, + "content": {"type": "string"}, + "environment": {"type": "string"}, + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": { + "deprecated": true, + "type": "string" + } + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "template_driver": {"type": "string"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "command": { + "oneOf": [ + {"type": "null"}, + {"type": "string"}, + {"type": "array","items": {"type": "string"}} + ] + }, + + "env_file": { + "oneOf": [ + {"type": "string"}, + { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "additionalProperties": false, + "properties": { + "path": { + "type": "string" + }, + "required": { + "type": "boolean", + "default": true + } + }, + "required": [ + "path" + ] + } + ] + } + } + ] + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "boolean", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "blkio_limit": { + "type": "object", + "properties": { + "path": {"type": "string"}, + "rate": {"type": ["integer", "string"]} + }, + "additionalProperties": false + }, + 
"blkio_weight": { + "type": "object", + "properties": { + "path": {"type": "string"}, + "weight": {"type": "integer"} + }, + "additionalProperties": false + }, + "service_config_or_secret": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + }, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type": "object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + } + }, + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go b/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go new file mode 100644 index 000000000000..bfbaa93557b9 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go @@ -0,0 +1,164 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package schema + +import ( + // Enable support for embedded static resources + _ "embed" + "fmt" + "strings" + "time" + + "github.com/xeipuuv/gojsonschema" +) + +type portsFormatChecker struct{} + +func (checker portsFormatChecker) IsFormat(_ interface{}) bool { + // TODO: implement this + return true +} + +type durationFormatChecker struct{} + +func (checker durationFormatChecker) IsFormat(input interface{}) bool { + value, ok := input.(string) + if !ok { + return false + } + _, err := time.ParseDuration(value) + return err == nil +} + +func init() { + gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{}) +} + +// Schema is the compose-spec JSON schema +// +//go:embed compose-spec.json +var Schema string + +// Validate uses the jsonschema to validate the configuration +func Validate(config map[string]interface{}) error { + schemaLoader := gojsonschema.NewStringLoader(Schema) + dataLoader := gojsonschema.NewGoLoader(config) + + result, err := gojsonschema.Validate(schemaLoader, dataLoader) + if err != nil { + return err + } + + if !result.Valid() { + return toError(result) + } + + return nil +} + +func toError(result *gojsonschema.Result) error { + err := getMostSpecificError(result.Errors()) + return err +} + +const ( + jsonschemaOneOf = "number_one_of" + jsonschemaAnyOf = "number_any_of" +) + +func getDescription(err validationError) string { + switch err.parent.Type() { + case "invalid_type": + if expectedType, ok := err.parent.Details()["expected"].(string); ok { + return fmt.Sprintf("must be a %s", humanReadableType(expectedType)) + } + case jsonschemaOneOf, jsonschemaAnyOf: + if err.child == nil { + return err.parent.Description() + } + return err.child.Description() + } + return err.parent.Description() +} + +func humanReadableType(definition string) string { + if definition[0:1] == "[" { + allTypes := strings.Split(definition[1:len(definition)-1], ",") + for i, t := range allTypes { + allTypes[i] = humanReadableType(t) + } + return fmt.Sprintf( + "%s or %s", + strings.Join(allTypes[0:len(allTypes)-1], ", "), + allTypes[len(allTypes)-1], + ) + } + if definition == "object" { + return "mapping" + } + if definition == "array" { + return "list" + } + return definition +} + +type validationError struct { + parent gojsonschema.ResultError + child gojsonschema.ResultError +} + +func (err validationError) Error() string { + description := getDescription(err) + return fmt.Sprintf("%s %s", err.parent.Field(), description) +} + +func getMostSpecificError(errors []gojsonschema.ResultError) validationError { + mostSpecificError := 0 + for i, err := range errors { + if specificity(err) > specificity(errors[mostSpecificError]) { + mostSpecificError = i + continue + } + + if specificity(err) == specificity(errors[mostSpecificError]) { + // Invalid type errors win in a tie-breaker for most specific field name + if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" { + mostSpecificError = i + } + } + } + + if mostSpecificError+1 == len(errors) { + return validationError{parent: errors[mostSpecificError]} + } + + switch errors[mostSpecificError].Type() { + case "number_one_of", "number_any_of": + return validationError{ + parent: errors[mostSpecificError], + child: errors[mostSpecificError+1], + } + default: + return validationError{parent: errors[mostSpecificError]} + } +} + +func specificity(err 
gojsonschema.ResultError) int {
+	return len(strings.Split(err.Field(), "."))
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/template/template.go b/vendor/github.com/compose-spec/compose-go/v2/template/template.go
new file mode 100644
index 000000000000..9367f3954695
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/template/template.go
@@ -0,0 +1,473 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package template
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+var delimiter = "\\$"
+var substitutionNamed = "[_a-z][_a-z0-9]*"
+var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-+?](.*))?"
+
+var groupEscaped = "escaped"
+var groupNamed = "named"
+var groupBraced = "braced"
+var groupInvalid = "invalid"
+
+var patternString = fmt.Sprintf(
+	"%s(?i:(?P<%s>%s)|(?P<%s>%s)|{(?:(?P<%s>%s)}|(?P<%s>)))",
+	delimiter,
+	groupEscaped, delimiter,
+	groupNamed, substitutionNamed,
+	groupBraced, substitutionBraced,
+	groupInvalid,
+)
+
+var defaultPattern = regexp.MustCompile(patternString)
+
+// InvalidTemplateError is returned when a variable template is not in a valid
+// format
+type InvalidTemplateError struct {
+	Template string
+}
+
+func (e InvalidTemplateError) Error() string {
+	return fmt.Sprintf("Invalid template: %#v", e.Template)
+}
+
+// MissingRequiredError is returned when a required variable template is
+// missing a value
+type MissingRequiredError struct {
+	Variable string
+	Reason   string
+}
+
+func (e MissingRequiredError) Error() string {
+	if e.Reason != "" {
+		return fmt.Sprintf("required variable %s is missing a value: %s", e.Variable, e.Reason)
+	}
+	return fmt.Sprintf("required variable %s is missing a value", e.Variable)
+}
+
+// Mapping is a user-supplied function which maps from variable names to values.
+// Returns the value as a string and a bool indicating whether
+// the value is present, to distinguish between an empty string
+// and the absence of a value.
+type Mapping func(string) (string, bool)
+
+// SubstituteFunc is a user-supplied function that applies a substitution.
+// Returns the value as a string, a bool indicating whether the function could
+// apply the substitution, and an error.
+type SubstituteFunc func(string, Mapping) (string, bool, error)
+
+// ReplacementFunc is a user-supplied function that is applied to the matching
+// substring. Returns the value as a string and an error.
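
A minimal sketch of how a caller might drive this validator, assuming the compose file has already been decoded into a generic map (the service definition below is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/schema"
    )

    func main() {
    	// An already-decoded compose model; in practice this comes from
    	// unmarshalling a compose YAML file.
    	config := map[string]interface{}{
    		"services": map[string]interface{}{
    			"web": map[string]interface{}{
    				"image": "nginx:alpine",
    				"ports": []interface{}{"8080:80"},
    			},
    		},
    	}
    	if err := schema.Validate(config); err != nil {
    		// getMostSpecificError has already reduced the gojsonschema
    		// result to a single, human-readable validation error.
    		fmt.Println("invalid compose model:", err)
    		return
    	}
    	fmt.Println("compose model is valid")
    }
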
+type ReplacementFunc func(string, Mapping, *Config) (string, error) + +type Config struct { + pattern *regexp.Regexp + substituteFunc SubstituteFunc + replacementFunc ReplacementFunc + logging bool +} + +type Option func(*Config) + +func WithPattern(pattern *regexp.Regexp) Option { + return func(cfg *Config) { + cfg.pattern = pattern + } +} + +func WithSubstitutionFunction(subsFunc SubstituteFunc) Option { + return func(cfg *Config) { + cfg.substituteFunc = subsFunc + } +} + +func WithReplacementFunction(replacementFunc ReplacementFunc) Option { + return func(cfg *Config) { + cfg.replacementFunc = replacementFunc + } +} + +func WithoutLogging(cfg *Config) { + cfg.logging = false +} + +// SubstituteWithOptions substitute variables in the string with their values. +// It accepts additional options such as a custom function or pattern. +func SubstituteWithOptions(template string, mapping Mapping, options ...Option) (string, error) { + var returnErr error + + cfg := &Config{ + pattern: defaultPattern, + replacementFunc: DefaultReplacementFunc, + logging: true, + } + for _, o := range options { + o(cfg) + } + + result := cfg.pattern.ReplaceAllStringFunc(template, func(substring string) string { + replacement, err := cfg.replacementFunc(substring, mapping, cfg) + if err != nil { + // Add the template for template errors + var tmplErr *InvalidTemplateError + if errors.As(err, &tmplErr) { + if tmplErr.Template == "" { + tmplErr.Template = template + } + } + // Save the first error to be returned + if returnErr == nil { + returnErr = err + } + + } + return replacement + }) + + return result, returnErr +} + +func DefaultReplacementFunc(substring string, mapping Mapping, cfg *Config) (string, error) { + value, _, err := DefaultReplacementAppliedFunc(substring, mapping, cfg) + return value, err +} + +func DefaultReplacementAppliedFunc(substring string, mapping Mapping, cfg *Config) (string, bool, error) { + pattern := cfg.pattern + subsFunc := cfg.substituteFunc + if subsFunc == nil { + _, subsFunc = getSubstitutionFunctionForTemplate(substring) + } + + closingBraceIndex := getFirstBraceClosingIndex(substring) + rest := "" + if closingBraceIndex > -1 { + rest = substring[closingBraceIndex+1:] + substring = substring[0 : closingBraceIndex+1] + } + + matches := pattern.FindStringSubmatch(substring) + groups := matchGroups(matches, pattern) + if escaped := groups[groupEscaped]; escaped != "" { + return escaped, true, nil + } + + braced := false + substitution := groups[groupNamed] + if substitution == "" { + substitution = groups[groupBraced] + braced = true + } + + if substitution == "" { + return "", false, &InvalidTemplateError{} + } + + if braced { + value, applied, err := subsFunc(substitution, mapping) + if err != nil { + return "", false, err + } + if applied { + interpolatedNested, err := SubstituteWith(rest, mapping, pattern) + if err != nil { + return "", false, err + } + return value + interpolatedNested, true, nil + } + } + + value, ok := mapping(substitution) + if !ok && cfg.logging { + logrus.Warnf("The %q variable is not set. Defaulting to a blank string.", substitution) + } + + return value, ok, nil +} + +// SubstituteWith substitute variables in the string with their values. +// It accepts additional substitute function. 
+func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { + options := []Option{ + WithPattern(pattern), + } + if len(subsFuncs) > 0 { + options = append(options, WithSubstitutionFunction(subsFuncs[0])) + } + + return SubstituteWithOptions(template, mapping, options...) +} + +func getSubstitutionFunctionForTemplate(template string) (string, SubstituteFunc) { + interpolationMapping := []struct { + string + SubstituteFunc + }{ + {":?", requiredErrorWhenEmptyOrUnset}, + {"?", requiredErrorWhenUnset}, + {":-", defaultWhenEmptyOrUnset}, + {"-", defaultWhenUnset}, + {":+", defaultWhenNotEmpty}, + {"+", defaultWhenSet}, + } + sort.Slice(interpolationMapping, func(i, j int) bool { + idxI := strings.Index(template, interpolationMapping[i].string) + idxJ := strings.Index(template, interpolationMapping[j].string) + if idxI < 0 { + return false + } + if idxJ < 0 { + return true + } + return idxI < idxJ + }) + + return interpolationMapping[0].string, interpolationMapping[0].SubstituteFunc +} + +func getFirstBraceClosingIndex(s string) int { + openVariableBraces := 0 + for i := 0; i < len(s); i++ { + if s[i] == '}' { + openVariableBraces-- + if openVariableBraces == 0 { + return i + } + } + if strings.HasPrefix(s[i:], "${") { + openVariableBraces++ + i++ + } + } + return -1 +} + +// Substitute variables in the string with their values +func Substitute(template string, mapping Mapping) (string, error) { + return SubstituteWith(template, mapping, defaultPattern) +} + +// ExtractVariables returns a map of all the variables defined in the specified +// composefile (dict representation) and their default value if any. +func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable { + if pattern == nil { + pattern = defaultPattern + } + return recurseExtract(configDict, pattern) +} + +func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable { + m := map[string]Variable{} + + switch value := value.(type) { + case string: + if values, is := extractVariable(value, pattern); is { + for _, v := range values { + m[v.Name] = v + } + } + case map[string]interface{}: + for _, elem := range value { + submap := recurseExtract(elem, pattern) + for key, value := range submap { + m[key] = value + } + } + + case []interface{}: + for _, elem := range value { + if values, is := extractVariable(elem, pattern); is { + for _, v := range values { + m[v.Name] = v + } + } + } + } + + return m +} + +type Variable struct { + Name string + DefaultValue string + PresenceValue string + Required bool +} + +func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) { + sValue, ok := value.(string) + if !ok { + return []Variable{}, false + } + matches := pattern.FindAllStringSubmatch(sValue, -1) + if len(matches) == 0 { + return []Variable{}, false + } + values := []Variable{} + for _, match := range matches { + groups := matchGroups(match, pattern) + if escaped := groups[groupEscaped]; escaped != "" { + continue + } + val := groups[groupNamed] + if val == "" { + val = groups[groupBraced] + } + name := val + var defaultValue string + var presenceValue string + var required bool + switch { + case strings.Contains(val, ":?"): + name, _ = partition(val, ":?") + required = true + case strings.Contains(val, "?"): + name, _ = partition(val, "?") + required = true + case strings.Contains(val, ":-"): + name, defaultValue = partition(val, ":-") + case strings.Contains(val, "-"): + name, 
defaultValue = partition(val, "-") + case strings.Contains(val, ":+"): + name, presenceValue = partition(val, ":+") + case strings.Contains(val, "+"): + name, presenceValue = partition(val, "+") + } + values = append(values, Variable{ + Name: name, + DefaultValue: defaultValue, + PresenceValue: presenceValue, + Required: required, + }) + } + return values, len(values) > 0 +} + +// Soft default (fall back if unset or empty) +func defaultWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenAbsence(substitution, mapping, true) +} + +// Hard default (fall back if-and-only-if empty) +func defaultWhenUnset(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenAbsence(substitution, mapping, false) +} + +func defaultWhenNotEmpty(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenPresence(substitution, mapping, true) +} + +func defaultWhenSet(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenPresence(substitution, mapping, false) +} + +func requiredErrorWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) { + return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" }) +} + +func requiredErrorWhenUnset(substitution string, mapping Mapping) (string, bool, error) { + return withRequired(substitution, mapping, "?", func(_ string) bool { return true }) +} + +func withDefaultWhenPresence(substitution string, mapping Mapping, notEmpty bool) (string, bool, error) { + sep := "+" + if notEmpty { + sep = ":+" + } + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, defaultValue := partition(substitution, sep) + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } + value, ok := mapping(name) + if ok && (!notEmpty || (notEmpty && value != "")) { + return defaultValue, true, nil + } + return value, true, nil +} + +func withDefaultWhenAbsence(substitution string, mapping Mapping, emptyOrUnset bool) (string, bool, error) { + sep := "-" + if emptyOrUnset { + sep = ":-" + } + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, defaultValue := partition(substitution, sep) + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } + value, ok := mapping(name) + if !ok || (emptyOrUnset && value == "") { + return defaultValue, true, nil + } + return value, true, nil +} + +func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) { + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, errorMessage := partition(substitution, sep) + errorMessage, err := Substitute(errorMessage, mapping) + if err != nil { + return "", false, err + } + value, ok := mapping(name) + if !ok || !valid(value) { + return "", true, &MissingRequiredError{ + Reason: errorMessage, + Variable: name, + } + } + return value, true, nil +} + +func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { + groups := make(map[string]string) + for i, name := range pattern.SubexpNames()[1:] { + groups[name] = matches[i+1] + } + return groups +} + +// Split the string at the first occurrence of sep, and return the part before the separator, +// and the part after the separator. +// +// If the separator is not found, return the string itself, followed by an empty string. 
+func partition(s, sep string) (string, string) { + if strings.Contains(s, sep) { + parts := strings.SplitN(s, sep, 2) + return parts[0], parts[1] + } + return s, "" +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/build.go b/vendor/github.com/compose-spec/compose-go/v2/transform/build.go new file mode 100644 index 000000000000..88ac05bf9911 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/build.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformBuild(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + return transformMapping(v, p) + case string: + return map[string]any{ + "context": v, + }, nil + default: + return data, fmt.Errorf("%s: invalid type %T for build", p, v) + } +} + +func defaultBuildContext(data any, _ tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + if _, ok := v["context"]; !ok { + v["context"] = "." + } + return v, nil + default: + return data, nil + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go b/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go new file mode 100644 index 000000000000..4b6603fcb088 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go @@ -0,0 +1,107 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
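
A minimal sketch of the template package in use, assuming a caller-supplied environment map. Substitute resolves ${VAR} references through the Mapping, and the braced operators (:-, -, :?, ?, :+, +) are dispatched by getSubstitutionFunctionForTemplate:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/template"
    )

    func main() {
    	env := map[string]string{"TAG": "v1.2"}
    	// The Mapping distinguishes "unset" from "set to empty string".
    	mapping := func(name string) (string, bool) {
    		v, ok := env[name]
    		return v, ok
    	}
    	out, err := template.Substitute("app:${TAG:-latest} host=${HOST:-localhost}", mapping)
    	if err != nil {
    		fmt.Println("substitution failed:", err)
    		return
    	}
    	fmt.Println(out) // app:v1.2 host=localhost
    }
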
+*/ + +package transform + +import ( + "github.com/compose-spec/compose-go/v2/tree" +) + +type transformFunc func(data any, p tree.Path) (any, error) + +var transformers = map[tree.Path]transformFunc{} + +func init() { + transformers["services.*"] = transformService + transformers["services.*.build.secrets.*"] = transformFileMount + transformers["services.*.build.additional_contexts"] = transformKeyValue + transformers["services.*.depends_on"] = transformDependsOn + transformers["services.*.env_file"] = transformEnvFile + transformers["services.*.extends"] = transformExtends + transformers["services.*.networks"] = transformServiceNetworks + transformers["services.*.volumes.*"] = transformVolumeMount + transformers["services.*.secrets.*"] = transformFileMount + transformers["services.*.configs.*"] = transformFileMount + transformers["services.*.ports"] = transformPorts + transformers["services.*.build"] = transformBuild + transformers["services.*.build.ssh"] = transformSSH + transformers["services.*.ulimits.*"] = transformUlimits + transformers["services.*.build.ulimits.*"] = transformUlimits + transformers["volumes.*"] = transformMaybeExternal + transformers["networks.*"] = transformMaybeExternal + transformers["secrets.*"] = transformMaybeExternal + transformers["configs.*"] = transformMaybeExternal + transformers["include.*"] = transformInclude +} + +// Canonical transforms a compose model into canonical syntax +func Canonical(yaml map[string]any) (map[string]any, error) { + canonical, err := transform(yaml, tree.NewPath()) + if err != nil { + return nil, err + } + return canonical.(map[string]any), nil +} + +func transform(data any, p tree.Path) (any, error) { + for pattern, transformer := range transformers { + if p.Matches(pattern) { + t, err := transformer(data, p) + if err != nil { + return nil, err + } + return t, nil + } + } + switch v := data.(type) { + case map[string]any: + a, err := transformMapping(v, p) + if err != nil { + return a, err + } + return v, nil + case []any: + a, err := transformSequence(v, p) + if err != nil { + return a, err + } + return v, nil + default: + return data, nil + } +} + +func transformSequence(v []any, p tree.Path) ([]any, error) { + for i, e := range v { + t, err := transform(e, p.Next("[]")) + if err != nil { + return nil, err + } + v[i] = t + } + return v, nil +} + +func transformMapping(v map[string]any, p tree.Path) (map[string]any, error) { + for k, e := range v { + t, err := transform(e, p.Next(k)) + if err != nil { + return nil, err + } + v[k] = t + } + return v, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go b/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go new file mode 100644 index 000000000000..49293b721c05 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go @@ -0,0 +1,87 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
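
A sketch of what the Canonical entry point does to common short forms, under an illustrative model literal:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/transform"
    )

    func main() {
    	model := map[string]any{
    		"services": map[string]any{
    			"app": map[string]any{
    				"build":    ".",              // becomes {"context": "."}
    				"extends":  "base",           // becomes {"service": "base"}
    				"networks": []any{"backend"}, // becomes {"backend": nil}
    			},
    		},
    	}
    	canonical, err := transform.Canonical(model)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Printf("%v\n", canonical["services"])
    }
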
+*/ + +package transform + +import ( + "github.com/compose-spec/compose-go/v2/tree" +) + +var defaultValues = map[tree.Path]transformFunc{} + +func init() { + defaultValues["services.*.build"] = defaultBuildContext + defaultValues["services.*.secrets.*"] = defaultSecretMount +} + +// SetDefaultValues transforms a compose model to set default values to missing attributes +func SetDefaultValues(yaml map[string]any) (map[string]any, error) { + result, err := setDefaults(yaml, tree.NewPath()) + if err != nil { + return nil, err + } + return result.(map[string]any), nil +} + +func setDefaults(data any, p tree.Path) (any, error) { + for pattern, transformer := range defaultValues { + if p.Matches(pattern) { + t, err := transformer(data, p) + if err != nil { + return nil, err + } + return t, nil + } + } + switch v := data.(type) { + case map[string]any: + a, err := setDefaultsMapping(v, p) + if err != nil { + return a, err + } + return v, nil + case []any: + a, err := setDefaultsSequence(v, p) + if err != nil { + return a, err + } + return v, nil + default: + return data, nil + } +} + +func setDefaultsSequence(v []any, p tree.Path) ([]any, error) { + for i, e := range v { + t, err := setDefaults(e, p.Next("[]")) + if err != nil { + return nil, err + } + v[i] = t + } + return v, nil +} + +func setDefaultsMapping(v map[string]any, p tree.Path) (map[string]any, error) { + for k, e := range v { + t, err := setDefaults(e, p.Next(k)) + if err != nil { + return nil, err + } + v[k] = t + } + return v, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go b/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go new file mode 100644 index 000000000000..8c6e1ed37dbb --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformDependsOn(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + for i, e := range v { + d, ok := e.(map[string]any) + if !ok { + return nil, fmt.Errorf("%s.%s: unsupported value %s", p, i, v) + } + if _, ok := d["condition"]; !ok { + d["condition"] = "service_started" + } + if _, ok := d["required"]; !ok { + d["required"] = true + } + } + return v, nil + case []any: + d := map[string]any{} + for _, k := range v { + d[k.(string)] = map[string]any{ + "condition": "service_started", + "required": true, + } + } + return d, nil + default: + return data, fmt.Errorf("%s: invalid type %T for depend_on", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go b/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go new file mode 100644 index 000000000000..842303b1a551 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go @@ -0,0 +1,55 @@ +/* + Copyright 2020 The Compose Specification Authors. 
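
A sketch of SetDefaultValues filling in a secret mount target via defaultSecretMount (that transformer appears later in this patch, in transform/secrets.go); the model literal is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/transform"
    )

    func main() {
    	model := map[string]any{
    		"services": map[string]any{
    			"app": map[string]any{
    				"secrets": []any{
    					map[string]any{"source": "db_password"}, // no explicit target
    				},
    			},
    		},
    	}
    	withDefaults, err := transform.SetDefaultValues(model)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	// the secret target defaults to /run/secrets/db_password
    	fmt.Printf("%v\n", withDefaults["services"])
    }
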
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformEnvFile(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case string: + return []any{ + transformEnvFileValue(v), + }, nil + case []any: + for i, e := range v { + v[i] = transformEnvFileValue(e) + } + return v, nil + default: + return nil, fmt.Errorf("%s: invalid type %T for env_file", p, v) + } +} + +func transformEnvFileValue(data any) any { + switch v := data.(type) { + case string: + return map[string]any{ + "path": v, + "required": true, + } + case map[string]any: + if _, ok := v["required"]; !ok { + v["required"] = true + } + return v + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go b/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go new file mode 100644 index 000000000000..9f77176ecdf7 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go @@ -0,0 +1,36 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformExtends(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + return transformMapping(v, p) + case string: + return map[string]any{ + "service": v, + }, nil + default: + return data, fmt.Errorf("%s: invalid type %T for extends", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/external.go b/vendor/github.com/compose-spec/compose-go/v2/transform/external.go new file mode 100644 index 000000000000..9e24eb83eb97 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/external.go @@ -0,0 +1,54 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
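
Since transformEnvFile is registered under services.*.env_file in canonical.go above, the short string form normalizes as in this illustrative sketch:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/transform"
    )

    func main() {
    	model := map[string]any{
    		"services": map[string]any{
    			"app": map[string]any{"env_file": ".env"},
    		},
    	}
    	canonical, err := transform.Canonical(model)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	// env_file is now a list with one entry: map[path:.env required:true]
    	fmt.Printf("%v\n", canonical["services"])
    }
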
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+	"github.com/sirupsen/logrus"
+)
+
+func transformMaybeExternal(data any, p tree.Path) (any, error) {
+	if data == nil {
+		return nil, nil
+	}
+	resource, err := transformMapping(data.(map[string]any), p)
+	if err != nil {
+		return nil, err
+	}
+
+	if ext, ok := resource["external"]; ok {
+		name, named := resource["name"]
+		if external, ok := ext.(map[string]any); ok {
+			resource["external"] = true
+			if extname, extNamed := external["name"]; extNamed {
+				logrus.Warnf("%s: external.name is deprecated. Please set name and external: true", p)
+				if named && extname != name {
+					return nil, fmt.Errorf("%s: name and external.name conflict; only use name", p)
+				}
+				if !named {
+					// adopt (deprecated) external.name if set
+					resource["name"] = extname
+					return resource, nil
+				}
+			}
+		}
+	}
+
+	return resource, nil
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/include.go b/vendor/github.com/compose-spec/compose-go/v2/transform/include.go
new file mode 100644
index 000000000000..2488483a6646
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/transform/include.go
@@ -0,0 +1,36 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+func transformInclude(data any, p tree.Path) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case string:
+		return map[string]any{
+			"path": v,
+		}, nil
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for include", p, v)
+	}
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go b/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go
new file mode 100644
index 000000000000..b247f84f655d
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go
@@ -0,0 +1,43 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
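
A sketch of the deprecated external.name adoption handled by transformMaybeExternal above, with an illustrative volume definition; the call also logs a deprecation warning:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/transform"
    )

    func main() {
    	model := map[string]any{
    		"volumes": map[string]any{
    			"data": map[string]any{
    				"external": map[string]any{"name": "legacy-data"},
    			},
    		},
    	}
    	canonical, err := transform.Canonical(model)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	// external collapses to true and the deprecated name is adopted:
    	// map[data:map[external:true name:legacy-data]]
    	fmt.Printf("%v\n", canonical["volumes"])
    }
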
+*/ + +package transform + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformKeyValue(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case []any: + mapping := map[string]any{} + for _, e := range v { + before, after, found := strings.Cut(e.(string), "=") + if !found { + return nil, fmt.Errorf("%s: invalid value %s, expected key=value", p, e) + } + mapping[before] = after + } + return mapping, nil + default: + return nil, fmt.Errorf("%s: invalid type %T", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go b/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go new file mode 100644 index 000000000000..9dc72e95a9cb --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go @@ -0,0 +1,86 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" + "github.com/compose-spec/compose-go/v2/types" + "github.com/mitchellh/mapstructure" +) + +func transformPorts(data any, p tree.Path) (any, error) { + switch entries := data.(type) { + case []any: + // We process the list instead of individual items here. + // The reason is that one entry might be mapped to multiple ServicePortConfig. + // Therefore we take an input of a list and return an output of a list. + var ports []any + for _, entry := range entries { + switch value := entry.(type) { + case int: + parsed, err := types.ParsePortConfig(fmt.Sprint(value)) + if err != nil { + return data, err + } + for _, v := range parsed { + m, err := encode(v) + if err != nil { + return nil, err + } + ports = append(ports, m) + } + case string: + parsed, err := types.ParsePortConfig(value) + if err != nil { + return data, err + } + if err != nil { + return nil, err + } + for _, v := range parsed { + m, err := encode(v) + if err != nil { + return nil, err + } + ports = append(ports, m) + } + case map[string]any: + ports = append(ports, value) + default: + return data, fmt.Errorf("%s: invalid type %T for port", p, value) + } + } + return ports, nil + default: + return data, fmt.Errorf("%s: invalid type %T for port", p, entries) + } +} + +func encode(v any) (map[string]any, error) { + m := map[string]any{} + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &m, + TagName: "yaml", + }) + if err != nil { + return nil, err + } + err = decoder.Decode(v) + return m, err +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go b/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go new file mode 100644 index 000000000000..1e84d18db140 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go @@ -0,0 +1,49 @@ +/* + Copyright 2020 The Compose Specification Authors. 
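
transformPorts relies on types.ParsePortConfig, which lives elsewhere in the vendored types package; as the comment in transformPorts notes, one entry can expand to several port configs. An illustrative sketch, assuming a published range:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/types"
    )

    func main() {
    	// A published range yields one config per port, which is why
    	// transformPorts rebuilds the whole list rather than single items.
    	parsed, err := types.ParsePortConfig("8080-8081:80-81/tcp")
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	for _, p := range parsed {
    		fmt.Printf("%+v\n", p)
    	}
    }
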
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformFileMount(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + return data, nil + case string: + return map[string]any{ + "source": v, + }, nil + default: + return nil, fmt.Errorf("%s: unsupported type %T", p, data) + } +} + +func defaultSecretMount(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + source := v["source"] + if _, ok := v["target"]; !ok { + v["target"] = fmt.Sprintf("/run/secrets/%s", source) + } + return v, nil + default: + return nil, fmt.Errorf("%s: unsupported type %T", p, data) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/services.go b/vendor/github.com/compose-spec/compose-go/v2/transform/services.go new file mode 100644 index 000000000000..c14114528a8a --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/services.go @@ -0,0 +1,41 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformService(data any, p tree.Path) (any, error) { + switch value := data.(type) { + case map[string]any: + return transformMapping(value, p) + default: + return value, nil + } +} + +func transformServiceNetworks(data any, _ tree.Path) (any, error) { + if slice, ok := data.([]any); ok { + networks := make(map[string]any, len(slice)) + for _, net := range slice { + networks[net.(string)] = nil + } + return networks, nil + } + return data, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go b/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go new file mode 100644 index 000000000000..48e2a6cfbab1 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go @@ -0,0 +1,51 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+func transformSSH(data any, p tree.Path) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case []any:
+		result := make(map[string]any, len(v))
+		for _, e := range v {
+			s, ok := e.(string)
+			if !ok {
+				return nil, fmt.Errorf("invalid ssh key type %T", e)
+			}
+			id, path, ok := strings.Cut(s, "=")
+			if !ok {
+				if id != "default" {
+					return nil, fmt.Errorf("invalid ssh key %q", s)
+				}
+				result[id] = nil
+				continue
+			}
+			result[id] = path
+		}
+		return result, nil
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for ssh", p, v)
+	}
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go b/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go
new file mode 100644
index 000000000000..de7784c323f9
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go
@@ -0,0 +1,34 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package transform
+
+import (
+	"fmt"
+
+	"github.com/compose-spec/compose-go/v2/tree"
+)
+
+func transformUlimits(data any, p tree.Path) (any, error) {
+	switch v := data.(type) {
+	case map[string]any:
+		return v, nil
+	case int:
+		return v, nil
+	default:
+		return data, fmt.Errorf("%s: invalid type %T for ulimits", p, v)
+	}
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go b/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go
new file mode 100644
index 000000000000..e6dd8795721e
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go
@@ -0,0 +1,49 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
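
A sketch of the list form of build.ssh being normalized by transformSSH above, through Canonical (the key path is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/transform"
    )

    func main() {
    	model := map[string]any{
    		"services": map[string]any{
    			"app": map[string]any{
    				"build": map[string]any{
    					"context": ".",
    					"ssh":     []any{"default", "deploy=/home/me/.ssh/deploy_key"},
    				},
    			},
    		},
    	}
    	canonical, err := transform.Canonical(model)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	// ssh becomes map[default:<nil> deploy:/home/me/.ssh/deploy_key]
    	fmt.Printf("%v\n", canonical["services"])
    }
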
+*/ + +package transform + +import ( + "fmt" + "path" + + "github.com/compose-spec/compose-go/v2/format" + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformVolumeMount(data any, p tree.Path) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case string: + volume, err := format.ParseVolume(v) // TODO(ndeloof) ParseVolume should not rely on types and return map[string] + if err != nil { + return nil, err + } + volume.Target = cleanTarget(volume.Target) + + return encode(volume) + default: + return data, fmt.Errorf("%s: invalid type %T for service volume mount", p, v) + } +} + +func cleanTarget(target string) string { + if target == "" { + return "" + } + return path.Clean(target) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/tree/path.go b/vendor/github.com/compose-spec/compose-go/v2/tree/path.go new file mode 100644 index 000000000000..f8a8d9a64a77 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/tree/path.go @@ -0,0 +1,87 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tree + +import ( + "strings" +) + +const pathSeparator = "." + +// PathMatchAll is a token used as part of a Path to match any key at that level +// in the nested structure +const PathMatchAll = "*" + +// PathMatchList is a token used as part of a Path to match items in a list +const PathMatchList = "[]" + +// Path is a dotted path of keys to a value in a nested mapping structure. A * +// section in a path will match any key in the mapping structure. +type Path string + +// NewPath returns a new Path +func NewPath(items ...string) Path { + return Path(strings.Join(items, pathSeparator)) +} + +// Next returns a new path by append part to the current path +func (p Path) Next(part string) Path { + if p == "" { + return Path(part) + } + part = strings.ReplaceAll(part, pathSeparator, "👻") + return Path(string(p) + pathSeparator + part) +} + +func (p Path) Parts() []string { + return strings.Split(string(p), pathSeparator) +} + +func (p Path) Matches(pattern Path) bool { + patternParts := pattern.Parts() + parts := p.Parts() + + if len(patternParts) != len(parts) { + return false + } + for index, part := range parts { + switch patternParts[index] { + case PathMatchAll, part: + continue + default: + return false + } + } + return true +} + +func (p Path) Last() string { + parts := p.Parts() + return parts[len(parts)-1] +} + +func (p Path) Parent() Path { + index := strings.LastIndex(string(p), pathSeparator) + if index > 0 { + return p[0:index] + } + return "" +} + +func (p Path) String() string { + return strings.ReplaceAll(string(p), "👻", pathSeparator) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go b/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go new file mode 100644 index 000000000000..1b2cd4196bcf --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. 
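
tree.Path is the matching primitive behind the transformer registries above; a short sketch of its semantics:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/tree"
    )

    func main() {
    	p := tree.NewPath("services", "web", "volumes", "[]")
    	fmt.Println(p.Matches("services.*.volumes.*")) // true: * matches any one part
    	fmt.Println(p.Parent())                        // services.web.volumes
    	fmt.Println(p.Last())                          // []
    }
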
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UnitBytes is the bytes type +type UnitBytes int64 + +// MarshalYAML makes UnitBytes implement yaml.Marshaller +func (u UnitBytes) MarshalYAML() (interface{}, error) { + return fmt.Sprintf("%d", u), nil +} + +// MarshalJSON makes UnitBytes implement json.Marshaler +func (u UnitBytes) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%d"`, u)), nil +} + +func (u *UnitBytes) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case int: + *u = UnitBytes(v) + case string: + b, err := units.RAMInBytes(fmt.Sprint(value)) + *u = UnitBytes(b) + return err + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/command.go b/vendor/github.com/compose-spec/compose-go/v2/types/command.go new file mode 100644 index 000000000000..90d575d8cde0 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/command.go @@ -0,0 +1,86 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import "github.com/mattn/go-shellwords" + +// ShellCommand is a string or list of string args. +// +// When marshaled to YAML, nil command fields will be omitted if `omitempty` +// is specified as a struct tag. Explicitly empty commands (i.e. `[]` or +// empty string will serialize to an empty array (`[]`). +// +// When marshaled to JSON, the `omitempty` struct must NOT be specified. +// If the command field is nil, it will be serialized as `null`. +// Explicitly empty commands (i.e. `[]` or empty string) will serialize to +// an empty array (`[]`). +// +// The distinction between nil and explicitly empty is important to distinguish +// between an unset value and a provided, but empty, value, which should be +// preserved so that it can override any base value (e.g. container entrypoint). +// +// The different semantics between YAML and JSON are due to limitations with +// JSON marshaling + `omitempty` in the Go stdlib, while gopkg.in/yaml.v3 gives +// us more flexibility via the yaml.IsZeroer interface. +// +// In the future, it might make sense to make fields of this type be +// `*ShellCommand` to avoid this situation, but that would constitute a +// breaking change. +type ShellCommand []string + +// IsZero returns true if the slice is nil. +// +// Empty (but non-nil) slices are NOT considered zero values. 
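
A sketch of UnitBytes decoding; the string form goes through units.RAMInBytes, so size suffixes are interpreted as 1024-based:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/types"
    )

    func main() {
    	var b types.UnitBytes
    	// Human-readable sizes are accepted wherever the schema allows a string.
    	if err := b.DecodeMapstructure("1gb"); err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println(int64(b)) // 1073741824
    }
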
+func (s ShellCommand) IsZero() bool { + // we do NOT want len(s) == 0, ONLY explicitly nil + return s == nil +} + +// MarshalYAML returns nil (which will be serialized as `null`) for nil slices +// and delegates to the standard marshaller behavior otherwise. +// +// NOTE: Typically the nil case here is not hit because IsZero has already +// short-circuited marshalling, but this ensures that the type serializes +// accurately if the `omitempty` struct tag is omitted/forgotten. +// +// A similar MarshalJSON() implementation is not needed because the Go stdlib +// already serializes nil slices to `null`, whereas gopkg.in/yaml.v3 by default +// serializes nil slices to `[]`. +func (s ShellCommand) MarshalYAML() (interface{}, error) { + if s == nil { + return nil, nil + } + return []string(s), nil +} + +func (s *ShellCommand) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + cmd, err := shellwords.Parse(v) + if err != nil { + return err + } + *s = cmd + case []interface{}: + cmd := make([]string, len(v)) + for i, s := range v { + cmd[i] = s.(string) + } + *s = cmd + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/config.go b/vendor/github.com/compose-spec/compose-go/v2/types/config.go new file mode 100644 index 000000000000..27487ca2801d --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/config.go @@ -0,0 +1,135 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "runtime" + "strings" + + "github.com/mitchellh/mapstructure" +) + +var ( + // isCaseInsensitiveEnvVars is true on platforms where environment variable names are treated case-insensitively. + isCaseInsensitiveEnvVars = (runtime.GOOS == "windows") +) + +// ConfigDetails are the details about a group of ConfigFiles +type ConfigDetails struct { + Version string + WorkingDir string + ConfigFiles []ConfigFile + Environment Mapping +} + +// LookupEnv provides a lookup function for environment variables +func (cd *ConfigDetails) LookupEnv(key string) (string, bool) { + v, ok := cd.Environment[key] + if !isCaseInsensitiveEnvVars || ok { + return v, ok + } + // variable names must be treated case-insensitively on some platforms (that is, Windows). + // Resolves in this way: + // * Return the value if its name matches with the passed name case-sensitively. + // * Otherwise, return the value if its lower-cased name matches lower-cased passed name. + // * The value is indefinite if multiple variables match. + lowerKey := strings.ToLower(key) + for k, v := range cd.Environment { + if strings.ToLower(k) == lowerKey { + return v, true + } + } + return "", false +} + +// ConfigFile is a filename and the contents of the file as a Dict +type ConfigFile struct { + // Filename is the name of the yaml configuration file + Filename string + // Content is the raw yaml content. 
Will be loaded from Filename if not set
+	Content []byte
+	// Config is the yaml tree for this config file. Will be parsed from Content if not set
+	Config map[string]interface{}
+}
+
+func ToConfigFiles(path []string) (f []ConfigFile) {
+	for _, p := range path {
+		f = append(f, ConfigFile{Filename: p})
+	}
+	return
+}
+
+// Config is a full compose file configuration and model
+type Config struct {
+	Filename   string          `yaml:"-" json:"-"`
+	Name       string          `yaml:"name,omitempty" json:"name,omitempty"`
+	Services   Services        `yaml:"services" json:"services"`
+	Networks   Networks        `yaml:"networks,omitempty" json:"networks,omitempty"`
+	Volumes    Volumes         `yaml:"volumes,omitempty" json:"volumes,omitempty"`
+	Secrets    Secrets         `yaml:"secrets,omitempty" json:"secrets,omitempty"`
+	Configs    Configs         `yaml:"configs,omitempty" json:"configs,omitempty"`
+	Extensions Extensions      `yaml:",inline" json:"-"`
+	Include    []IncludeConfig `yaml:"include,omitempty" json:"include,omitempty"`
+}
+
+// Volumes is a map of VolumeConfig
+type Volumes map[string]VolumeConfig
+
+// Networks is a map of NetworkConfig
+type Networks map[string]NetworkConfig
+
+// Secrets is a map of SecretConfig
+type Secrets map[string]SecretConfig
+
+// Configs is a map of ConfigObjConfig
+type Configs map[string]ConfigObjConfig
+
+// Extensions is a map of custom extensions
+type Extensions map[string]interface{}
+
+// MarshalJSON makes Config implement json.Marshaler
+func (c Config) MarshalJSON() ([]byte, error) {
+	m := map[string]interface{}{
+		"services": c.Services,
+	}
+
+	if len(c.Networks) > 0 {
+		m["networks"] = c.Networks
+	}
+	if len(c.Volumes) > 0 {
+		m["volumes"] = c.Volumes
+	}
+	if len(c.Secrets) > 0 {
+		m["secrets"] = c.Secrets
+	}
+	if len(c.Configs) > 0 {
+		m["configs"] = c.Configs
+	}
+	for k, v := range c.Extensions {
+		m[k] = v
+	}
+	return json.Marshal(m)
+}
+
+func (e Extensions) Get(name string, target interface{}) (bool, error) {
+	if v, ok := e[name]; ok {
+		err := mapstructure.Decode(v, target)
+		return true, err
+	}
+	return false, nil
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/develop.go b/vendor/github.com/compose-spec/compose-go/v2/types/develop.go
new file mode 100644
index 000000000000..110591d31716
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/types/develop.go
@@ -0,0 +1,38 @@
+/*
+   Copyright 2020 The Compose Specification Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
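
A sketch of Extensions.Get decoding an x-* extension into a caller-defined struct; the owner type and x-owner key here are hypothetical:

    package main

    import (
    	"fmt"

    	"github.com/compose-spec/compose-go/v2/types"
    )

    // owner is a hypothetical extension payload used only for this sketch.
    type owner struct {
    	Team string `mapstructure:"team"`
    }

    func main() {
    	ext := types.Extensions{
    		"x-owner": map[string]interface{}{"team": "platform"},
    	}
    	var o owner
    	found, err := ext.Get("x-owner", &o)
    	fmt.Println(found, err, o.Team) // true <nil> platform
    }
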
+*/ + +package types + +type DevelopConfig struct { + Watch []Trigger `json:"watch,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +type WatchAction string + +const ( + WatchActionSync WatchAction = "sync" + WatchActionRebuild WatchAction = "rebuild" + WatchActionSyncRestart WatchAction = "sync+restart" +) + +type Trigger struct { + Path string `json:"path,omitempty"` + Action WatchAction `json:"action,omitempty"` + Target string `json:"target,omitempty"` + Ignore []string `json:"ignore,omitempty"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/device.go b/vendor/github.com/compose-spec/compose-go/v2/types/device.go new file mode 100644 index 000000000000..240e87786b1b --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/device.go @@ -0,0 +1,52 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" + "strconv" + "strings" +) + +type DeviceRequest struct { + Capabilities []string `yaml:"capabilities,omitempty" json:"capabilities,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Count DeviceCount `yaml:"count,omitempty" json:"count,omitempty"` + IDs []string `yaml:"device_ids,omitempty" json:"device_ids,omitempty"` +} + +type DeviceCount int64 + +func (c *DeviceCount) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case int: + *c = DeviceCount(v) + case string: + if strings.ToLower(v) == "all" { + *c = -1 + return nil + } + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("invalid value %q, the only value allowed is 'all' or a number", v) + } + *c = DeviceCount(i) + default: + return fmt.Errorf("invalid type %T for device count", v) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/duration.go b/vendor/github.com/compose-spec/compose-go/v2/types/duration.go new file mode 100644 index 000000000000..95f562a7cf2b --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/duration.go @@ -0,0 +1,60 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "encoding/json" + "fmt" + "strings" + "time" +) + +// Duration is a thin wrapper around time.Duration with improved JSON marshalling +type Duration time.Duration + +func (d Duration) String() string { + return time.Duration(d).String() +} + +func (d *Duration) DecodeMapstructure(value interface{}) error { + v, err := time.ParseDuration(fmt.Sprint(value)) + if err != nil { + return err + } + *d = Duration(v) + return nil +} + +// MarshalJSON makes Duration implement json.Marshaler +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// MarshalYAML makes Duration implement yaml.Marshaler +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +func (d *Duration) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), "\"") + timeDuration, err := time.ParseDuration(s) + if err != nil { + return err + } + *d = Duration(timeDuration) + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go b/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go new file mode 100644 index 000000000000..f0fa72213867 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go @@ -0,0 +1,46 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" +) + +type EnvFile struct { + Path string `yaml:"path,omitempty" json:"path,omitempty"` + Required bool `yaml:"required" json:"required"` +} + +// MarshalYAML makes EnvFile implement yaml.Marshaler +func (e EnvFile) MarshalYAML() (interface{}, error) { + if e.Required { + return e.Path, nil + } + return map[string]any{ + "path": e.Path, + "required": e.Required, + }, nil +} + +// MarshalJSON makes EnvFile implement json.Marshaler +func (e *EnvFile) MarshalJSON() ([]byte, error) { + if e.Required { + return json.Marshal(e.Path) + } + // Pass as a value to avoid re-entering this method and use the default implementation + return json.Marshal(*e) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go b/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go new file mode 100644 index 000000000000..c6c3b37e0d5a --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" +) + +// HealthCheckConfig the healthcheck configuration for a service +type HealthCheckConfig struct { + Test HealthCheckTest `yaml:"test,omitempty" json:"test,omitempty"` + Timeout *Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"` + Interval *Duration `yaml:"interval,omitempty" json:"interval,omitempty"` + Retries *uint64 `yaml:"retries,omitempty" json:"retries,omitempty"` + StartPeriod *Duration `yaml:"start_period,omitempty" json:"start_period,omitempty"` + StartInterval *Duration `yaml:"start_interval,omitempty" json:"start_interval,omitempty"` + Disable bool `yaml:"disable,omitempty" json:"disable,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// HealthCheckTest is the command run to test the health of a service +type HealthCheckTest []string + +func (l *HealthCheckTest) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{"CMD-SHELL", v} + case []interface{}: + seq := make([]string, len(v)) + for i, e := range v { + seq[i] = e.(string) + } + *l = seq + default: + return fmt.Errorf("unexpected value type %T for healthcheck.test", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go b/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go new file mode 100644 index 000000000000..e224b8166968 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go @@ -0,0 +1,133 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// HostsList is a list of colon-separated host-ip mappings +type HostsList map[string][]string + +// NewHostsList creates a HostsList from a list of `host=ip` strings +func NewHostsList(hosts []string) (HostsList, error) { + list := HostsList{} + for _, s := range hosts { + var found bool + for _, sep := range hostListSerapators { + host, ip, ok := strings.Cut(s, sep) + if ok { + // Mapping found with this separator, stop here. + if ips, ok := list[host]; ok { + list[host] = append(ips, ip) + } else { + list[host] = []string{ip} + } + found = true + break + } + } + if !found { + return nil, fmt.Errorf("invalid additional host, missing IP: %s", s) + } + } + err := list.cleanup() + return list, err +} + +// AsList returns host-ip mappings as a list of strings, using the given +// separator. The Docker Engine API expects ':' separators, the original format +// for '--add-hosts'. But an '=' separator is used in YAML/JSON renderings to +// make IPv6 addresses more readable (for example "my-host=::1" instead of +// "my-host:::1"). 
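+//
+// For illustration, assuming a single entry mapping "my-host" to "::1"
+// (values are hypothetical, not part of this patch):
+//
+//	HostsList{"my-host": {"::1"}}.AsList("=") // -> ["my-host=::1"]
+//	HostsList{"my-host": {"::1"}}.AsList(":") // -> ["my-host:::1"]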
+func (h HostsList) AsList(sep string) []string { + l := make([]string, 0, len(h)) + for k, v := range h { + for _, ip := range v { + l = append(l, fmt.Sprintf("%s%s%s", k, sep, ip)) + } + } + return l +} + +func (h HostsList) MarshalYAML() (interface{}, error) { + list := h.AsList("=") + sort.Strings(list) + return list, nil +} + +func (h HostsList) MarshalJSON() ([]byte, error) { + list := h.AsList("=") + sort.Strings(list) + return json.Marshal(list) +} + +var hostListSerapators = []string{"=", ":"} + +func (h *HostsList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + list := make(HostsList, len(v)) + for i, e := range v { + if e == nil { + e = "" + } + list[i] = []string{fmt.Sprint(e)} + } + err := list.cleanup() + if err != nil { + return err + } + *h = list + return nil + case []interface{}: + s := make([]string, len(v)) + for i, e := range v { + s[i] = fmt.Sprint(e) + } + list, err := NewHostsList(s) + if err != nil { + return err + } + *h = list + return nil + default: + return fmt.Errorf("unexpected value type %T for mapping", value) + } +} + +func (h HostsList) cleanup() error { + for host, ips := range h { + // Check that there is a hostname and that it doesn't contain either + // of the allowed separators, to generate a clearer error than the + // engine would do if it splits the string differently. + if host == "" || strings.ContainsAny(host, ":=") { + return fmt.Errorf("bad host name '%s'", host) + } + for i, ip := range ips { + // Remove brackets from IP addresses (for example "[::1]" -> "::1"). + if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' { + ips[i] = ip[1 : len(ip)-1] + } + } + h[host] = ips + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/labels.go b/vendor/github.com/compose-spec/compose-go/v2/types/labels.go new file mode 100644 index 000000000000..000476bf6959 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/labels.go @@ -0,0 +1,77 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" + "strings" +) + +// Labels is a mapping type for labels +type Labels map[string]string + +func (l Labels) Add(key, value string) Labels { + if l == nil { + l = Labels{} + } + l[key] = value + return l +} + +func (l Labels) AsList() []string { + s := make([]string, len(l)) + i := 0 + for k, v := range l { + s[i] = fmt.Sprintf("%s=%s", k, v) + i++ + } + return s +} + +// label value can be a string | number | boolean | null (empty) +func labelValue(e interface{}) string { + if e == nil { + return "" + } + switch v := e.(type) { + case string: + return v + default: + return fmt.Sprint(v) + } +} + +func (l *Labels) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + labels := make(map[string]string, len(v)) + for k, e := range v { + labels[k] = labelValue(e) + } + *l = labels + case []interface{}: + labels := make(map[string]string, len(v)) + for _, s := range v { + k, e, _ := strings.Cut(fmt.Sprint(s), "=") + labels[k] = labelValue(e) + } + *l = labels + default: + return fmt.Errorf("unexpected value type %T for labels", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go b/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go new file mode 100644 index 000000000000..de6fb1233df4 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go @@ -0,0 +1,217 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// MappingWithEquals is a mapping type that can be converted from a list of +// key[=value] strings. +// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`. +// For the key without value (`key`), the mapped value is set to nil. 
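+//
+// A minimal sketch of the conversion, assuming illustrative keys:
+//
+//	NewMappingWithEquals([]string{"FOO=bar", "EMPTY=", "UNSET"})
+//	// FOO -> pointer to "bar", EMPTY -> pointer to "", UNSET -> nil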
+type MappingWithEquals map[string]*string + +// NewMappingWithEquals build a new Mapping from a set of KEY=VALUE strings +func NewMappingWithEquals(values []string) MappingWithEquals { + mapping := MappingWithEquals{} + for _, env := range values { + tokens := strings.SplitN(env, "=", 2) + if len(tokens) > 1 { + mapping[tokens[0]] = &tokens[1] + } else { + mapping[env] = nil + } + } + return mapping +} + +// OverrideBy update MappingWithEquals with values from another MappingWithEquals +func (m MappingWithEquals) OverrideBy(other MappingWithEquals) MappingWithEquals { + for k, v := range other { + m[k] = v + } + return m +} + +// Resolve update a MappingWithEquals for keys without value (`key`, but not `key=`) +func (m MappingWithEquals) Resolve(lookupFn func(string) (string, bool)) MappingWithEquals { + for k, v := range m { + if v == nil { + if value, ok := lookupFn(k); ok { + m[k] = &value + } + } + } + return m +} + +// RemoveEmpty excludes keys that are not associated with a value +func (m MappingWithEquals) RemoveEmpty() MappingWithEquals { + for k, v := range m { + if v == nil { + delete(m, k) + } + } + return m +} + +func (m *MappingWithEquals) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + mapping := make(MappingWithEquals, len(v)) + for k, e := range v { + mapping[k] = mappingValue(e) + } + *m = mapping + case []interface{}: + mapping := make(MappingWithEquals, len(v)) + for _, s := range v { + k, e, ok := strings.Cut(fmt.Sprint(s), "=") + if !ok { + mapping[k] = nil + } else { + mapping[k] = mappingValue(e) + } + } + *m = mapping + default: + return fmt.Errorf("unexpected value type %T for mapping", value) + } + return nil +} + +// label value can be a string | number | boolean | null +func mappingValue(e interface{}) *string { + if e == nil { + return nil + } + switch v := e.(type) { + case string: + return &v + default: + s := fmt.Sprint(v) + return &s + } +} + +// Mapping is a mapping type that can be converted from a list of +// key[=value] strings. +// For the key with an empty value (`key=`), or key without value (`key`), the +// mapped value is set to an empty string `""`. 
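+//
+// For illustration, contrast with MappingWithEquals above (assumed inputs):
+//
+//	NewMapping([]string{"FOO=bar", "UNSET"})
+//	// FOO -> "bar", UNSET -> "" (this variant holds no nil values)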
+type Mapping map[string]string + +// NewMapping build a new Mapping from a set of KEY=VALUE strings +func NewMapping(values []string) Mapping { + mapping := Mapping{} + for _, value := range values { + parts := strings.SplitN(value, "=", 2) + key := parts[0] + switch { + case len(parts) == 1: + mapping[key] = "" + default: + mapping[key] = parts[1] + } + } + return mapping +} + +// convert values into a set of KEY=VALUE strings +func (m Mapping) Values() []string { + values := make([]string, 0, len(m)) + for k, v := range m { + values = append(values, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(values) + return values +} + +// ToMappingWithEquals converts Mapping into a MappingWithEquals with pointer references +func (m Mapping) ToMappingWithEquals() MappingWithEquals { + mapping := MappingWithEquals{} + for k, v := range m { + v := v + mapping[k] = &v + } + return mapping +} + +func (m Mapping) Resolve(s string) (string, bool) { + v, ok := m[s] + return v, ok +} + +func (m Mapping) Clone() Mapping { + clone := Mapping{} + for k, v := range m { + clone[k] = v + } + return clone +} + +// Merge adds all values from second mapping which are not already defined +func (m Mapping) Merge(o Mapping) Mapping { + for k, v := range o { + if _, set := m[k]; !set { + m[k] = v + } + } + return m +} + +func (m *Mapping) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + mapping := make(Mapping, len(v)) + for k, e := range v { + if e == nil { + e = "" + } + mapping[k] = fmt.Sprint(e) + } + *m = mapping + case []interface{}: + *m = decodeMapping(v, "=") + default: + return fmt.Errorf("unexpected value type %T for mapping", value) + } + return nil +} + +// Generate a mapping by splitting strings at any of seps, which will be tried +// in-order for each input string. (For example, to allow the preferred 'host=ip' +// in 'extra_hosts', as well as 'host:ip' for backwards compatibility.) +func decodeMapping(v []interface{}, seps ...string) map[string]string { + mapping := make(Mapping, len(v)) + for _, s := range v { + for i, sep := range seps { + k, e, ok := strings.Cut(fmt.Sprint(s), sep) + if ok { + // Mapping found with this separator, stop here. + mapping[k] = e + break + } else if i == len(seps)-1 { + // No more separators to try, map to empty string. + mapping[k] = "" + } + } + } + return mapping +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/options.go b/vendor/github.com/compose-spec/compose-go/v2/types/options.go new file mode 100644 index 000000000000..c603d051307b --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/options.go @@ -0,0 +1,42 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package types
+
+import "fmt"
+
+// Options is a mapping type for options we pass as-is to container runtime
+type Options map[string]string
+
+func (d *Options) DecodeMapstructure(value interface{}) error {
+ switch v := value.(type) {
+ case map[string]interface{}:
+ m := make(map[string]string)
+ for key, e := range v {
+ if e == nil {
+ m[key] = ""
+ } else {
+ m[key] = fmt.Sprint(e)
+ }
+ }
+ *d = m
+ case map[string]string:
+ *d = v
+ default:
+ return fmt.Errorf("invalid type %T for options", value)
+ }
+ return nil
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/project.go b/vendor/github.com/compose-spec/compose-go/v2/types/project.go
new file mode 100644
index 000000000000..afd787efe9a9
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/types/project.go
@@ -0,0 +1,664 @@
+/*
+ Copyright 2020 The Compose Specification Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/compose-spec/compose-go/v2/dotenv"
+ "github.com/compose-spec/compose-go/v2/utils"
+ "github.com/distribution/reference"
+ "github.com/mitchellh/copystructure"
+ godigest "github.com/opencontainers/go-digest"
+ "golang.org/x/exp/maps"
+ "golang.org/x/sync/errgroup"
+ "gopkg.in/yaml.v3"
+)
+
+// Project is the result of loading a set of compose files.
+// Since v2, Projects are managed as immutable objects: each public function
+// which mutates Project state returns a copy of the original Project with the expected changes.
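+//
+// A usage sketch, assuming a previously loaded `project`:
+//
+//	frontendOnly, err := project.WithProfiles([]string{"frontend"})
+//	// `project` is left unchanged; non-matching services are moved
+//	// to DisabledServices in the returned copy.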
+type Project struct {
+ Name string `yaml:"name,omitempty" json:"name,omitempty"`
+ WorkingDir string `yaml:"-" json:"-"`
+ Services Services `yaml:"services" json:"services"`
+ Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"`
+ Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"`
+ Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"`
+ Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"`
+ Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` // https://github.com/golang/go/issues/6213
+
+ ComposeFiles []string `yaml:"-" json:"-"`
+ Environment Mapping `yaml:"-" json:"-"`
+
+ // DisabledServices tracks services which have been disabled because their profile is not active
+ DisabledServices Services `yaml:"-" json:"-"`
+ Profiles []string `yaml:"-" json:"-"`
+}
+
+// ServiceNames returns the names of all services in this Compose config
+func (p *Project) ServiceNames() []string {
+ var names []string
+ for k := range p.Services {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ return names
+}
+
+// DisabledServiceNames returns the names of all disabled services in this Compose config
+func (p *Project) DisabledServiceNames() []string {
+ var names []string
+ for k := range p.DisabledServices {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ return names
+}
+
+// VolumeNames returns the names of all volumes in this Compose config
+func (p *Project) VolumeNames() []string {
+ var names []string
+ for k := range p.Volumes {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ return names
+}
+
+// NetworkNames returns the names of all networks in this Compose config
+func (p *Project) NetworkNames() []string {
+ var names []string
+ for k := range p.Networks {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ return names
+}
+
+// SecretNames returns the names of all secrets in this Compose config
+func (p *Project) SecretNames() []string {
+ var names []string
+ for k := range p.Secrets {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ return names
+}
+
+// ConfigNames returns the names of all configs in this Compose config
+func (p *Project) ConfigNames() []string {
+ var names []string
+ for k := range p.Configs {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ return names
+}
+
+func (p *Project) ServicesWithBuild() []string {
+ servicesBuild := p.Services.Filter(func(s ServiceConfig) bool {
+ return s.Build != nil && s.Build.Context != ""
+ })
+ return maps.Keys(servicesBuild)
+}
+
+func (p *Project) ServicesWithExtends() []string {
+ servicesExtends := p.Services.Filter(func(s ServiceConfig) bool {
+ return s.Extends != nil && *s.Extends != (ExtendsConfig{})
+ })
+ return maps.Keys(servicesExtends)
+}
+
+func (p *Project) ServicesWithDependsOn() []string {
+ servicesDependsOn := p.Services.Filter(func(s ServiceConfig) bool {
+ return len(s.DependsOn) > 0
+ })
+ return maps.Keys(servicesDependsOn)
+}
+
+func (p *Project) ServicesWithCapabilities() ([]string, []string, []string) {
+ capabilities := []string{}
+ gpu := []string{}
+ tpu := []string{}
+ for _, service := range p.Services {
+ deploy := service.Deploy
+ if deploy == nil {
+ continue
+ }
+ reservation := deploy.Resources.Reservations
+ if reservation == nil {
+ continue
+ }
+ devices := reservation.Devices
+ for _, d := range devices {
+ if len(d.Capabilities) > 0 {
+ capabilities = append(capabilities, service.Name)
+ }
+ for _, c := range d.Capabilities {
+ if c == "gpu" {
+ gpu = append(gpu, service.Name)
+ } else if c == "tpu" {
+ tpu = append(tpu, service.Name)
+ }
+ }
+ }
+ }
+
+ return utils.RemoveDuplicates(capabilities), utils.RemoveDuplicates(gpu), utils.RemoveDuplicates(tpu)
+}
+
+// GetServices retrieves services by name, or returns all services if no name is specified
+func (p *Project) GetServices(names ...string) (Services, error) {
+ if len(names) == 0 {
+ return p.Services, nil
+ }
+ services := Services{}
+ for _, name := range names {
+ service, err := p.GetService(name)
+ if err != nil {
+ return nil, err
+ }
+ services[name] = service
+ }
+ return services, nil
+}
+
+func (p *Project) getServicesByNames(names ...string) (Services, []string) {
+ if len(names) == 0 {
+ return p.Services, nil
+ }
+ services := Services{}
+ var servicesNotFound []string
+ for _, name := range names {
+ service, ok := p.Services[name]
+ if !ok {
+ servicesNotFound = append(servicesNotFound, name)
+ continue
+ }
+ services[name] = service
+ }
+ return services, servicesNotFound
+}
+
+// GetDisabledService retrieves a disabled service by name
+func (p Project) GetDisabledService(name string) (ServiceConfig, error) {
+ service, ok := p.DisabledServices[name]
+ if !ok {
+ return ServiceConfig{}, fmt.Errorf("no such service: %s", name)
+ }
+ return service, nil
+}
+
+// GetService retrieves a specific service by name
+func (p *Project) GetService(name string) (ServiceConfig, error) {
+ service, ok := p.Services[name]
+ if !ok {
+ _, ok := p.DisabledServices[name]
+ if ok {
+ return ServiceConfig{}, fmt.Errorf("service %s is disabled", name)
+ }
+ return ServiceConfig{}, fmt.Errorf("no such service: %s", name)
+ }
+ return service, nil
+}
+
+func (p *Project) AllServices() Services {
+ all := Services{}
+ for name, service := range p.Services {
+ all[name] = service
+ }
+ for name, service := range p.DisabledServices {
+ all[name] = service
+ }
+ return all
+}
+
+type ServiceFunc func(name string, service *ServiceConfig) error
+
+// ForEachService runs ServiceFunc on each service and dependencies according to DependencyPolicy
+func (p *Project) ForEachService(names []string, fn ServiceFunc, options ...DependencyOption) error {
+ if len(options) == 0 {
+ // backward compatibility
+ options = []DependencyOption{IncludeDependencies}
+ }
+ return p.withServices(names, fn, map[string]bool{}, options, map[string]ServiceDependency{})
+}
+
+type withServicesOptions struct {
+ dependencyPolicy int
+}
+
+const (
+ includeDependencies = iota
+ includeDependents
+ ignoreDependencies
+)
+
+func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]bool, options []DependencyOption, dependencies map[string]ServiceDependency) error {
+ services, servicesNotFound := p.getServicesByNames(names...)
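+ // An unresolved name is tolerated only when it was reached through an
+ // optional (required: false) dependency; any other unknown service is
+ // rejected below.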
+ if len(servicesNotFound) > 0 {
+ for _, serviceNotFound := range servicesNotFound {
+ if dependency, ok := dependencies[serviceNotFound]; !ok || dependency.Required {
+ return fmt.Errorf("no such service: %s", serviceNotFound)
+ }
+ }
+ }
+ opts := withServicesOptions{
+ dependencyPolicy: includeDependencies,
+ }
+ for _, option := range options {
+ option(&opts)
+ }
+
+ for name, service := range services {
+ if seen[name] {
+ continue
+ }
+ seen[name] = true
+ var dependencies map[string]ServiceDependency
+ switch opts.dependencyPolicy {
+ case includeDependents:
+ dependencies = utils.MapsAppend(dependencies, p.dependentsForService(service))
+ case includeDependencies:
+ dependencies = utils.MapsAppend(dependencies, service.DependsOn)
+ case ignoreDependencies:
+ // Noop
+ }
+ if len(dependencies) > 0 {
+ err := p.withServices(utils.MapKeys(dependencies), fn, seen, options, dependencies)
+ if err != nil {
+ return err
+ }
+ }
+ if err := fn(name, service.deepCopy()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *Project) GetDependentsForService(s ServiceConfig) []string {
+ return utils.MapKeys(p.dependentsForService(s))
+}
+
+func (p *Project) dependentsForService(s ServiceConfig) map[string]ServiceDependency {
+ dependent := make(map[string]ServiceDependency)
+ for _, service := range p.Services {
+ for name, dependency := range service.DependsOn {
+ if name == s.Name {
+ dependent[service.Name] = dependency
+ }
+ }
+ }
+ return dependent
+}
+
+// RelativePath resolves a relative path based on the project's working directory
+func (p *Project) RelativePath(path string) string {
+ if path[0] == '~' {
+ home, _ := os.UserHomeDir()
+ path = filepath.Join(home, path[1:])
+ }
+ if filepath.IsAbs(path) {
+ return path
+ }
+ return filepath.Join(p.WorkingDir, path)
+}
+
+// HasProfile returns true if the service has no profiles declared or at least one matching profile
+func (s ServiceConfig) HasProfile(profiles []string) bool {
+ if len(s.Profiles) == 0 {
+ return true
+ }
+ for _, p := range profiles {
+ for _, sp := range s.Profiles {
+ if sp == p {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// WithProfiles disables services which don't match selected profiles
+// It returns a new Project instance with the changes and keeps the original Project unchanged
+func (p *Project) WithProfiles(profiles []string) (*Project, error) {
+ newProject := p.deepCopy()
+ for _, p := range profiles {
+ if p == "*" {
+ return newProject, nil
+ }
+ }
+ enabled := Services{}
+ disabled := Services{}
+ for name, service := range newProject.AllServices() {
+ if service.HasProfile(profiles) {
+ enabled[name] = service
+ } else {
+ disabled[name] = service
+ }
+ }
+ newProject.Services = enabled
+ newProject.DisabledServices = disabled
+ newProject.Profiles = profiles
+ return newProject, nil
+}
+
+// WithServicesEnabled ensures services are enabled and activates profiles accordingly
+// It returns a new Project instance with the changes and keeps the original Project unchanged
+func (p *Project) WithServicesEnabled(names ...string) (*Project, error) {
+ newProject := p.deepCopy()
+ if len(names) == 0 {
+ return newProject, nil
+ }
+
+ profiles := append([]string{}, p.Profiles...)
+ for _, name := range names {
+ if _, ok := newProject.Services[name]; ok {
+ // already enabled
+ continue
+ }
+ service := p.DisabledServices[name]
+ profiles = append(profiles, service.Profiles...)
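+ // enabling a disabled service also activates every profile it declares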
+ } + newProject, err := newProject.WithProfiles(profiles) + if err != nil { + return newProject, err + } + + return newProject.WithServicesEnvironmentResolved(true) +} + +// WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithoutUnnecessaryResources() *Project { + newProject := p.deepCopy() + requiredNetworks := map[string]struct{}{} + requiredVolumes := map[string]struct{}{} + requiredSecrets := map[string]struct{}{} + requiredConfigs := map[string]struct{}{} + for _, s := range newProject.Services { + for k := range s.Networks { + requiredNetworks[k] = struct{}{} + } + for _, v := range s.Volumes { + if v.Type != VolumeTypeVolume || v.Source == "" { + continue + } + requiredVolumes[v.Source] = struct{}{} + } + for _, v := range s.Secrets { + requiredSecrets[v.Source] = struct{}{} + } + if s.Build != nil { + for _, v := range s.Build.Secrets { + requiredSecrets[v.Source] = struct{}{} + } + } + for _, v := range s.Configs { + requiredConfigs[v.Source] = struct{}{} + } + } + + networks := Networks{} + for k := range requiredNetworks { + if value, ok := p.Networks[k]; ok { + networks[k] = value + } + } + newProject.Networks = networks + + volumes := Volumes{} + for k := range requiredVolumes { + if value, ok := p.Volumes[k]; ok { + volumes[k] = value + } + } + newProject.Volumes = volumes + + secrets := Secrets{} + for k := range requiredSecrets { + if value, ok := p.Secrets[k]; ok { + secrets[k] = value + } + } + newProject.Secrets = secrets + + configs := Configs{} + for k := range requiredConfigs { + if value, ok := p.Configs[k]; ok { + configs[k] = value + } + } + newProject.Configs = configs + return newProject +} + +type DependencyOption func(options *withServicesOptions) + +func IncludeDependencies(options *withServicesOptions) { + options.dependencyPolicy = includeDependencies +} + +func IncludeDependents(options *withServicesOptions) { + options.dependencyPolicy = includeDependents +} + +func IgnoreDependencies(options *withServicesOptions) { + options.dependencyPolicy = ignoreDependencies +} + +// WithSelectedServices restricts the project model to selected services and dependencies +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithSelectedServices(names []string, options ...DependencyOption) (*Project, error) { + newProject := p.deepCopy() + if len(names) == 0 { + // All services + return newProject, nil + } + + set := utils.NewSet[string]() + err := p.ForEachService(names, func(name string, service *ServiceConfig) error { + set.Add(name) + return nil + }, options...) 
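+ // `set` now contains the requested services plus any dependencies or
+ // dependents selected by the DependencyOption.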
+ if err != nil { + return nil, err + } + + // Disable all services which are not explicit target or dependencies + enabled := Services{} + for name, s := range newProject.Services { + if _, ok := set[name]; ok { + // remove all dependencies but those implied by explicitly selected services + dependencies := s.DependsOn + for d := range dependencies { + if _, ok := set[d]; !ok { + delete(dependencies, d) + } + } + s.DependsOn = dependencies + enabled[name] = s + } else { + newProject = newProject.WithServicesDisabled(name) + } + } + newProject.Services = enabled + return newProject, nil +} + +// WithServicesDisabled removes from the project model the given services and their references in all dependencies +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithServicesDisabled(names ...string) *Project { + newProject := p.deepCopy() + if len(names) == 0 { + return newProject + } + if newProject.DisabledServices == nil { + newProject.DisabledServices = Services{} + } + for _, name := range names { + // We should remove all dependencies which reference the disabled service + for i, s := range newProject.Services { + if _, ok := s.DependsOn[name]; ok { + delete(s.DependsOn, name) + newProject.Services[i] = s + } + } + if service, ok := newProject.Services[name]; ok { + newProject.DisabledServices[name] = service + delete(newProject.Services, name) + } + } + return newProject +} + +// WithImagesResolved updates services images to include digest computed by a resolver function +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithImagesResolved(resolver func(named reference.Named) (godigest.Digest, error)) (*Project, error) { + newProject := p.deepCopy() + eg := errgroup.Group{} + for i, s := range newProject.Services { + idx := i + service := s + + if service.Image == "" { + continue + } + eg.Go(func() error { + named, err := reference.ParseDockerRef(service.Image) + if err != nil { + return err + } + + if _, ok := named.(reference.Canonical); !ok { + // image is named but not digested reference + digest, err := resolver(named) + if err != nil { + return err + } + named, err = reference.WithDigest(named, digest) + if err != nil { + return err + } + } + + service.Image = named.String() + newProject.Services[idx] = service + return nil + }) + } + return newProject, eg.Wait() +} + +// MarshalYAML marshal Project into a yaml tree +func (p *Project) MarshalYAML() ([]byte, error) { + buf := bytes.NewBuffer([]byte{}) + encoder := yaml.NewEncoder(buf) + encoder.SetIndent(2) + // encoder.CompactSeqIndent() FIXME https://github.com/go-yaml/yaml/pull/753 + err := encoder.Encode(p) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSON makes Config implement json.Marshaler +func (p *Project) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "name": p.Name, + "services": p.Services, + } + + if len(p.Networks) > 0 { + m["networks"] = p.Networks + } + if len(p.Volumes) > 0 { + m["volumes"] = p.Volumes + } + if len(p.Secrets) > 0 { + m["secrets"] = p.Secrets + } + if len(p.Configs) > 0 { + m["configs"] = p.Configs + } + for k, v := range p.Extensions { + m[k] = v + } + return json.Marshal(m) +} + +// WithServicesEnvironmentResolved parses env_files set for services to resolve the actual environment map for services +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p Project) 
WithServicesEnvironmentResolved(discardEnvFiles bool) (*Project, error) { + newProject := p.deepCopy() + for i, service := range newProject.Services { + service.Environment = service.Environment.Resolve(newProject.Environment.Resolve) + + environment := MappingWithEquals{} + // resolve variables based on other files we already parsed, + project's environment + var resolve dotenv.LookupFn = func(s string) (string, bool) { + v, ok := environment[s] + if ok && v != nil { + return *v, ok + } + return newProject.Environment.Resolve(s) + } + + for _, envFile := range service.EnvFiles { + if _, err := os.Stat(envFile.Path); os.IsNotExist(err) { + if envFile.Required { + return nil, fmt.Errorf("env file %s not found: %w", envFile.Path, err) + } + continue + } + b, err := os.ReadFile(envFile.Path) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", envFile.Path, err) + } + + fileVars, err := dotenv.ParseWithLookup(bytes.NewBuffer(b), resolve) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", envFile.Path, err) + } + environment.OverrideBy(Mapping(fileVars).ToMappingWithEquals()) + } + + service.Environment = environment.OverrideBy(service.Environment) + + if discardEnvFiles { + service.EnvFiles = nil + } + newProject.Services[i] = service + } + return newProject, nil +} + +func (p *Project) deepCopy() *Project { + instance, err := copystructure.Copy(p) + if err != nil { + panic(err) + } + return instance.(*Project) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/services.go b/vendor/github.com/compose-spec/compose-go/v2/types/services.go new file mode 100644 index 000000000000..0efc4b9fab6c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/services.go @@ -0,0 +1,45 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +// Services is a map of ServiceConfig +type Services map[string]ServiceConfig + +// GetProfiles retrieve the profiles implicitly enabled by explicitly targeting selected services +func (s Services) GetProfiles() []string { + set := map[string]struct{}{} + for _, service := range s { + for _, p := range service.Profiles { + set[p] = struct{}{} + } + } + var profiles []string + for k := range set { + profiles = append(profiles, k) + } + return profiles +} + +func (s Services) Filter(predicate func(ServiceConfig) bool) Services { + services := Services{} + for name, service := range s { + if predicate(service) { + services[name] = service + } + } + return services +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go b/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go new file mode 100644 index 000000000000..6d0edb6956b8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go @@ -0,0 +1,73 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" +) + +type SSHKey struct { + ID string `yaml:"id,omitempty" json:"id,omitempty"` + Path string `path:"path,omitempty" json:"path,omitempty"` +} + +// SSHConfig is a mapping type for SSH build config +type SSHConfig []SSHKey + +func (s SSHConfig) Get(id string) (string, error) { + for _, sshKey := range s { + if sshKey.ID == id { + return sshKey.Path, nil + } + } + return "", fmt.Errorf("ID %s not found in SSH keys", id) +} + +// MarshalYAML makes SSHKey implement yaml.Marshaller +func (s SSHKey) MarshalYAML() (interface{}, error) { + if s.Path == "" { + return s.ID, nil + } + return fmt.Sprintf("%s: %s", s.ID, s.Path), nil +} + +// MarshalJSON makes SSHKey implement json.Marshaller +func (s SSHKey) MarshalJSON() ([]byte, error) { + if s.Path == "" { + return []byte(fmt.Sprintf(`%q`, s.ID)), nil + } + return []byte(fmt.Sprintf(`%q: %s`, s.ID, s.Path)), nil +} + +func (s *SSHConfig) DecodeMapstructure(value interface{}) error { + v, ok := value.(map[string]any) + if !ok { + return fmt.Errorf("invalid ssh config type %T", value) + } + result := make(SSHConfig, len(v)) + i := 0 + for id, path := range v { + key := SSHKey{ID: id} + if path != nil { + key.Path = fmt.Sprint(path) + } + result[i] = key + i++ + } + *s = result + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go b/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go new file mode 100644 index 000000000000..059043fb8840 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go @@ -0,0 +1,57 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import "fmt" + +// StringList is a type for fields that can be a string or list of strings +type StringList []string + +func (l *StringList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{v} + case []interface{}: + list := make([]string, len(v)) + for i, e := range v { + list[i] = e.(string) + } + *l = list + default: + return fmt.Errorf("invalid type %T for string list", value) + } + return nil +} + +// StringOrNumberList is a type for fields that can be a list of strings or numbers +type StringOrNumberList []string + +func (l *StringOrNumberList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{v} + case []interface{}: + list := make([]string, len(v)) + for i, e := range v { + list[i] = fmt.Sprint(e) + } + *l = list + default: + return fmt.Errorf("invalid type %T for string list", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/types.go b/vendor/github.com/compose-spec/compose-go/v2/types/types.go new file mode 100644 index 000000000000..1c5cca701f4c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/types.go @@ -0,0 +1,756 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/docker/go-connections/nat" + "github.com/mitchellh/copystructure" +) + +// ServiceConfig is the configuration of one service +type ServiceConfig struct { + Name string `yaml:"name,omitempty" json:"-"` + Profiles []string `yaml:"profiles,omitempty" json:"profiles,omitempty"` + + Annotations Mapping `yaml:"annotations,omitempty" json:"annotations,omitempty"` + Attach *bool `yaml:"attach,omitempty" json:"attach,omitempty"` + Build *BuildConfig `yaml:"build,omitempty" json:"build,omitempty"` + Develop *DevelopConfig `yaml:"develop,omitempty" json:"develop,omitempty"` + BlkioConfig *BlkioConfig `yaml:"blkio_config,omitempty" json:"blkio_config,omitempty"` + CapAdd []string `yaml:"cap_add,omitempty" json:"cap_add,omitempty"` + CapDrop []string `yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"` + CgroupParent string `yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"` + Cgroup string `yaml:"cgroup,omitempty" json:"cgroup,omitempty"` + CPUCount int64 `yaml:"cpu_count,omitempty" json:"cpu_count,omitempty"` + CPUPercent float32 `yaml:"cpu_percent,omitempty" json:"cpu_percent,omitempty"` + CPUPeriod int64 `yaml:"cpu_period,omitempty" json:"cpu_period,omitempty"` + CPUQuota int64 `yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"` + CPURTPeriod int64 `yaml:"cpu_rt_period,omitempty" json:"cpu_rt_period,omitempty"` + CPURTRuntime int64 `yaml:"cpu_rt_runtime,omitempty" json:"cpu_rt_runtime,omitempty"` + CPUS float32 `yaml:"cpus,omitempty" json:"cpus,omitempty"` + CPUSet string `yaml:"cpuset,omitempty" json:"cpuset,omitempty"` + CPUShares int64 `yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"` + + // Command for the service containers. + // If set, overrides COMMAND from the image. + // + // Set to `[]` or an empty string to clear the command from the image. + Command ShellCommand `yaml:"command,omitempty" json:"command"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. + + Configs []ServiceConfigObjConfig `yaml:"configs,omitempty" json:"configs,omitempty"` + ContainerName string `yaml:"container_name,omitempty" json:"container_name,omitempty"` + CredentialSpec *CredentialSpecConfig `yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` + DependsOn DependsOnConfig `yaml:"depends_on,omitempty" json:"depends_on,omitempty"` + Deploy *DeployConfig `yaml:"deploy,omitempty" json:"deploy,omitempty"` + DeviceCgroupRules []string `yaml:"device_cgroup_rules,omitempty" json:"device_cgroup_rules,omitempty"` + Devices []string `yaml:"devices,omitempty" json:"devices,omitempty"` + DNS StringList `yaml:"dns,omitempty" json:"dns,omitempty"` + DNSOpts []string `yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"` + DNSSearch StringList `yaml:"dns_search,omitempty" json:"dns_search,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DomainName string `yaml:"domainname,omitempty" json:"domainname,omitempty"` + + // Entrypoint for the service containers. + // If set, overrides ENTRYPOINT from the image. + // + // Set to `[]` or an empty string to clear the entrypoint from the image. + Entrypoint ShellCommand `yaml:"entrypoint,omitempty" json:"entrypoint"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. 
+ + Environment MappingWithEquals `yaml:"environment,omitempty" json:"environment,omitempty"` + EnvFiles []EnvFile `yaml:"env_file,omitempty" json:"env_file,omitempty"` + Expose StringOrNumberList `yaml:"expose,omitempty" json:"expose,omitempty"` + Extends *ExtendsConfig `yaml:"extends,omitempty" json:"extends,omitempty"` + ExternalLinks []string `yaml:"external_links,omitempty" json:"external_links,omitempty"` + ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + GroupAdd []string `yaml:"group_add,omitempty" json:"group_add,omitempty"` + Hostname string `yaml:"hostname,omitempty" json:"hostname,omitempty"` + HealthCheck *HealthCheckConfig `yaml:"healthcheck,omitempty" json:"healthcheck,omitempty"` + Image string `yaml:"image,omitempty" json:"image,omitempty"` + Init *bool `yaml:"init,omitempty" json:"init,omitempty"` + Ipc string `yaml:"ipc,omitempty" json:"ipc,omitempty"` + Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CustomLabels Labels `yaml:"-" json:"-"` + Links []string `yaml:"links,omitempty" json:"links,omitempty"` + Logging *LoggingConfig `yaml:"logging,omitempty" json:"logging,omitempty"` + LogDriver string `yaml:"log_driver,omitempty" json:"log_driver,omitempty"` + LogOpt map[string]string `yaml:"log_opt,omitempty" json:"log_opt,omitempty"` + MemLimit UnitBytes `yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"` + MemReservation UnitBytes `yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"` + MemSwapLimit UnitBytes `yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"` + MemSwappiness UnitBytes `yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"` + MacAddress string `yaml:"mac_address,omitempty" json:"mac_address,omitempty"` + Net string `yaml:"net,omitempty" json:"net,omitempty"` + NetworkMode string `yaml:"network_mode,omitempty" json:"network_mode,omitempty"` + Networks map[string]*ServiceNetworkConfig `yaml:"networks,omitempty" json:"networks,omitempty"` + OomKillDisable bool `yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"` + OomScoreAdj int64 `yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"` + Pid string `yaml:"pid,omitempty" json:"pid,omitempty"` + PidsLimit int64 `yaml:"pids_limit,omitempty" json:"pids_limit,omitempty"` + Platform string `yaml:"platform,omitempty" json:"platform,omitempty"` + Ports []ServicePortConfig `yaml:"ports,omitempty" json:"ports,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + PullPolicy string `yaml:"pull_policy,omitempty" json:"pull_policy,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty" json:"read_only,omitempty"` + Restart string `yaml:"restart,omitempty" json:"restart,omitempty"` + Runtime string `yaml:"runtime,omitempty" json:"runtime,omitempty"` + Scale *int `yaml:"scale,omitempty" json:"scale,omitempty"` + Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` + SecurityOpt []string `yaml:"security_opt,omitempty" json:"security_opt,omitempty"` + ShmSize UnitBytes `yaml:"shm_size,omitempty" json:"shm_size,omitempty"` + StdinOpen bool `yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` + StopGracePeriod *Duration `yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"` + StopSignal string `yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"` + StorageOpt map[string]string `yaml:"storage_opt,omitempty" json:"storage_opt,omitempty"` + Sysctls 
Mapping `yaml:"sysctls,omitempty" json:"sysctls,omitempty"`
+ Tmpfs StringList `yaml:"tmpfs,omitempty" json:"tmpfs,omitempty"`
+ Tty bool `yaml:"tty,omitempty" json:"tty,omitempty"`
+ Ulimits map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"`
+ User string `yaml:"user,omitempty" json:"user,omitempty"`
+ UserNSMode string `yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"`
+ Uts string `yaml:"uts,omitempty" json:"uts,omitempty"`
+ VolumeDriver string `yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"`
+ Volumes []ServiceVolumeConfig `yaml:"volumes,omitempty" json:"volumes,omitempty"`
+ VolumesFrom []string `yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"`
+ WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"`
+
+ Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"`
+}
+
+// MarshalYAML makes ServiceConfig implement yaml.Marshaller
+func (s ServiceConfig) MarshalYAML() (interface{}, error) {
+ type t ServiceConfig
+ value := t(s)
+ value.Name = "" // set during map to slice conversion, not part of the yaml representation
+ return value, nil
+}
+
+// NetworksByPriority returns the service network IDs sorted according to Priority
+func (s *ServiceConfig) NetworksByPriority() []string {
+ type key struct {
+ name string
+ priority int
+ }
+ var keys []key
+ for k, v := range s.Networks {
+ priority := 0
+ if v != nil {
+ priority = v.Priority
+ }
+ keys = append(keys, key{
+ name: k,
+ priority: priority,
+ })
+ }
+ sort.Slice(keys, func(i, j int) bool {
+ return keys[i].priority > keys[j].priority
+ })
+ var sorted []string
+ for _, k := range keys {
+ sorted = append(sorted, k.name)
+ }
+ return sorted
+}
+
+func (s *ServiceConfig) GetScale() int {
+ if s.Scale != nil {
+ return *s.Scale
+ }
+ if s.Deploy != nil && s.Deploy.Replicas != nil {
+ // this should not be required as compose-go enforces consistency between scale and replicas
+ return *s.Deploy.Replicas
+ }
+ return 1
+}
+
+func (s *ServiceConfig) SetScale(scale int) {
+ s.Scale = &scale
+ if s.Deploy != nil {
+ s.Deploy.Replicas = &scale
+ }
+}
+
+func (s *ServiceConfig) deepCopy() *ServiceConfig {
+ instance, err := copystructure.Copy(s)
+ if err != nil {
+ panic(err)
+ }
+ return instance.(*ServiceConfig)
+}
+
+const (
+ // PullPolicyAlways always pull images
+ PullPolicyAlways = "always"
+ // PullPolicyNever never pull images
+ PullPolicyNever = "never"
+ // PullPolicyIfNotPresent pull missing images
+ PullPolicyIfNotPresent = "if_not_present"
+ // PullPolicyMissing pull missing images
+ PullPolicyMissing = "missing"
+ // PullPolicyBuild force building images
+ PullPolicyBuild = "build"
+)
+
+const (
+ // RestartPolicyAlways always restart the container if it stops
+ RestartPolicyAlways = "always"
+ // RestartPolicyOnFailure restart the container if it exits due to an error
+ RestartPolicyOnFailure = "on-failure"
+ // RestartPolicyNo do not automatically restart the container
+ RestartPolicyNo = "no"
+ // RestartPolicyUnlessStopped always restart the container unless the container is stopped (manually or otherwise)
+ RestartPolicyUnlessStopped = "unless-stopped"
+)
+
+const (
+ // ServicePrefix is the prefix for references pointing to a service
+ ServicePrefix = "service:"
+ // ContainerPrefix is the prefix for references pointing to a container
+ ContainerPrefix = "container:"
+
+ // NetworkModeServicePrefix is the prefix for network_mode pointing to a service
+ // Deprecated prefer ServicePrefix
+ NetworkModeServicePrefix =
ServicePrefix + // NetworkModeContainerPrefix is the prefix for network_mode pointing to a container + // Deprecated prefer ContainerPrefix + NetworkModeContainerPrefix = ContainerPrefix +) + +// GetDependencies retrieves all services this service depends on +func (s ServiceConfig) GetDependencies() []string { + var dependencies []string + for service := range s.DependsOn { + dependencies = append(dependencies, service) + } + return dependencies +} + +// GetDependents retrieves all services which depend on this service +func (s ServiceConfig) GetDependents(p *Project) []string { + var dependent []string + for _, service := range p.Services { + for name := range service.DependsOn { + if name == s.Name { + dependent = append(dependent, service.Name) + } + } + } + return dependent +} + +// BuildConfig is a type for build +type BuildConfig struct { + Context string `yaml:"context,omitempty" json:"context,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DockerfileInline string `yaml:"dockerfile_inline,omitempty" json:"dockerfile_inline,omitempty"` + Args MappingWithEquals `yaml:"args,omitempty" json:"args,omitempty"` + SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CacheFrom StringList `yaml:"cache_from,omitempty" json:"cache_from,omitempty"` + CacheTo StringList `yaml:"cache_to,omitempty" json:"cache_to,omitempty"` + NoCache bool `yaml:"no_cache,omitempty" json:"no_cache,omitempty"` + AdditionalContexts Mapping `yaml:"additional_contexts,omitempty" json:"additional_contexts,omitempty"` + Pull bool `yaml:"pull,omitempty" json:"pull,omitempty"` + ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` + Network string `yaml:"network,omitempty" json:"network,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` + ShmSize UnitBytes `yaml:"shm_size,omitempty" json:"shm_size,omitempty"` + Tags StringList `yaml:"tags,omitempty" json:"tags,omitempty"` + Ulimits map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"` + Platforms StringList `yaml:"platforms,omitempty" json:"platforms,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// BlkioConfig define blkio config +type BlkioConfig struct { + Weight uint16 `yaml:"weight,omitempty" json:"weight,omitempty"` + WeightDevice []WeightDevice `yaml:"weight_device,omitempty" json:"weight_device,omitempty"` + DeviceReadBps []ThrottleDevice `yaml:"device_read_bps,omitempty" json:"device_read_bps,omitempty"` + DeviceReadIOps []ThrottleDevice `yaml:"device_read_iops,omitempty" json:"device_read_iops,omitempty"` + DeviceWriteBps []ThrottleDevice `yaml:"device_write_bps,omitempty" json:"device_write_bps,omitempty"` + DeviceWriteIOps []ThrottleDevice `yaml:"device_write_iops,omitempty" json:"device_write_iops,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string 
+ Rate UnitBytes + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// MappingWithColon is a mapping type that can be converted from a list of +// 'key: value' strings +type MappingWithColon map[string]string + +// LoggingConfig the logging configuration for a service +type LoggingConfig struct { + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Options Options `yaml:"options,omitempty" json:"options,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// DeployConfig the deployment configuration for a service +type DeployConfig struct { + Mode string `yaml:"mode,omitempty" json:"mode,omitempty"` + Replicas *int `yaml:"replicas,omitempty" json:"replicas,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + UpdateConfig *UpdateConfig `yaml:"update_config,omitempty" json:"update_config,omitempty"` + RollbackConfig *UpdateConfig `yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"` + Resources Resources `yaml:"resources,omitempty" json:"resources,omitempty"` + RestartPolicy *RestartPolicy `yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"` + Placement Placement `yaml:"placement,omitempty" json:"placement,omitempty"` + EndpointMode string `yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// UpdateConfig the service update configuration +type UpdateConfig struct { + Parallelism *uint64 `yaml:"parallelism,omitempty" json:"parallelism,omitempty"` + Delay Duration `yaml:"delay,omitempty" json:"delay,omitempty"` + FailureAction string `yaml:"failure_action,omitempty" json:"failure_action,omitempty"` + Monitor Duration `yaml:"monitor,omitempty" json:"monitor,omitempty"` + MaxFailureRatio float32 `yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` + Order string `yaml:"order,omitempty" json:"order,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// Resources the resource limits and reservations +type Resources struct { + Limits *Resource `yaml:"limits,omitempty" json:"limits,omitempty"` + Reservations *Resource `yaml:"reservations,omitempty" json:"reservations,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// Resource is a resource to be limited or reserved +type Resource struct { + // TODO: types to convert from units and ratios + NanoCPUs string `yaml:"cpus,omitempty" json:"cpus,omitempty"` + MemoryBytes UnitBytes `yaml:"memory,omitempty" json:"memory,omitempty"` + Pids int64 `yaml:"pids,omitempty" json:"pids,omitempty"` + Devices []DeviceRequest `yaml:"devices,omitempty" json:"devices,omitempty"` + GenericResources []GenericResource `yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// GenericResource represents a "user defined" resource which can +// only be an integer (e.g: SSD=3) for a service +type GenericResource struct { + DiscreteResourceSpec *DiscreteGenericResource `yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) 
+// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:"kind"` + Value int64 `json:"value"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// RestartPolicy the service restart policy +type RestartPolicy struct { + Condition string `yaml:"condition,omitempty" json:"condition,omitempty"` + Delay *Duration `yaml:"delay,omitempty" json:"delay,omitempty"` + MaxAttempts *uint64 `yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` + Window *Duration `yaml:"window,omitempty" json:"window,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// Placement constraints for the service +type Placement struct { + Constraints []string `yaml:"constraints,omitempty" json:"constraints,omitempty"` + Preferences []PlacementPreferences `yaml:"preferences,omitempty" json:"preferences,omitempty"` + MaxReplicas uint64 `yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// PlacementPreferences is the preferences for a service placement +type PlacementPreferences struct { + Spread string `yaml:"spread,omitempty" json:"spread,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ServiceNetworkConfig is the network configuration for a service +type ServiceNetworkConfig struct { + Priority int `yaml:"priority,omitempty" json:"priority,omitempty"` + Aliases []string `yaml:"aliases,omitempty" json:"aliases,omitempty"` + Ipv4Address string `yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` + Ipv6Address string `yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` + LinkLocalIPs []string `yaml:"link_local_ips,omitempty" json:"link_local_ips,omitempty"` + MacAddress string `yaml:"mac_address,omitempty" json:"mac_address,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ServicePortConfig is the port configuration for a service +type ServicePortConfig struct { + Mode string `yaml:"mode,omitempty" json:"mode,omitempty"` + HostIP string `yaml:"host_ip,omitempty" json:"host_ip,omitempty"` + Target uint32 `yaml:"target,omitempty" json:"target,omitempty"` + Published string `yaml:"published,omitempty" json:"published,omitempty"` + Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ParsePortConfig parse short syntax for service port configuration +func ParsePortConfig(value string) ([]ServicePortConfig, error) { + var portConfigs []ServicePortConfig + ports, portBindings, err := nat.ParsePortSpecs([]string{value}) + if err != nil { + return nil, err + } + // We need to sort the key of the ports to make sure it is consistent + keys := []string{} + for port := range ports { + keys = append(keys, string(port)) + } + sort.Strings(keys) + + for _, key := range keys { + port := nat.Port(key) + converted, err := convertPortToPortConfig(port, portBindings) + if err != nil { + return nil, err + } + portConfigs = append(portConfigs, converted...) 
+ } + return portConfigs, nil +} + +func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) { + var portConfigs []ServicePortConfig + for _, binding := range portBindings[port] { + portConfigs = append(portConfigs, ServicePortConfig{ + HostIP: binding.HostIP, + Protocol: strings.ToLower(port.Proto()), + Target: uint32(port.Int()), + Published: binding.HostPort, + Mode: "ingress", + }) + } + return portConfigs, nil +} + +// ServiceVolumeConfig are references to a volume used by a service +type ServiceVolumeConfig struct { + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty" json:"read_only,omitempty"` + Consistency string `yaml:"consistency,omitempty" json:"consistency,omitempty"` + Bind *ServiceVolumeBind `yaml:"bind,omitempty" json:"bind,omitempty"` + Volume *ServiceVolumeVolume `yaml:"volume,omitempty" json:"volume,omitempty"` + Tmpfs *ServiceVolumeTmpfs `yaml:"tmpfs,omitempty" json:"tmpfs,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// String render ServiceVolumeConfig as a volume string, one can parse back using loader.ParseVolume +func (s ServiceVolumeConfig) String() string { + access := "rw" + if s.ReadOnly { + access = "ro" + } + options := []string{access} + if s.Bind != nil && s.Bind.SELinux != "" { + options = append(options, s.Bind.SELinux) + } + if s.Bind != nil && s.Bind.Propagation != "" { + options = append(options, s.Bind.Propagation) + } + if s.Volume != nil && s.Volume.NoCopy { + options = append(options, "nocopy") + } + return fmt.Sprintf("%s:%s:%s", s.Source, s.Target, strings.Join(options, ",")) +} + +const ( + // VolumeTypeBind is the type for mounting host dir + VolumeTypeBind = "bind" + // VolumeTypeVolume is the type for remote storage volumes + VolumeTypeVolume = "volume" + // VolumeTypeTmpfs is the type for mounting tmpfs + VolumeTypeTmpfs = "tmpfs" + // VolumeTypeNamedPipe is the type for mounting Windows named pipes + VolumeTypeNamedPipe = "npipe" + // VolumeTypeCluster is the type for mounting container storage interface (CSI) volumes + VolumeTypeCluster = "cluster" + + // SElinuxShared share the volume content + SElinuxShared = "z" + // SElinuxUnshared label content as private unshared + SElinuxUnshared = "Z" +) + +// ServiceVolumeBind are options for a service volume of type bind +type ServiceVolumeBind struct { + SELinux string `yaml:"selinux,omitempty" json:"selinux,omitempty"` + Propagation string `yaml:"propagation,omitempty" json:"propagation,omitempty"` + CreateHostPath bool `yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// SELinux represents the SELinux re-labeling options. +const ( + // SELinuxShared option indicates that the bind mount content is shared among multiple containers + SELinuxShared string = "z" + // SELinuxPrivate option indicates that the bind mount content is private and unshared + SELinuxPrivate string = "Z" +) + +// Propagation represents the propagation of a mount. 
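Editor's note: a usage sketch (not part of the patch) may help here. ParsePortConfig above expands Compose short port syntax into ServicePortConfig values, and ServiceVolumeConfig.String renders a volume mount back to short syntax. The import path is the vendored package added by this change; the literal values are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/types"
)

func main() {
	// Short syntax expands to one ServicePortConfig per binding;
	// Mode defaults to "ingress" (see convertPortToPortConfig above).
	ports, err := types.ParsePortConfig("127.0.0.1:8080:80/tcp")
	if err != nil {
		panic(err)
	}
	for _, p := range ports {
		fmt.Printf("%s:%s -> %d (%s, %s)\n", p.HostIP, p.Published, p.Target, p.Protocol, p.Mode)
	}

	// String renders the read-only flag and bind propagation as options.
	v := types.ServiceVolumeConfig{
		Type:     types.VolumeTypeBind,
		Source:   "./data",
		Target:   "/data",
		ReadOnly: true,
		Bind:     &types.ServiceVolumeBind{Propagation: types.PropagationRPrivate},
	}
	fmt.Println(v.String()) // ./data:/data:ro,rprivate
}
```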
+const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate string = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate string = "private" + // PropagationRShared RSHARED + PropagationRShared string = "rshared" + // PropagationShared SHARED + PropagationShared string = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave string = "rslave" + // PropagationSlave SLAVE + PropagationSlave string = "slave" +) + +// ServiceVolumeVolume are options for a service volume of type volume +type ServiceVolumeVolume struct { + NoCopy bool `yaml:"nocopy,omitempty" json:"nocopy,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ServiceVolumeTmpfs are options for a service volume of type tmpfs +type ServiceVolumeTmpfs struct { + Size UnitBytes `yaml:"size,omitempty" json:"size,omitempty"` + + Mode uint32 `yaml:"mode,omitempty" json:"mode,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// FileReferenceConfig for a reference to a swarm file object +type FileReferenceConfig struct { + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + UID string `yaml:"uid,omitempty" json:"uid,omitempty"` + GID string `yaml:"gid,omitempty" json:"gid,omitempty"` + Mode *uint32 `yaml:"mode,omitempty" json:"mode,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ServiceConfigObjConfig is the config obj configuration for a service +type ServiceConfigObjConfig FileReferenceConfig + +// ServiceSecretConfig is the secret configuration for a service +type ServiceSecretConfig FileReferenceConfig + +// UlimitsConfig the ulimit configuration +type UlimitsConfig struct { + Single int `yaml:"single,omitempty" json:"single,omitempty"` + Soft int `yaml:"soft,omitempty" json:"soft,omitempty"` + Hard int `yaml:"hard,omitempty" json:"hard,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +func (u *UlimitsConfig) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case *UlimitsConfig: + // this call to DecodeMapstructure is triggered after initial value conversion as we use a map[string]*UlimitsConfig + return nil + case int: + u.Single = v + u.Soft = 0 + u.Hard = 0 + case map[string]any: + u.Single = 0 + soft, ok := v["soft"] + if ok { + u.Soft = soft.(int) + } + hard, ok := v["hard"] + if ok { + u.Hard = hard.(int) + } + default: + return fmt.Errorf("unexpected value type %T for ulimit", value) + } + return nil +} + +// MarshalYAML makes UlimitsConfig implement yaml.Marshaller +func (u *UlimitsConfig) MarshalYAML() (interface{}, error) { + if u.Single != 0 { + return u.Single, nil + } + return struct { + Soft int + Hard int + }{ + Soft: u.Soft, + Hard: u.Hard, + }, nil +} + +// MarshalJSON makes UlimitsConfig implement json.Marshaller +func (u *UlimitsConfig) MarshalJSON() ([]byte, error) { + if u.Single != 0 { + return json.Marshal(u.Single) + } + // Pass as a value to avoid re-entering this method and use the default implementation + return json.Marshal(*u) +} + +// NetworkConfig for a network +type NetworkConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts Options `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + Ipam IPAMConfig `yaml:"ipam,omitempty" json:"ipam,omitempty"` + External External `yaml:"external,omitempty" 
json:"external,omitempty"` + Internal bool `yaml:"internal,omitempty" json:"internal,omitempty"` + Attachable bool `yaml:"attachable,omitempty" json:"attachable,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + EnableIPv6 bool `yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// IPAMConfig for a network +type IPAMConfig struct { + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Config []*IPAMPool `yaml:"config,omitempty" json:"config,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// IPAMPool for a network +type IPAMPool struct { + Subnet string `yaml:"subnet,omitempty" json:"subnet,omitempty"` + Gateway string `yaml:"gateway,omitempty" json:"gateway,omitempty"` + IPRange string `yaml:"ip_range,omitempty" json:"ip_range,omitempty"` + AuxiliaryAddresses Mapping `yaml:"aux_addresses,omitempty" json:"aux_addresses,omitempty"` + Extensions map[string]interface{} `yaml:",inline" json:"-"` +} + +// VolumeConfig for a volume +type VolumeConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts Options `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + External External `yaml:"external,omitempty" json:"external,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// External identifies a Volume or Network as a reference to a resource that is +// not managed, and should already exist. +type External bool + +// CredentialSpecConfig for credential spec on Windows +type CredentialSpecConfig struct { + Config string `yaml:"config,omitempty" json:"config,omitempty"` // Config was added in API v1.40 + File string `yaml:"file,omitempty" json:"file,omitempty"` + Registry string `yaml:"registry,omitempty" json:"registry,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// FileObjectConfig is a config type for a file used by a service +type FileObjectConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + File string `yaml:"file,omitempty" json:"file,omitempty"` + Environment string `yaml:"environment,omitempty" json:"environment,omitempty"` + Content string `yaml:"content,omitempty" json:"content,omitempty"` + External External `yaml:"external,omitempty" json:"external,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts map[string]string `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + TemplateDriver string `yaml:"template_driver,omitempty" json:"template_driver,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +const ( + // ServiceConditionCompletedSuccessfully is the type for waiting until a service has completed successfully (exit code 0). + ServiceConditionCompletedSuccessfully = "service_completed_successfully" + + // ServiceConditionHealthy is the type for waiting until a service is healthy. + ServiceConditionHealthy = "service_healthy" + + // ServiceConditionStarted is the type for waiting until a service has started (default). 
+ ServiceConditionStarted = "service_started" +) + +type DependsOnConfig map[string]ServiceDependency + +type ServiceDependency struct { + Condition string `yaml:"condition,omitempty" json:"condition,omitempty"` + Restart bool `yaml:"restart,omitempty" json:"restart,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` + Required bool `yaml:"required" json:"required"` +} + +type ExtendsConfig struct { + File string `yaml:"file,omitempty" json:"file,omitempty"` + Service string `yaml:"service,omitempty" json:"service,omitempty"` +} + +// SecretConfig for a secret +type SecretConfig FileObjectConfig + +// ConfigObjConfig is the config for the swarm "Config" object +type ConfigObjConfig FileObjectConfig + +type IncludeConfig struct { + Path StringList `yaml:"path,omitempty" json:"path,omitempty"` + ProjectDirectory string `yaml:"project_directory,omitempty" json:"project_directory,omitempty"` + EnvFile StringList `yaml:"env_file,omitempty" json:"env_file,omitempty"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go b/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go new file mode 100644 index 000000000000..4df8fb1f68e6 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go @@ -0,0 +1,68 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "golang.org/x/exp/constraints" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +func MapKeys[T constraints.Ordered, U any](theMap map[T]U) []T { + result := maps.Keys(theMap) + slices.Sort(result) + return result +} + +func MapsAppend[T comparable, U any](target map[T]U, source map[T]U) map[T]U { + if target == nil { + return source + } + if source == nil { + return target + } + for key, value := range source { + if _, ok := target[key]; !ok { + target[key] = value + } + } + return target +} + +func ArrayContains[T comparable](source []T, toCheck []T) bool { + for _, value := range toCheck { + if !slices.Contains(source, value) { + return false + } + } + return true +} + +func RemoveDuplicates[T comparable](slice []T) []T { + // Create a map to store unique elements + seen := make(map[T]bool) + result := []T{} + + // Loop through the slice, adding elements to the map if they haven't been seen before + for _, val := range slice { + if _, ok := seen[val]; !ok { + seen[val] = true + result = append(result, val) + } + } + return result +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/utils/set.go b/vendor/github.com/compose-spec/compose-go/v2/utils/set.go new file mode 100644 index 000000000000..bbbeaa966def --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/utils/set.go @@ -0,0 +1,95 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
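Editor's note: the generic helpers in utils/collectionutils.go above are small but used throughout the loader; a minimal sketch of their behavior (sample data invented):

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/utils"
)

func main() {
	// MapKeys sorts the keys, so the result is deterministic.
	fmt.Println(utils.MapKeys(map[string]int{"web": 1, "db": 2, "cache": 3})) // [cache db web]

	// MapsAppend only adds keys missing from target; existing keys win.
	merged := utils.MapsAppend(map[string]int{"web": 1}, map[string]int{"web": 9, "db": 2})
	fmt.Println(merged["web"], merged["db"]) // 1 2

	// RemoveDuplicates keeps the first occurrence of each element.
	fmt.Println(utils.RemoveDuplicates([]string{"a", "b", "a"})) // [a b]
}
```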
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +type Set[T comparable] map[T]struct{} + +func NewSet[T comparable](v ...T) Set[T] { + if len(v) == 0 { + return make(Set[T]) + } + + out := make(Set[T], len(v)) + for i := range v { + out.Add(v[i]) + } + return out +} + +func (s Set[T]) Has(v T) bool { + _, ok := s[v] + return ok +} + +func (s Set[T]) Add(v T) { + s[v] = struct{}{} +} + +func (s Set[T]) AddAll(v ...T) { + for _, e := range v { + s[e] = struct{}{} + } +} + +func (s Set[T]) Remove(v T) bool { + _, ok := s[v] + if ok { + delete(s, v) + } + return ok +} + +func (s Set[T]) Clear() { + for v := range s { + delete(s, v) + } +} + +func (s Set[T]) Elements() []T { + elements := make([]T, 0, len(s)) + for v := range s { + elements = append(elements, v) + } + return elements +} + +func (s Set[T]) RemoveAll(elements ...T) { + for _, e := range elements { + s.Remove(e) + } +} + +func (s Set[T]) Diff(other Set[T]) Set[T] { + out := make(Set[T]) + for k := range s { + if _, ok := other[k]; !ok { + out[k] = struct{}{} + } + } + return out +} + +func (s Set[T]) Union(other Set[T]) Set[T] { + out := make(Set[T]) + for k := range s { + out[k] = struct{}{} + } + for k := range other { + out[k] = struct{}{} + } + return out +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go b/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go new file mode 100644 index 000000000000..dfabf6c97158 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
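Editor's note: the Set type above is a plain map-backed set; Diff returns the receiver's elements missing from the argument, while Union combines both sides. A short sketch with invented service names:

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/utils"
)

func main() {
	deployed := utils.NewSet("web", "db", "cache")
	desired := utils.NewSet("web", "worker")

	stale := deployed.Diff(desired) // db, cache
	all := deployed.Union(desired)

	fmt.Println(len(stale), all.Has("worker")) // 2 true
}
```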
+*/
+
+package utils
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// StringToBool converts a string to a boolean ignoring errors
+func StringToBool(s string) bool {
+ b, _ := strconv.ParseBool(strings.ToLower(strings.TrimSpace(s)))
+ return b
+}
+
+// GetAsEqualsMap splits key=value formatted strings into a key : value map
+func GetAsEqualsMap(em []string) map[string]string {
+ m := make(map[string]string)
+ for _, v := range em {
+ kv := strings.SplitN(v, "=", 2)
+ m[kv[0]] = kv[1]
+ }
+ return m
+}
+
+// GetAsStringList formats a key : value map into key=value strings
+func GetAsStringList(em map[string]string) []string {
+ m := make([]string, 0, len(em))
+ for k, v := range em {
+ m = append(m, fmt.Sprintf("%s=%s", k, v))
+ }
+ return m
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/validation/external.go b/vendor/github.com/compose-spec/compose-go/v2/validation/external.go
new file mode 100644
index 000000000000..b74d551a02b1
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/validation/external.go
@@ -0,0 +1,49 @@
+/*
+ Copyright 2020 The Compose Specification Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/consts"
+ "github.com/compose-spec/compose-go/v2/tree"
+)
+
+func checkExternal(v map[string]any, p tree.Path) error {
+ b, ok := v["external"]
+ if !ok {
+ return nil
+ }
+ if !b.(bool) {
+ return nil
+ }
+
+ for k := range v {
+ switch k {
+ case "name", "external", consts.Extensions:
+ continue
+ default:
+ if strings.HasPrefix(k, "x-") {
+ // custom extension, ignored
+ continue
+ }
+ return fmt.Errorf("%s: conflicting parameters \"external\" and %q specified", p, k)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go b/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go
new file mode 100644
index 000000000000..e7cd67545662
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go
@@ -0,0 +1,96 @@
+/*
+ Copyright 2020 The Compose Specification Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/compose-spec/compose-go/v2/tree"
+)
+
+type checkerFunc func(value any, p tree.Path) error
+
+var checks = map[tree.Path]checkerFunc{
+ "volumes.*": checkVolume,
+ "configs.*": checkFileObject("file", "environment", "content"),
+ "secrets.*": checkFileObject("file", "environment"),
+ "services.*.develop.watch.*.path": checkPath,
+}
+
+func Validate(dict map[string]any) error {
+ return check(dict, tree.NewPath())
+}
+
+func check(value any, p tree.Path) error {
+ for pattern, fn := range checks {
+ if p.Matches(pattern) {
+ return fn(value, p)
+ }
+ }
+ switch v := value.(type) {
+ case map[string]any:
+ for k, v := range v {
+ err := check(v, p.Next(k))
+ if err != nil {
+ return err
+ }
+ }
+ case []any:
+ for _, e := range v {
+ err := check(e, p.Next("[]"))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func checkFileObject(keys ...string) checkerFunc {
+ return func(value any, p tree.Path) error {
+
+ v := value.(map[string]any)
+ count := 0
+ for _, s := range keys {
+ if _, ok := v[s]; ok {
+ count++
+ }
+ }
+ if count > 1 {
+ return fmt.Errorf("%s: %s attributes are mutually exclusive", p, strings.Join(keys, "|"))
+ }
+ if count == 0 {
+ if _, ok := v["driver"]; ok {
+ // User specified a custom driver, which might have its own way to set content
+ return nil
+ }
+ if _, ok := v["external"]; !ok {
+ return fmt.Errorf("%s: one of %s must be set", p, strings.Join(keys, "|"))
+ }
+ }
+ return nil
+ }
+}
+
+func checkPath(value any, p tree.Path) error {
+ v := value.(string)
+ if v == "" {
+ return fmt.Errorf("%s: value can't be blank", p)
+ }
+ return nil
+}
diff --git a/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go b/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go
new file mode 100644
index 000000000000..5b400681163d
--- /dev/null
+++ b/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go
@@ -0,0 +1,39 @@
+/*
+ Copyright 2020 The Compose Specification Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+
+ "github.com/compose-spec/compose-go/v2/tree"
+)
+
+func checkVolume(value any, p tree.Path) error {
+ if value == nil {
+ return nil
+ }
+ v, ok := value.(map[string]any)
+ if !ok {
+ return fmt.Errorf("expected volume, got %s", value)
+ }
+
+ err := checkExternal(v, p)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml
new file mode 100644
index 000000000000..ebd5edd8983e
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/.travis.yml
@@ -0,0 +1,16 @@
+arch:
+ - amd64
+ - ppc64le
+language: go
+sudo: false
+go:
+ - tip
+
+before_install:
+ - go get -t -v ./...
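Editor's note: Validate above is table-driven; the `checks` map keys are path patterns matched while recursing through the raw model. A hedged sketch of two failures (the models are invented; import path as vendored by this change):

```go
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/v2/validation"
)

func main() {
	// "configs.*" entries may set at most one of file|environment|content.
	fmt.Println(validation.Validate(map[string]any{
		"configs": map[string]any{
			"app": map[string]any{"file": "./app.conf", "content": "inline"},
		},
	})) // configs.app: file|environment|content attributes are mutually exclusive

	// An external volume must not carry extra parameters such as a driver.
	fmt.Println(validation.Validate(map[string]any{
		"volumes": map[string]any{
			"db": map[string]any{"external": true, "driver": "local"},
		},
	})) // volumes.db: conflicting parameters "external" and "driver" specified
}
```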
+ +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE new file mode 100644 index 000000000000..740fa931322d --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 000000000000..bdd531918cf0 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,55 @@ +# go-shellwords + +[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/mattn/go-shellwords)](https://pkg.go.dev/github.com/mattn/go-shellwords) +[![ci](https://github.com/mattn/go-shellwords/ci/badge.svg)](https://github.com/mattn/go-shellwords/actions) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz") +// envs should be ["FOO=foo", "BAR=baz"] +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
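Editor's note: beyond the README examples, the Parser defined below exposes a Getenv hook so environment expansion need not go through os.Getenv. A small sketch with invented values:

```go
package main

import (
	"fmt"

	shellwords "github.com/mattn/go-shellwords"
)

func main() {
	p := shellwords.NewParser()
	p.ParseEnv = true
	// Getenv, when set, replaces os.Getenv during $VAR expansion.
	p.Getenv = func(key string) string {
		if key == "IMAGE" {
			return "nginx:latest"
		}
		return ""
	}

	args, err := p.Parse("docker run $IMAGE")
	if err != nil {
		panic(err)
	}
	fmt.Println(args) // [docker run nginx:latest]
}
```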
+ +# License + +under the MIT License: http://mattn.mit-license.org/2017 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh new file mode 100644 index 000000000000..a7deaca96a01 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 000000000000..1b42a001709e --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,317 @@ +package shellwords + +import ( + "bytes" + "errors" + "os" + "strings" + "unicode" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(getenv func(string) string, s string) string { + if getenv == nil { + getenv = os.Getenv + } + + var buf bytes.Buffer + rs := []rune(s) + for i := 0; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + break + } + buf.WriteRune(rs[i]) + continue + } else if r == '$' { + i++ + if i == len(rs) { + buf.WriteRune(r) + break + } + if rs[i] == 0x7b { + i++ + p := i + for ; i < len(rs); i++ { + r = rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if r == 0x7d || (!unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r)) { + break + } + } + if r != 0x7d { + return s + } + if i > p { + buf.WriteString(getenv(s[p:i])) + } + } else { + p := i + for ; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) { + break + } + } + if i > p { + buf.WriteString(getenv(s[p:i])) + i-- + } else { + buf.WriteString(s[p:]) + } + } + } else { + buf.WriteRune(r) + } + } + return buf.String() +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool + Position int + Dir string + + // If ParseEnv is true, use this for getenv. + // If nil, use os.Getenv. + Getenv func(string) string +} + +func NewParser() *Parser { + return &Parser{ + ParseEnv: ParseEnv, + ParseBacktick: ParseBacktick, + Position: 0, + Dir: "", + } +} + +type argType int + +const ( + argNo argType = iota + argSingle + argQuoted +) + +func (p *Parser) Parse(line string) ([]string, error) { + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool + backtick := "" + + pos := -1 + got := argNo + + i := -1 +loop: + for _, r := range line { + i++ + if escaped { + buf += string(r) + escaped = false + got = argSingle + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote || dollarQuote { + buf += string(r) + backtick += string(r) + } else if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + buf = "" + got = argNo + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted && !dollarQuote { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)] + out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case ')': + if !singleQuoted && !doubleQuoted && !backQuote { + if p.ParseBacktick { + if dollarQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)-2] + out + } + backtick = "" + dollarQuote = !dollarQuote + continue + } + backtick = "" + dollarQuote = !dollarQuote + } + case '(': + if !singleQuoted && !doubleQuoted && !backQuote { + if !dollarQuote && strings.HasSuffix(buf, "$") { + dollarQuote = true + buf += "(" + continue + } else { + return nil, errors.New("invalid command line string") + } + } + case '"': + if !singleQuoted && !dollarQuote { + if doubleQuoted { + got = argQuoted + } + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted && !dollarQuote { + if singleQuoted { + got = argQuoted + } + singleQuoted = !singleQuoted + continue + } + case ';', '&', '|', '<', '>': + if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { + if r == '>' && len(buf) > 0 { + if c := buf[0]; '0' <= c && c <= '9' { + i -= 1 + got = argNo + } + } + pos = i + break loop + } + } + + got = argSingle + buf += string(r) + if backQuote || dollarQuote { + backtick += string(r) + } + } + + if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + } + + if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { + return nil, errors.New("invalid command line string") + } + + p.Position = pos + + return args, nil +} + +func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) { + _args, err := p.Parse(line) + if err != nil { + return nil, nil, err + } + envs = []string{} + args = []string{} + parsingEnv := true + for _, arg := range _args { + if parsingEnv && isEnv(arg) { + envs = append(envs, arg) + } else { + if parsingEnv { + parsingEnv = false + } + args = append(args, arg) + } + } + return envs, args, nil +} + +func isEnv(arg string) bool { + return len(strings.Split(arg, "=")) == 2 +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} + +func ParseWithEnvs(line string) (envs []string, args []string, err error) { + return NewParser().ParseWithEnvs(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 000000000000..b56a90120a90 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,29 @@ +// +build !windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("SHELL"); shell == "" { + shell = "/bin/sh" + } + cmd := exec.Command(shell, "-c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 000000000000..fd738a721126 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("COMSPEC"); shell == "" { + shell = "cmd" + } + cmd := exec.Command(shell, "/c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 000000000000..229851590442 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md
new file mode 100644
index 000000000000..f0fbd2e5c98c
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/README.md
@@ -0,0 +1,21 @@
+# copystructure
+
+copystructure is a Go library for deep copying values in Go.
+
+This allows you to copy Go values that may contain reference values
+such as maps, slices, or pointers, and copy their data as well instead
+of just their references.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/copystructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
+
+The `Copy` function has examples associated with it there.
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
new file mode 100644
index 000000000000..db6a6aa1a1f4
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go
@@ -0,0 +1,15 @@
+package copystructure
+
+import (
+ "reflect"
+ "time"
+)
+
+func init() {
+ Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+}
+
+func timeCopier(v interface{}) (interface{}, error) {
+ // Just... copy it.
+ return v.(time.Time), nil
+}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644
index 000000000000..8089e6670a34
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -0,0 +1,631 @@
+package copystructure
+
+import (
+ "errors"
+ "reflect"
+ "sync"
+
+ "github.com/mitchellh/reflectwalk"
+)
+
+const tagKey = "copy"
+
+// Copy returns a deep copy of v.
+//
+// Copy is unable to copy unexported fields in a struct (lowercase field names).
+// Unexported fields can't be reflected by the Go runtime and therefore
+// copystructure can't perform any data copies.
+//
+// For structs, copy behavior can be controlled with struct tags. For example:
+//
+// struct {
+// Name string
+// Data *bytes.Buffer `copy:"shallow"`
+// }
+//
+// The available tag values are:
+//
+// * "ignore" - The field will be ignored, effectively resulting in it being
+// assigned the zero value in the copy.
+//
+// * "shallow" - The field will be shallow copied. This means that reference
+// values such as pointers, maps, slices, etc. will be directly assigned
+// versus deep copied.
+//
+func Copy(v interface{}) (interface{}, error) {
+ return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value with type).
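Editor's note: the `copy` struct tags documented above are easy to misread; here is a minimal sketch of both tag values (type and field names invented for illustration):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Job struct {
	Name  string
	Out   *bytes.Buffer  `copy:"shallow"` // pointer assigned as-is, not walked
	Cache map[string]int `copy:"ignore"`  // left at its zero value in the copy
}

func main() {
	orig := &Job{Name: "build", Out: &bytes.Buffer{}, Cache: map[string]int{"a": 1}}

	// Must panics on error, which suits package-level initialization.
	dup := copystructure.Must(copystructure.Copy(orig)).(*Job)

	fmt.Println(dup.Name)            // build
	fmt.Println(dup.Out == orig.Out) // true: shallow copy shares the pointer
	fmt.Println(dup.Cache == nil)    // true: ignored field is zeroed
}
```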
+// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// ShallowCopiers is a map of pointer types that behave specially +// when they are copied. If a type is found in this map while deep +// copying, the pointer value will be shallow copied and not walked +// into. +// +// The key should be the type, obtained using: reflect.TypeOf(value +// with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. +func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc + + // ShallowCopiers is a map of pointer types that when they are + // shallow copied no matter where they are encountered. Use the + // global ShallowCopiers if this is nil. + ShallowCopiers map[reflect.Type]struct{} +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + w.copiers = c.Copiers + + if c.ShallowCopiers == nil { + c.ShallowCopiers = ShallowCopiers + } + w.shallowCopiers = c.ShallowCopiers + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + copiers map[reflect.Type]CopierFunc + shallowCopiers map[reflect.Type]struct{} + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. 
Store the interface
+ // types here, indexed by both the walk depth and the number of pointers
+ // already seen at that depth. Use ifaceKey to calculate the proper uint64
+ // value.
+ ifaceTypes map[uint64]reflect.Type
+
+ // any locks we've taken, indexed by depth
+ locks []sync.Locker
+ // take locks while walking the structure
+ useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+ w.depth++
+
+ // ensure we have enough elements to index via w.depth
+ for w.depth >= len(w.locks) {
+ w.locks = append(w.locks, nil)
+ }
+
+ for len(w.ps) < w.depth+1 {
+ w.ps = append(w.ps, 0)
+ }
+
+ return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+ locker := w.locks[w.depth]
+ w.locks[w.depth] = nil
+ if locker != nil {
+ defer locker.Unlock()
+ }
+
+ // clear out pointers and interfaces as we exit the stack
+ w.ps[w.depth] = 0
+
+ for k := range w.ifaceTypes {
+ mask := uint64(^uint32(0))
+ if k&mask == uint64(w.depth) {
+ delete(w.ifaceTypes, k)
+ }
+ }
+
+ w.depth--
+ if w.ignoreDepth > w.depth {
+ w.ignoreDepth = 0
+ }
+
+ if w.ignoring() {
+ return nil
+ }
+
+ switch l {
+ case reflectwalk.Array:
+ fallthrough
+ case reflectwalk.Map:
+ fallthrough
+ case reflectwalk.Slice:
+ w.replacePointerMaybe()
+
+ // Pop map off our container
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ // Pop off the key and value
+ mv := w.valPop()
+ mk := w.valPop()
+ m := w.cs[len(w.cs)-1]
+
+ // If mv is the zero value, SetMapIndex deletes the key from the map,
+ // or in this case never adds it. We need to create a properly typed
+ // zero value so that this key can be set.
+ if !mv.IsValid() {
+ mv = reflect.Zero(m.Elem().Type().Elem())
+ }
+ m.Elem().SetMapIndex(mk, mv)
+ case reflectwalk.ArrayElem:
+ // Pop off the value and the index and set it on the array
+ v := w.valPop()
+ i := w.valPop().Interface().(int)
+ if v.IsValid() {
+ a := w.cs[len(w.cs)-1]
+ ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
+ if ae.CanSet() {
+ ae.Set(v)
+ }
+ }
+ case reflectwalk.SliceElem:
+ // Pop off the value and the index and set it on the slice
+ v := w.valPop()
+ i := w.valPop().Interface().(int)
+ if v.IsValid() {
+ s := w.cs[len(w.cs)-1]
+ se := s.Elem().Index(i)
+ if se.CanSet() {
+ se.Set(v)
+ }
+ }
+ case reflectwalk.Struct:
+ w.replacePointerMaybe()
+
+ // Remove the struct from the container stack
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.StructField:
+ // Pop off the value and the field
+ v := w.valPop()
+ f := w.valPop().Interface().(reflect.StructField)
+ if v.IsValid() {
+ s := w.cs[len(w.cs)-1]
+ sf := reflect.Indirect(s).FieldByName(f.Name)
+
+ if sf.CanSet() {
+ sf.Set(v)
+ }
+ }
+ case reflectwalk.WalkLoc:
+ // Clear out the slices for GC
+ w.cs = nil
+ w.vals = nil
+ }
+
+ return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(m)
+
+ // Create the map.
If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Pointer(v reflect.Value) error { + if _, ok := w.shallowCopiers[v.Type()]; ok { + // Shallow copy this value. Use the same logic as primitive, then + // return skip. + if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := w.copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. 
+ w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. 
+ if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 000000000000..4f2ee4d97338 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 000000000000..f9c841a51e0d --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 000000000000..ac82cd2e159f --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. 
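The walker above is what gives copystructure its deep-copy behaviour, and reflectwalk (vendored next) supplies the traversal that drives it. A minimal usage sketch, assuming only the package's public Copy entry point (the nested map is an invented example):

// Deep copy of a nested value via the vendored copystructure package.
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	original := map[string]interface{}{
		"labels": map[string]string{"env": "dev"},
	}

	// Copy walks the value with reflectwalk and rebuilds every container.
	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}

	// The inner map is duplicated too, so mutating the copy leaves the
	// original untouched.
	dup.(map[string]interface{})["labels"].(map[string]string)["env"] = "prod"
	fmt.Println(original["labels"]) // map[env:dev]
}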
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 000000000000..6a7f176117f9 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 000000000000..70760cf4c705 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 000000000000..7fee7b050ba4 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,420 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) 
+type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = 
walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/dario.cat/mergo/LICENSE b/vendor/golang.org/x/exp/LICENSE similarity index 92% rename from vendor/dario.cat/mergo/LICENSE rename to vendor/golang.org/x/exp/LICENSE index 686680298da2..6a66aea5eafe 100644 --- a/vendor/dario.cat/mergo/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,5 +1,4 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 000000000000..733099041f84 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 000000000000..2c033dff47e9 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. 
+// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 000000000000..ecc0dabb74d9 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. +func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. 
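+//
+// For example (an illustrative call, not part of the vendored file):
+//
+//	m := map[string]int{"a": 1, "b": 2, "c": 3}
+//	maps.DeleteFunc(m, func(k string, v int) bool { return v > 1 })
+//	// m is now map[string]int{"a": 1}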
+func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 000000000000..fbf1934a0617 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 000000000000..5e8158bba869 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,499 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +package slices + +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[S ~[]E, E comparable](s1, s2 S) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using an equality +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. 
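+//
+// For example (illustrative):
+//
+//	Compare([]int{1, 2, 3}, []int{1, 2, 4}) // -1
+//	Compare([]int{1, 2, 3}, []int{1, 2})    // +1
+//	Compare([]int{1, 2}, []int{1, 2})       // 0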
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmpCompare(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[S ~[]E, E comparable](s S, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) + copy(s2[i:], v) + copy(s2[i+m:], s[i:]) + return s2 + } + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. 
+ copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// When DeleteFunc removes m elements, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage +// collected. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. 
+ // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. 
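+//
+// For example (illustrative): rotateLeft([]int{0, 1, 2, 3, 4}, 2) rearranges
+// the slice to [2 3 4 0 1]; each element moves two positions left, wrapping.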
+func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 000000000000..b67897f76b5d --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,195 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b. +// +// SortFunc requires that cmp is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + n := len(x) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { + for i := len(x) - 1; i > 0; i-- { + if cmpLess(x[i], x[i-1]) { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. 
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { + for i := len(x) - 1; i > 0; i-- { + if cmp(x[i], x[i-1]) < 0 { + return false + } + } + return true +} + +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +} + +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. 
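+	// Illustrative trace: for x = []int{1, 2, 4, 8} and target 3 under an
+	// ascending cmp, the loop below narrows to i == j == 2 (x[1] == 2 sorts
+	// before 3, x[2] == 4 does not), so the result is (2, false).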
+ i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go new file mode 100644 index 000000000000..06f2c7a2481b --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownCmpFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { + child++ + } + if !(cmp(data[first+root], data[first+child]) < 0) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownCmpFunc(data, i, hi, first, cmp) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownCmpFunc(data, lo, i, first, cmp) + } +} + +// pdqsortCmpFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
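+// SortFunc seeds limit with bits.Len(uint(n)), so roughly log2(n) unbalanced
+// partitions are tolerated before the heapsort fallback bounds the worst case
+// at O(n log n).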
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortCmpFunc(data, a, b, cmp) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortCmpFunc(data, a, b, cmp) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsCmpFunc(data, a, b, cmp) + limit-- + } + + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) + if hint == decreasingHint { + reverseRangeCmpFunc(data, a, b, cmp) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortCmpFunc(data, a, b, cmp) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) + a = mid + continue + } + + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortCmpFunc(data, a, mid, limit, cmp) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortCmpFunc(data, mid+1, b, limit, cmp) + b = mid + } + } +} + +// partitionCmpFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. +// On return, data[newpivot] = p +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumes that data[a:b] does not contain elements smaller than data[pivot]. +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(cmp(data[a], data[i]) < 0) { + i++ + } + for i <= j && (cmp(data[a], data[j]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(cmp(data[i], data[i-1]) < 0) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotCmpFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method.
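+//
+// The Tukey ninther is a median of medians: medianAdjacentCmpFunc below takes
+// the median of three adjacent elements at each of three probe points, and
+// medianCmpFunc then takes the median of those three results.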
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) + } + // Find the median among i, j, k and stores it into j. + j = medianCmpFunc(data, i, j, k, &swaps, cmp) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { + *swaps++ + return b, a + } + return a, b +} + +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) + return b +} + +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) +} + +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortCmpFunc(data, a, b, cmp) + a = b + b += blockSize + } + insertionSortCmpFunc(data, a, n, cmp) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeCmpFunc(data, a, a+blockSize, b, cmp) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeCmpFunc(data, a, m, n, cmp) + } + blockSize *= 2 + } +} + +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
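+//
+// For example (illustrative): with data = [1 3 5 2 4 6], calling
+// symMergeCmpFunc(data, 0, 3, 6, cmp) merges the sorted halves [1 3 5] and
+// [2 4 6] in place, yielding [1 2 3 4 5 6].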
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmp(data[h], data[a]) < 0 { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(cmp(data[m], data[h]) < 0) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(cmp(data[p-c], data[c]) < 0) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateCmpFunc(data, start, m, end, cmp) + } + if a < start && start < mid { + symMergeCmpFunc(data, a, start, mid, cmp) + } + if mid < end && end < b { + symMergeCmpFunc(data, mid, end, b, cmp) + } +} + +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeCmpFunc(data, m-i, m, j, cmp) + i -= j + } else { + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) + j -= i + } + } + // i == j + swapRangeCmpFunc(data, m-i, m, i, cmp) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 000000000000..99b47c3986a4 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { + child++ + } + if !cmpLess(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !cmpLess(data[a-1], data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && cmpLess(data[i], data[a]) {
+		i++
+	}
+	for i <= j && !cmpLess(data[j], data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && cmpLess(data[i], data[a]) {
+			i++
+		}
+		for i <= j && !cmpLess(data[j], data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !cmpLess(data[a], data[i]) {
+			i++
+		}
+		for i <= j && cmpLess(data[a], data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !cmpLess(data[i], data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
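Editor's aside (the vendored choosePivotOrdered implementation follows right after this note): the partition invariant in the comment reconstructed above is easiest to see on a concrete element type. A simplified sketch on ints, omitting the alreadyPartitioned fast path; the names and test data are mine:

```Go
package main

import "fmt"

// partition mirrors partitionOrdered's contract: on return,
// data[i] < p for i < newpivot, data[j] >= p for j > newpivot,
// and data[newpivot] == p.
func partition(data []int, a, b, pivot int) int {
	data[a], data[pivot] = data[pivot], data[a] // stash the pivot at the front
	p := data[a]
	i, j := a+1, b-1
	for {
		for i <= j && data[i] < p {
			i++
		}
		for i <= j && data[j] >= p {
			j--
		}
		if i > j {
			break
		}
		data[i], data[j] = data[j], data[i]
	}
	data[a], data[j] = data[j], data[a] // move the pivot into its final slot
	return j
}

func main() {
	data := []int{5, 9, 1, 7, 3, 8, 2}
	mid := partition(data, 0, len(data), 0)
	fmt.Println(data, "pivot at", mid) // [3 2 1 5 7 8 9] pivot at 3
}
```

Everything left of the returned index is strictly less than the pivot and everything right of it is at least the pivot, which is what lets pdqsortOrdered recurse on the two halves independently.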
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if cmpLess(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
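Editor's aside (the vendored symMergeOrdered body continues right after this note): all of these generated zsort* variants sit behind a small public surface. A quick sketch of how callers reach this code through golang.org/x/exp/slices: Sort takes the pdqsortOrdered path above, while SortStableFunc uses the insertion-sort-plus-symMerge path in stableCmpFunc. The user type and data are mine:

```Go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	ints := []int{3, 1, 2}
	slices.Sort(ints) // ordered path: pdqsortOrdered
	fmt.Println(ints) // [1 2 3]

	users := []user{{"bo", 30}, {"al", 30}, {"cy", 25}}
	// Stable path: equal ages keep their original relative order.
	slices.SortStableFunc(users, func(a, b user) int {
		switch {
		case a.age < b.age:
			return -1
		case a.age > b.age:
			return 1
		default:
			return 0
		}
	})
	fmt.Println(users) // [{cy 25} {bo 30} {al 30}]
}
```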
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmpLess(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !cmpLess(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !cmpLess(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index a7a8f73e3d11..b2a0b7c6a678 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,32 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. 
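Editor's aside, to make the rewritten package doc concrete: roughly what a minimal Load call looks like. This is a sketch, not the package's own Example function, and the pattern strings are illustrative:

```Go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// The LoadMode flags control how much detail is resolved per package.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	// Patterns are passed to the underlying build tool (the go command by
	// default, or a GOPACKAGESDRIVER program).
	pkgs, err := packages.Load(cfg, "fmt", "encoding/...")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.Name)
	}
}
```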
+ +Load may be used in Go projects that use alternative build systems, by +installing an appropriate "driver" program for the build system and +specifying its location in the GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +they identify. +(See driverRequest and driverResponse types for the JSON +schema used by the protocol. +Though the protocol is supported, these types are currently unexported; +see #64608 for a proposal to publish them.) + +Regardless of driver, all patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -64,7 +84,7 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. */ diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index bd79efc1aaf0..81e9e6a727da 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -27,7 +27,6 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -1015,10 +1014,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index e742ecc46440..11d5c8c3adf1 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -223,7 +223,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + if _, ok := obj.Type().(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. 
return "", fmt.Errorf("no path for %v", obj) @@ -283,7 +283,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { // generic named type return Path(r), nil } @@ -462,7 +462,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -505,7 +505,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } } return nil - case *typeparams.TypeParam: + case *types.TypeParam: name := T.Obj() if name == obj { return append(path, opObj) @@ -525,7 +525,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, opTypeParam, i) @@ -562,7 +562,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } // abstraction of *types.{Named,Signature} type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList + TypeParams() *types.TypeParamList } // abstraction of *types.{Named,TypeParam} type hasObj interface { @@ -664,7 +664,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = tparams.At(index) case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) + tparam, ok := t.(*types.TypeParam) if !ok { return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) } diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 000000000000..c0e8e731c900 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. +func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 6103dd7102b3..2ee8c70164f8 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -24,7 +24,6 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/tokeninternal" - "golang.org/x/tools/internal/typeparams" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -481,7 +480,7 @@ func (p *iexporter) doDecl(obj types.Object) { } // Function. 
- if typeparams.ForSignature(sig).Len() == 0 { + if sig.TypeParams().Len() == 0 { w.tag('F') } else { w.tag('G') @@ -494,7 +493,7 @@ func (p *iexporter) doDecl(obj types.Object) { // // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + if tparams := sig.TypeParams(); tparams.Len() > 0 { w.tparamList(obj.Name(), tparams, obj.Pkg()) } w.signature(sig) @@ -507,14 +506,14 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*typeparams.TypeParam); ok { + if tparam, ok := t.(*types.TypeParam); ok { w.tag('P') w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) + implicit = iface.IsImplicit() } w.bool(implicit) } @@ -535,17 +534,17 @@ func (p *iexporter) doDecl(obj types.Object) { panic(internalErrorf("%s is not a defined type", t)) } - if typeparams.ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { w.tag('T') } else { w.tag('U') } w.pos(obj.Pos()) - if typeparams.ForNamed(named).Len() > 0 { + if named.TypeParams().Len() > 0 { // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } underlying := obj.Type().Underlying() @@ -565,7 +564,7 @@ func (p *iexporter) doDecl(obj types.Object) { // Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() for i := 0; i < rparams.Len(); i++ { rparam := rparams.At(i) @@ -740,19 +739,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } switch t := t.(type) { case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) // TODO(rfindley): investigate if this position is correct, and if it // matters. 
w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(definedType) w.qualifiedType(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: w.startType(typeParamType) w.qualifiedType(t.Obj()) @@ -868,7 +867,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.signature(sig) } - case *typeparams.Union: + case *types.Union: w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) @@ -948,14 +947,14 @@ func (w *exportWriter) signature(sig *types.Signature) { } } -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) for i := 0; i < ts.Len(); i++ { w.typ(ts.At(i), pkg) } } -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) for i := 0; i < list.Len(); i++ { @@ -973,7 +972,7 @@ const blankMarker = "$" // differs from its actual object name: it is prefixed with a qualifier, and // blank type parameter names are disambiguated by their index in the type // parameter list. -func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { +func tparamExportName(prefix string, tparam *types.TypeParam) string { assert(prefix != "") name := tparam.Obj().Name() if name == "_" { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 8e64cf644fce..9bde15e3bc66 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -22,7 +22,6 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/typeparams" ) type intReader struct { @@ -321,7 +320,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) + d.t.SetConstraint(d.constraint) } for _, typ := range p.interfaceList { @@ -339,7 +338,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } type setConstraintArgs struct { - t *typeparams.TypeParam + t *types.TypeParam constraint types.Type } @@ -549,7 +548,7 @@ func (r *importReader) obj(name string) { r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) case 'F', 'G': - var tparams []*typeparams.TypeParam + var tparams []*types.TypeParam if tag == 'G' { tparams = r.tparamList() } @@ -566,7 +565,7 @@ func (r *importReader) obj(name string) { r.declare(obj) if tag == 'U' { tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) + named.SetTypeParams(tparams) } underlying := r.p.typAt(r.uint64(), named).Underlying() @@ -583,12 +582,12 @@ func (r *importReader) obj(name string) { // typeparams being used in the method sig/body). 
base := baseType(recv.Type()) assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam + targs := base.TypeArgs() + var rparams []*types.TypeParam if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) + rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) + rparams[i] = targs.At(i).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -606,7 +605,7 @@ func (r *importReader) obj(name string) { } name0 := tparamName(name) tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) + t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. @@ -622,7 +621,7 @@ func (r *importReader) obj(name string) { if iface == nil { errorf("non-interface constraint marked implicit") } - typeparams.MarkImplicit(iface) + iface.MarkImplicit() } // The constraint type may not be complete, if we // are in the middle of a type recursion involving type @@ -966,7 +965,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // The imported instantiated type doesn't include any methods, so // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) + t, _ := types.Instantiate(nil, baseType, targs, false) // Workaround for golang/go#61561. See the doc for instanceList for details. r.p.instanceList = append(r.p.instanceList, t) @@ -976,11 +975,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if r.p.version < iexportVersionGenerics { errorf("unexpected instantiation type") } - terms := make([]*typeparams.Term, r.uint64()) + terms := make([]*types.Term, r.uint64()) for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + terms[i] = types.NewTerm(r.bool(), r.typ()) } - return typeparams.NewUnion(terms) + return types.NewUnion(terms) } } @@ -1008,23 +1007,23 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } -func (r *importReader) tparamList() []*typeparams.TypeParam { +func (r *importReader) tparamList() []*types.TypeParam { n := r.uint64() if n == 0 { return nil } - xs := make([]*typeparams.TypeParam, n) + xs := make([]*types.TypeParam, n) for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. 
- xs[i] = r.typ().(*typeparams.TypeParam) + xs[i] = r.typ().(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index d0d0649fe2ac..cdab9885314f 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -42,7 +42,7 @@ func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Ex switch e := n.(type) { case *ast.IndexExpr: return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: + case *ast.IndexListExpr: return e.X, e.Lbrack, e.Indices, e.Rbrack } return nil, token.NoPos, nil, token.NoPos @@ -63,7 +63,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke Rbrack: rbrack, } default: - return &IndexListExpr{ + return &ast.IndexListExpr{ X: x, Lbrack: lbrack, Indices: indices, @@ -74,7 +74,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke // IsTypeParam reports whether t is a type parameter. func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) + _, ok := t.(*types.TypeParam) return ok } @@ -100,11 +100,11 @@ func OriginMethod(fn *types.Func) *types.Func { // Receiver is a *types.Interface. return fn } - if ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { // Receiver base has no type parameters, so we can avoid the lookup below. return fn } - orig := NamedTypeOrigin(named) + orig := named.Origin() gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: @@ -157,7 +157,7 @@ func OriginMethod(fn *types.Func) *types.Func { // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. @@ -167,9 +167,9 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { return types.AssignableTo(V, T) } - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { return types.AssignableTo(V, T) } @@ -182,7 +182,7 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { // Minor optimization: ensure we share a context across the two // instantiations below. 
if ctxt == nil { - ctxt = NewContext() + ctxt = types.NewContext() } var targs []types.Type @@ -190,12 +190,12 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { targs = append(targs, vtparams.At(i)) } - vinst, err := Instantiate(ctxt, V, targs, true) + vinst, err := types.Instantiate(ctxt, V, targs, true) if err != nil { panic("type parameters should satisfy their own constraints") } - tinst, err := Instantiate(ctxt, T, targs, true) + tinst, err := types.Instantiate(ctxt, T, targs, true) if err != nil { return false } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 71248209ee5e..7ea8840eab7c 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -108,15 +108,15 @@ func CoreType(T types.Type) types.Type { // // _NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { +func _NormalTerms(typ types.Type) ([]*types.Term, error) { switch typ := typ.(type) { - case *TypeParam: + case *types.TypeParam: return StructuralTerms(typ) - case *Union: + case *types.Union: return UnionTermSet(typ) case *types.Interface: return InterfaceTermSet(typ) default: - return []*Term{NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, typ)}, nil } } diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390e192..000000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d67148823c4d..000000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 9c631b6512de..93c80fdc96ce 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -60,7 +60,7 @@ var ErrEmptyTypeSet = errors.New("empty type set") // // StructuralTerms makes no guarantees about the order of terms, except that it // is deterministic. 
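Editor's aside: the hunks above and below all follow one pattern: the typeparams compatibility shims (typeparams.ForNamed, typeparams.NamedTypeArgs, typeparams.Instantiate, and so on) are replaced by the equivalent go/types API that has existed since Go 1.18. A hedged sketch of that API used directly; the source snippet and all names in it are mine:

```Go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"
)

const src = `package p

type Pair[K comparable, V any] struct {
	Key K
	Val V
}

var X = Pair[string, int]{Key: "a", Val: 1}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	// A non-nil Instances map replaces the old typeparams.InitInstanceInfo
	// and GetInstances shims: the checker records every instantiation in it.
	info := &types.Info{Instances: make(map[*ast.Ident]types.Instance)}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{file}, info)
	if err != nil {
		log.Fatal(err)
	}

	named := pkg.Scope().Lookup("Pair").Type().(*types.Named)
	fmt.Println(named.TypeParams().Len()) // 2; was typeparams.ForNamed(named).Len()

	for _, inst := range info.Instances {
		fmt.Println(inst.Type) // p.Pair[string, int]
	}
}
```

This is also why the packages.go hunk earlier in this patch can populate info.Instances directly instead of calling typeparams.InitInstanceInfo.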
-func StructuralTerms(tparam *TypeParam) ([]*Term, error) { +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { constraint := tparam.Constraint() if constraint == nil { return nil, fmt.Errorf("%s has nil constraint", tparam) @@ -78,7 +78,7 @@ func StructuralTerms(tparam *TypeParam) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { return computeTermSet(iface) } @@ -88,11 +88,11 @@ func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func UnionTermSet(union *Union) ([]*Term, error) { +func UnionTermSet(union *types.Union) ([]*types.Term, error) { return computeTermSet(union) } -func computeTermSet(typ types.Type) ([]*Term, error) { +func computeTermSet(typ types.Type) ([]*types.Term, error) { tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) if err != nil { return nil, err @@ -103,9 +103,9 @@ func computeTermSet(typ types.Type) ([]*Term, error) { if tset.terms.isAll() { return nil, nil } - var terms []*Term + var terms []*types.Term for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) + terms = append(terms, types.NewTerm(term.tilde, term.typ)) } return terms, nil } @@ -162,7 +162,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in tset.terms = allTermlist for i := 0; i < u.NumEmbeddeds(); i++ { embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { + if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } tset2, err := computeTermSetInternal(embedded, seen, depth+1) @@ -171,7 +171,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in } tset.terms = tset.terms.intersect(tset2.terms) } - case *Union: + case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil for i := 0; i < u.Len(); i++ { @@ -184,7 +184,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, err } terms = tset2.terms - case *TypeParam, *Union: + case *types.TypeParam, *types.Union: // A stand-alone type parameter or union is not permitted as union // term. return nil, fmt.Errorf("invalid union term %T", t) @@ -199,7 +199,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) } } - case *TypeParam: + case *types.TypeParam: panic("unreachable") default: // For all other types, the term set is just a single non-tilde term diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index 7ed86e1711b9..000000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. -func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. 
-func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) *types.Named { - return named -} - -// Term holds information about a structural type restriction. -type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index cf301af1dbe8..000000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). 
-func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) *types.Named { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go121.go b/vendor/golang.org/x/tools/internal/versions/versions.go similarity index 80% rename from vendor/golang.org/x/tools/internal/versions/versions_go121.go rename to vendor/golang.org/x/tools/internal/versions/versions.go index cf4a7d0360f1..e16f6c33a523 100644 --- a/vendor/golang.org/x/tools/internal/versions/versions_go121.go +++ b/vendor/golang.org/x/tools/internal/versions/versions.go @@ -2,11 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
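Editor's aside: the rename below collapses the two build-tagged variants of the versions package into one file whose behavior mirrors the standard library's go/version package (added in Go 1.22). On a 1.22+ toolchain the same answers come straight from the stdlib; a quick check, with expected outputs per the package docs:

```Go
package main

import (
	"fmt"
	"go/version"
)

func main() {
	fmt.Println(version.Lang("go1.21.2"))               // "go1.21"
	fmt.Println(version.Compare("go1.21", "go1.21rc1")) // -1: the language version sorts first
	fmt.Println(version.IsValid("1.21"))                // false: the "go" prefix is required
}
```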
-//go:build !go1.22 -// +build !go1.22 - package versions +// Note: If we use build tags to use go/versions when go >=1.22, +// we run into go.dev/issue/53737. Under some operations users would see an +// import of "go/versions" even if they would not compile the file. +// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include +// For this reason, this library just a clone of go/versions for the moment. + // Lang returns the Go language version for version x. // If x is not a valid version, Lang returns the empty string. // For example: diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go122.go b/vendor/golang.org/x/tools/internal/versions/versions_go122.go deleted file mode 100644 index c1c1814b28dd..000000000000 --- a/vendor/golang.org/x/tools/internal/versions/versions_go122.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/version" -) - -// Lang returns the Go language version for version x. -// If x is not a valid version, Lang returns the empty string. -// For example: -// -// Lang("go1.21rc2") = "go1.21" -// Lang("go1.21.2") = "go1.21" -// Lang("go1.21") = "go1.21" -// Lang("go1") = "go1" -// Lang("bad") = "" -// Lang("1.21") = "" -func Lang(x string) string { return version.Lang(x) } - -// Compare returns -1, 0, or +1 depending on whether -// x < y, x == y, or x > y, interpreted as Go versions. -// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". -// Invalid versions, including the empty string, compare less than -// valid versions and equal to each other. -// The language version "go1.21" compares less than the -// release candidate and eventual releases "go1.21rc1" and "go1.21.0". -// Custom toolchain suffixes are ignored during comparison: -// "go1.21.0" and "go1.21.0-bigcorp" are equal. -func Compare(x, y string) int { return version.Compare(x, y) } - -// IsValid reports whether the version x is valid. -func IsValid(x string) bool { return version.IsValid(x) } diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 000000000000..2683e4bb1f24 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE new file mode 100644 index 000000000000..866d74a7ad79 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md new file mode 100644 index 000000000000..08eb1babddfa --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -0,0 +1,150 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. + +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. + +and offers backwards +compatibility with YAML 1.1 in some cases. +1.2, including support for +anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v3*. + +To install it, run: + + go get gopkg.in/yaml.v3 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) + +API stability +------------- + +The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 000000000000..ae7d049f182a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
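Editor's aside: beyond the README's Marshal/Unmarshal example, the v3 feature that matters most for configuration loaders is the streaming Decoder with KnownFields, which turns unknown mapping keys into errors instead of silently dropping them. A small sketch; the Service struct and the input document are mine:

```Go
package main

import (
	"fmt"
	"log"
	"strings"

	"gopkg.in/yaml.v3"
)

type Service struct {
	Image string   `yaml:"image"`
	Ports []string `yaml:"ports"`
}

func main() {
	doc := "image: nginx:alpine\nports:\n  - \"8080:80\"\n"

	dec := yaml.NewDecoder(strings.NewReader(doc))
	dec.KnownFields(true) // unknown mapping keys become decode errors

	var svc Service
	if err := dec.Decode(&svc); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", svc) // {Image:nginx:alpine Ports:[8080:80]}
}
```

With KnownFields(true), a typo such as a `prots:` key fails loudly at decode time rather than producing a half-populated struct.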
+ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. 
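+// The string write handler appends emitted bytes to *output_buffer.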
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. 
+// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 000000000000..0173b6982e84 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,1000 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
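+	// Guard on parser.error as well, since the call may report success
+	// while still recording an error.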
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + 
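+		// A non-plain style (quoted, literal, folded) implies a string default.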
} + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) 
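+		// The type errors were collected above; report failure without
+		// aborting the whole decode. Only non-type errors abort below.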
+ return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
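+		// The 0.89 coefficient is the total drop across the range (0.99 - 0.10).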
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
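+		// Durations must be strings parseable by time.ParseDuration;
+		// plain numbers are rejected below.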
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
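+		// Decode into a []interface{} first, then store the result
+		// back into the interface once the elements are filled in.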
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + 
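+		// n.Content alternates keys and values: n.Content[i] is a key,
+		// n.Content[i+1] its value.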
ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 000000000000..0f47c9ca8add --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,2020 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
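+//
+// The event is appended to the queue and processed once enough lookahead
+// is buffered (see yaml_emitter_need_more_events below).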
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } + } + return true +} + +// State dispatcher. 
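+//
+// Routes the event to the handler for the emitter's current state; the
+// handlers themselves queue up the follow-up states.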
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
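+//
+// Also settles the emitter's defaults: encoding, indentation (clamped to
+// 2..9), preferred width, and line breaks, and writes a BOM for non-UTF-8
+// encodings.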
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
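+//
+// As a rough, non-normative sketch of what this state produces: the event
+// stream SEQUENCE-START(flow), SCALAR("a"), SCALAR("b"), SEQUENCE-END is
+// rendered as
+//
+//	[a, b]
+//
+// The first call writes '[' and raises the indent, later calls write the
+// ',' separators, and SEQUENCE-END pops the indent and writes ']'.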
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. 
+ emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. 
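+//
+// Style selection mirrors yaml_emitter_emit_sequence_start above: flow
+// style is used when already inside a flow collection, in canonical mode,
+// when the event asks for it, or when the mapping is empty; otherwise the
+// mapping is emitted in block style.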
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
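+//
+// Broadly, the requested style is downgraded until one fits the analyzed
+// value: plain falls back to single-quoted (e.g. for empty strings in a
+// flow context, or when the analysis forbids plain), single-quoted falls
+// back to double-quoted, and literal/folded fall back to double-quoted
+// inside flow context or simple keys.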
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
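+//
+// This simply dispatches on the style chosen by
+// yaml_emitter_select_scalar_style; e.g. a multiline value in block
+// context is normally rendered in '|' literal or '>' folded form rather
+// than as an escaped quoted string.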
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.line_comment) == 0 {
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!'
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
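+//
+// Besides validating the event, this pre-computes the anchor, tag and
+// scalar analysis (emitter.anchor_data, emitter.tag_data,
+// emitter.scalar_data) that the emit functions above consume.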
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 000000000000..de9e72a3e638 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for
+// YAML 1.1 parsing.
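+//
+// For example (informally), marshalling the Go string "y" or "on" should
+// come out as the quoted scalars "y" / "on", which a YAML 1.1 parser
+// would otherwise read as booleans.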
+func isOldBool(s string) (result bool) {
+	switch s {
+	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// Zero nodes behave as nil.
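+	// For instance, encoding a freshly allocated *Node (Kind == 0 and no
+	// content set) is expected to produce "null", just like encoding nil.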
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 000000000000..268558a0d632 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1258 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//                          ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow-up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go new file mode 100644 index 000000000000..b7de0a89c462 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
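The encoding detection above only ever inspects the first two or three raw bytes of the stream. As a rough standalone sketch of the same decision table (illustrative only, not part of the vendored file):

package main

import (
	"bytes"
	"fmt"
)

// sniffEncoding mirrors the BOM check above for a plain byte slice.
func sniffEncoding(b []byte) string {
	switch {
	case bytes.HasPrefix(b, []byte("\xff\xfe")):
		return "UTF-16LE"
	case bytes.HasPrefix(b, []byte("\xfe\xff")):
		return "UTF-16BE"
	case bytes.HasPrefix(b, []byte("\xef\xbb\xbf")):
		return "UTF-8 (with BOM)"
	default:
		// No BOM: the reader assumes UTF-8, as the vendored code does.
		return "UTF-8 (assumed)"
	}
}

func main() {
	fmt.Println(sniffEncoding([]byte("\xef\xbb\xbfkey: value"))) // UTF-8 (with BOM)
	fmt.Println(sniffEncoding([]byte("key: value")))             // UTF-8 (assumed)
}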
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// as much, and there are tests that rely on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is, in Go, panicking; or, in C, accessing invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character. Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
+				if value2&0xFC00 != 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"expected low surrogate area",
+						parser.offset+2, int(value2))
+				}
+
+				// Generate the value of the surrogate pair.
+				value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+			} else {
+				width = 2
+			}
+
+		default:
+			panic("impossible")
+		}
+
+		// Check if the character is in the allowed range:
+		//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+		//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+		//      | [#x10000-#x10FFFF]                        (32 bit)
+		switch {
+		case value == 0x09:
+		case value == 0x0A:
+		case value == 0x0D:
+		case value >= 0x20 && value <= 0x7E:
+		case value == 0x85:
+		case value >= 0xA0 && value <= 0xD7FF:
+		case value >= 0xE000 && value <= 0xFFFD:
+		case value >= 0x10000 && value <= 0x10FFFF:
+		default:
+			return yaml_parser_set_reader_error(parser,
+				"control characters are not allowed",
+				parser.offset, int(value))
+		}
+
+		// Move the raw pointers.
+		parser.raw_buffer_pos += width
+		parser.offset += width
+
+		// Finally put the character into the buffer.
+		if value <= 0x7F {
+			// 0000 0000-0000 007F . 0xxxxxxx
+			parser.buffer[buffer_len+0] = byte(value)
+			buffer_len += 1
+		} else if value <= 0x7FF {
+			// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+			parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+			parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+			buffer_len += 2
+		} else if value <= 0xFFFF {
+			// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+			parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+			parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+			parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+			buffer_len += 3
+		} else {
+			// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+			parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+			parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+			parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+			parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+			buffer_len += 4
+		}
+
+		parser.unread++
+	}
+
+	// On EOF, put NUL into the buffer and return.
+	if parser.eof {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+		parser.unread++
+		break
+	}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is, in Go, panicking; or, in C, accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 000000000000..64ae888057a5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, boolTag, []string{"true", "True", "TRUE"}},
+		{false, boolTag, []string{"false", "False", "FALSE"}},
+		{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", mergeTag, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const (
+	nullTag      = "!!null"
+	boolTag      = "!!bool"
+	strTag       = "!!str"
+	intTag       = "!!int"
+	floatTag     = "!!float"
+	timestampTag = "!!timestamp"
+	seqTag       = "!!seq"
+	mapTag       = "!!map"
+	binaryTag    = "!!binary"
+	mergeTag     = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+	for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+		ltag := longTag(stag)
+		longTags[stag] = ltag
+		shortTags[ltag] = stag
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	if strings.HasPrefix(tag, longTagPrefix) {
+		if stag, ok := shortTags[tag]; ok {
+			return stag
+		}
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		if ltag, ok := longTags[tag]; ok {
+			return ltag
+		}
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	tag = shortTag(tag)
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, strTag, binaryTag:
+			return
+		case floatTag:
+			if rtag == intTag {
+				switch v := out.(type) {
+				case int64:
+					rtag = floatTag
+					out = float64(v)
+					return
+				case int:
+					rtag = floatTag
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != strTag && tag != binaryTag {
+		// Handle things we can look up in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return floatTag, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == timestampTag {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return timestampTag, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return intTag, int(intv)
+				} else {
+					return intTag, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return intTag, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return floatTag, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					// The condition below is always true, so negative binary
+					// literals are always returned as a plain int.
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+			// Octals as introduced in version 1.2 of the spec.
+			// Octals from the 1.1 spec, spelled as 0777, are still
+			// decoded by default in v3 as well for compatibility.
+			// May be dropped in v4 depending on how usage evolves.
+			if strings.HasPrefix(plain, "0o") {
+				intv, err := strconv.ParseInt(plain[2:], 8, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0o") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+				if err == nil {
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+		default:
+			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return strTag, in
+}
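This resolver is what gives plain (unquoted) scalars their types: the first byte selects a hint, the literal map above handles booleans, null, and the infinities, and the numeric paths try int, uint, and float in turn. Note from the map that only true/True/TRUE and false/False/FALSE resolve to booleans, so "yes" stays a string. A small sketch of the observable behaviour through the public API (illustrative only, not part of the vendored file):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var m map[string]interface{}
	// Plain scalars are typed by the resolver; quoting forces !!str.
	doc := "a: yes\nb: true\nc: .inf\nd: 0x10\ne: \"123\"\n"
	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
		panic(err)
	}
	for _, k := range []string{"a", "b", "c", "d", "e"} {
		fmt.Printf("%s: %T = %v\n", k, m[k], m[k])
	}
	// a: string = yes
	// b: bool = true
	// c: float64 = +Inf
	// d: int = 16
	// e: string = 123
}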
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
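Because these layouts use single-digit reference fields ("2006-1-2"), time.Parse accepts both one- and two-digit month and day values, and the ".999999999" fraction is optional. A quick illustrative check of the same layouts (standalone sketch, not part of the vendored file):

package main

import (
	"fmt"
	"time"
)

func main() {
	// The same layouts the resolver tries, in order.
	layouts := []string{
		"2006-1-2T15:4:5.999999999Z07:00",
		"2006-1-2t15:4:5.999999999Z07:00",
		"2006-1-2 15:4:5.999999999",
		"2006-1-2",
	}
	for _, s := range []string{"2001-12-14", "2001-12-14 21:59:43.10"} {
		for _, layout := range layouts {
			if t, err := time.Parse(layout, s); err == nil {
				fmt.Printf("%q -> %s\n", s, t.UTC().Format(time.RFC3339Nano))
				break // first matching layout wins, as in parseTimestamp below
			}
		}
	}
}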
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 000000000000..ca0070108f4e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3038 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent (LL(1)) parser.
+//
+// There are really only two aspects of scanning that might be called "clever";
+// the rest is quite straightforward. Those two are "block collection start"
+// and "simple keys", and both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML 1.1
+//      %TAG ! !foo
+//      %TAG !yaml! tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
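+	// Directives are only recognized at column 0, before the document
+	// content starts, e.g.:
+	//
+	//      %YAML 1.1
+	//      %TAG !e! tag:example.com,2000:app/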
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
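+	// e.g. 'here''s to "quotes"' (inside single quotes, a quote is
+	// escaped by doubling it rather than with a backslash).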
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
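+//
+// A simple key is a key that is not marked with the '?' indicator, such as
+// "a" in "a: 1". The scanner cannot know it is a key until the following
+// ':' is seen, so the position is only recorded as a potential simple key
+// here; yaml_parser_fetch_value later inserts the KEY token retroactively.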
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
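+		// A number > -1 inserts the token at that position in the queue
+		// (after adjusting for tokens already consumed) rather than
+		// appending it; this is how a BLOCK-MAPPING-START can end up
+		// before a simple KEY token that was already emitted.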
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
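+		// For example, scanning "a: 1" at the top level emits, in order,
+		// BLOCK-MAPPING-START, KEY, SCALAR("a",plain), VALUE, and
+		// SCALAR("1",plain); the first two only materialize here, once
+		// the ':' proves that "a" really was a key.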
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
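+//
+// Plain (unquoted) scalars are the only style subject to implicit tag
+// resolution later on (see resolve.go): a plain 12 may decode as an !!int,
+// while a quoted "12" always stays a !!str.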
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
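+	// The name scanned above selects between the two supported directives:
+	// "%YAML 1.1" yields the name "YAML", "%TAG !e! prefix" yields "TAG",
+	// and anything else is rejected below as an unknown directive.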
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
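+	// A tag must be followed by a blank or a line break; e.g. in "!!str 12"
+	// the tag ends at the space, while the verbatim form
+	// "!<tag:yaml.org,2002:str>" was already closed by '>' above.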
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
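+		// (A block scalar header accepts the indentation and chomping
+		// indicators in either order, so both "|2-" and "|-2" are valid.)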
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
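+			// (Folding: a single line break between plain-scalar lines becomes
+			// one space, while any additional breaks are preserved literally
+			// via trailing_breaks below.)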
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
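+	// (Trailing line breaks were consumed above, so the scanner now sits at
+	// the start of a fresh line, where a new simple key may begin.)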
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 000000000000..9210ece7e972 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
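+// It is only called once keyFloat has classified both values as numeric
+// (or boolean), which is why any other kind panics below.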
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 000000000000..b8a116bf9a22 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 000000000000..8cec6da48d3e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,698 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+//	https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
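+//
+// A minimal usage sketch for a multi-document stream (illustrative only:
+// r stands for any io.Reader, and error handling is reduced to the io.EOF
+// check that signals the end of the stream):
+//
+//	dec := NewDecoder(r)
+//	for {
+//		var doc map[string]interface{}
+//		err := dec.Decode(&doc)
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			// handle the error
+//		}
+//		// use doc
+//	}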
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty    Only include the field if it's not set to the zero
+//	             value for the type or to empty slices or maps.
+//	             Zero valued structs will be omitted if all their public
+//	             fields are zero, unless they implement an IsZero
+//	             method (see the IsZeroer interface type), in which
+//	             case the field will be excluded if IsZero returns true.
+//
+//	flow         Marshal using a flow style (useful for structs,
+//	             sequences and maps).
+//
+//	inline       Inline the field, which must be a struct or a map,
+//	             causing all of its fields or keys to be processed as if
+//	             they were part of the outer struct. For maps, keys must
+//	             not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
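+//
+// A minimal usage sketch (illustrative only; v1 and v2 stand for any
+// marshalable values, and errors are ignored for brevity):
+//
+//	var buf bytes.Buffer
+//	enc := NewEncoder(&buf)
+//	_ = enc.Encode(v1) // first document, written without a separator
+//	_ = enc.Encode(v2) // preceded by a "---" document separator
+//	_ = enc.Close()    // flush any buffered output to buf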
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//	var person struct {
+//		Name    string
+//		Address yaml.Node
+//	}
+//	err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//	var person Node
+//	err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type
+// for the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
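+					// The inlined type implements Unmarshaler itself, so record
+					// its field index for the decoder to invoke directly.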
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 000000000000..7c6d00770619 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,807 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. 
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 000000000000..e88f9c54aecb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 60a28b389e53..7569a2cd32a2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,3 @@ -# dario.cat/mergo v1.0.0 -## explicit; go 1.13 -dario.cat/mergo # github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 ## explicit; go 1.16 github.com/Azure/go-ansiterm @@ -21,6 +18,24 @@ github.com/beorn7/perks/quantile # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/compose-spec/compose-go/v2 v2.0.0-rc.7 +## explicit; go 1.21 +github.com/compose-spec/compose-go/v2/consts +github.com/compose-spec/compose-go/v2/dotenv +github.com/compose-spec/compose-go/v2/errdefs +github.com/compose-spec/compose-go/v2/format +github.com/compose-spec/compose-go/v2/graph +github.com/compose-spec/compose-go/v2/interpolation +github.com/compose-spec/compose-go/v2/loader +github.com/compose-spec/compose-go/v2/override +github.com/compose-spec/compose-go/v2/paths +github.com/compose-spec/compose-go/v2/schema +github.com/compose-spec/compose-go/v2/template +github.com/compose-spec/compose-go/v2/transform +github.com/compose-spec/compose-go/v2/tree +github.com/compose-spec/compose-go/v2/types +github.com/compose-spec/compose-go/v2/utils +github.com/compose-spec/compose-go/v2/validation # github.com/containerd/containerd v1.7.12 ## explicit; go 1.19 github.com/containerd/containerd/errdefs @@ -171,15 +186,24 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth +# github.com/mattn/go-shellwords v1.0.12 +## explicit; go 1.13 +github.com/mattn/go-shellwords # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 github.com/miekg/pkcs11 +# github.com/mitchellh/copystructure v1.2.0 +## explicit; go 1.15 +github.com/mitchellh/copystructure # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure +# github.com/mitchellh/reflectwalk v1.0.2 +## explicit +github.com/mitchellh/reflectwalk # github.com/moby/patternmatcher v0.6.0 ## explicit; go 1.19 github.com/moby/patternmatcher @@ -305,14 +329,19 @@ go.opentelemetry.io/otel/metric/embedded # go.opentelemetry.io/otel/trace v1.19.0 ## explicit; go 1.20 go.opentelemetry.io/otel/trace -# golang.org/x/crypto v0.17.0 +# golang.org/x/crypto v0.18.0 ## explicit; go 1.18 golang.org/x/crypto/ed25519 golang.org/x/crypto/pbkdf2 +# golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 +## explicit; go 1.20 +golang.org/x/exp/constraints +golang.org/x/exp/maps +golang.org/x/exp/slices # golang.org/x/mod v0.14.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net 
v0.19.0 +# golang.org/x/net v0.20.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -328,7 +357,7 @@ golang.org/x/sync/errgroup golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.15.0 +# golang.org/x/term v0.16.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -341,7 +370,7 @@ golang.org/x/text/width # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.16.0 +# golang.org/x/tools v0.17.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/gcexportdata @@ -452,6 +481,9 @@ google.golang.org/protobuf/types/known/timestamppb # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 +# gopkg.in/yaml.v3 v3.0.1 +## explicit +gopkg.in/yaml.v3 # gotest.tools/v3 v3.5.1 ## explicit; go 1.17 gotest.tools/v3/assert
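
Reviewer note (not part of the patch): for orientation, below is a minimal, self-contained sketch of how the compose-go v2 loader vendored above is typically driven. It assumes the v2 API as listed in vendor/modules.txt (loader.LoadWithContext, types.ConfigDetails, types.NewMapping); the "compose.yaml" path and the "example" project name are placeholders for illustration, not values taken from this change.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/compose-spec/compose-go/v2/loader"
	"github.com/compose-spec/compose-go/v2/types"
)

func main() {
	// Read a compose file; "compose.yaml" is a placeholder path.
	content, err := os.ReadFile("compose.yaml")
	if err != nil {
		panic(err)
	}

	// ConfigDetails bundles the raw file content with the environment
	// used for ${VAR} interpolation.
	details := types.ConfigDetails{
		WorkingDir: ".",
		ConfigFiles: []types.ConfigFile{
			{Filename: "compose.yaml", Content: content},
		},
		Environment: types.NewMapping(os.Environ()),
	}

	// The loader parses, interpolates, merges, and validates the input
	// into a typed Project; the project name is set imperatively here.
	project, err := loader.LoadWithContext(context.Background(), details,
		func(o *loader.Options) {
			o.SetProjectName("example", true)
		})
	if err != nil {
		panic(err)
	}

	// In compose-go v2, Project.Services is a map keyed by service name.
	for name := range project.Services {
		fmt.Println(name)
	}
}

Parsing, interpolation, and schema validation all happen inside the loader, which is presumably why gopkg.in/yaml.v3 and the helper modules listed above (mattn/go-shellwords, mitchellh/copystructure, mitchellh/reflectwalk, golang.org/x/exp) are vendored alongside it.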