From 785f737095b4ecbb1a48e0e1d53da0167970c324 Mon Sep 17 00:00:00 2001 From: lgtm <1gtm@users.noreply.github.com> Date: Mon, 29 May 2023 10:35:54 -0700 Subject: [PATCH] Prepare for release v0.12.0 (#27) ProductLine: Stash Release: v2023.05.31 Release-tracker: https://github.com/stashed/CHANGELOG/pull/66 Signed-off-by: 1gtm <1gtm@appscode.com> Signed-off-by: Tamal Saha --- go.mod | 39 +- go.sum | 84 +- .../Masterminds/semver/v3/.golangci.yml | 23 +- .../Masterminds/semver/v3/CHANGELOG.md | 20 + .../github.com/Masterminds/semver/v3/Makefile | 17 +- .../Masterminds/semver/v3/README.md | 22 +- .../Masterminds/semver/v3/SECURITY.md | 19 + .../Masterminds/semver/v3/constraints.go | 32 +- .../github.com/Masterminds/semver/v3/doc.go | 178 +- .../github.com/Masterminds/semver/v3/fuzz.go | 22 - .../Masterminds/semver/v3/version.go | 47 +- .../github.com/PuerkitoBio/purell/.travis.yml | 12 - vendor/github.com/PuerkitoBio/purell/LICENSE | 3 +- .../github.com/PuerkitoBio/purell/README.md | 2 +- .../github.com/PuerkitoBio/purell/purell.go | 5 +- .../PuerkitoBio/{urlesc => purell}/urlesc.go | 12 +- .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 - vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 - .../github.com/PuerkitoBio/urlesc/README.md | 16 - .../github.com/fsnotify/fsnotify/.gitignore | 10 +- vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 - .../github.com/fsnotify/fsnotify/CHANGELOG.md | 113 + .../fsnotify/fsnotify/CONTRIBUTING.md | 72 +- vendor/github.com/fsnotify/fsnotify/LICENSE | 47 +- vendor/github.com/fsnotify/fsnotify/README.md | 205 +- .../fsnotify/fsnotify/backend_fen.go | 162 + .../fsnotify/fsnotify/backend_inotify.go | 459 ++ .../fsnotify/fsnotify/backend_kqueue.go | 707 ++ .../fsnotify/fsnotify/backend_other.go | 66 + .../fsnotify/fsnotify/backend_windows.go | 746 ++ vendor/github.com/fsnotify/fsnotify/fen.go | 38 - .../github.com/fsnotify/fsnotify/fsnotify.go | 80 +- .../fsnotify/fsnotify/fsnotify_unsupported.go | 36 - .../github.com/fsnotify/fsnotify/inotify.go | 351 - .../fsnotify/fsnotify/inotify_poller.go | 187 - vendor/github.com/fsnotify/fsnotify/kqueue.go | 535 -- vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 + .../{open_mode_bsd.go => system_bsd.go} | 4 - .../{open_mode_darwin.go => system_darwin.go} | 4 - .../github.com/fsnotify/fsnotify/windows.go | 586 -- .../inconshreveable/mousetrap/trap_others.go | 1 + .../inconshreveable/mousetrap/trap_windows.go | 88 +- .../mousetrap/trap_windows_1.4.go | 46 - vendor/github.com/spf13/cobra/.golangci.yml | 2 +- vendor/github.com/spf13/cobra/Makefile | 8 +- vendor/github.com/spf13/cobra/README.md | 4 +- vendor/github.com/spf13/cobra/active_help.go | 2 +- vendor/github.com/spf13/cobra/args.go | 4 +- .../spf13/cobra/bash_completions.go | 4 +- .../spf13/cobra/bash_completionsV2.go | 71 +- vendor/github.com/spf13/cobra/cobra.go | 6 +- vendor/github.com/spf13/cobra/command.go | 75 +- .../github.com/spf13/cobra/command_notwin.go | 2 +- vendor/github.com/spf13/cobra/command_win.go | 2 +- vendor/github.com/spf13/cobra/completions.go | 13 +- .../spf13/cobra/fish_completions.go | 78 +- vendor/github.com/spf13/cobra/flag_groups.go | 2 +- .../spf13/cobra/powershell_completions.go | 27 +- .../spf13/cobra/projects_using_cobra.md | 6 +- .../spf13/cobra/shell_completions.go | 2 +- .../spf13/cobra/shell_completions.md | 30 +- vendor/github.com/spf13/cobra/user_guide.md | 46 +- .../github.com/spf13/cobra/zsh_completions.go | 17 +- .../license-verifier/info/lib.go | 85 +- vendor/golang.org/x/crypto/cryptobyte/asn1.go | 8 + 
vendor/golang.org/x/net/http2/pipe.go | 6 +- vendor/golang.org/x/net/http2/server.go | 7 +- vendor/golang.org/x/net/http2/transport.go | 41 +- vendor/golang.org/x/sys/cpu/hwcap_linux.go | 15 + vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 + .../x/sys/cpu/runtime_auxv_go121.go | 19 + vendor/golang.org/x/sys/unix/ioctl_signed.go | 70 + .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 21 +- vendor/golang.org/x/sys/unix/ioctl_zos.go | 20 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 5 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 6 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 6 + vendor/golang.org/x/sys/unix/syscall_aix.go | 7 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - .../x/sys/unix/syscall_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +- .../golang.org/x/sys/unix/syscall_darwin.go | 15 +- .../x/sys/unix/syscall_darwin_amd64.go | 1 + .../x/sys/unix/syscall_darwin_arm64.go | 1 + .../x/sys/unix/syscall_dragonfly.go | 2 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 44 +- .../x/sys/unix/syscall_freebsd_386.go | 17 +- .../x/sys/unix/syscall_freebsd_amd64.go | 17 +- .../x/sys/unix/syscall_freebsd_arm.go | 15 +- .../x/sys/unix/syscall_freebsd_arm64.go | 15 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 15 +- vendor/golang.org/x/sys/unix/syscall_hurd.go | 8 + vendor/golang.org/x/sys/unix/syscall_linux.go | 46 +- .../x/sys/unix/syscall_linux_386.go | 27 - .../x/sys/unix/syscall_linux_amd64.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 27 - .../x/sys/unix/syscall_linux_arm64.go | 10 - .../x/sys/unix/syscall_linux_loong64.go | 5 - .../x/sys/unix/syscall_linux_mips64x.go | 1 - .../x/sys/unix/syscall_linux_mipsx.go | 27 - .../x/sys/unix/syscall_linux_ppc.go | 27 - .../x/sys/unix/syscall_linux_ppc64x.go | 1 - .../x/sys/unix/syscall_linux_riscv64.go | 1 - .../x/sys/unix/syscall_linux_s390x.go | 1 - .../x/sys/unix/syscall_linux_sparc64.go | 1 - .../golang.org/x/sys/unix/syscall_netbsd.go | 7 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 2 +- .../golang.org/x/sys/unix/syscall_solaris.go | 36 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 7 + .../x/sys/unix/syscall_zos_s390x.go | 6 +- .../x/sys/unix/zerrors_darwin_amd64.go | 19 + .../x/sys/unix/zerrors_darwin_arm64.go | 19 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 24 +- .../x/sys/unix/zptrace_armnn_linux.go | 8 +- .../x/sys/unix/zptrace_linux_arm64.go | 4 +- .../x/sys/unix/zptrace_mipsnn_linux.go | 8 +- .../x/sys/unix/zptrace_mipsnnle_linux.go | 8 +- .../x/sys/unix/zptrace_x86_linux.go | 8 +- .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 23 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 24 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 17 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 55 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 11 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 55 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 11 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 20 +- .../x/sys/unix/zsyscall_freebsd_386.go | 30 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 30 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 30 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 30 +- .../x/sys/unix/zsyscall_freebsd_riscv64.go | 30 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 20 +- .../x/sys/unix/zsyscall_linux_386.go | 10 - .../x/sys/unix/zsyscall_linux_amd64.go | 10 - .../x/sys/unix/zsyscall_linux_arm.go | 10 - .../x/sys/unix/zsyscall_linux_arm64.go | 10 - .../x/sys/unix/zsyscall_linux_mips.go | 10 - .../x/sys/unix/zsyscall_linux_mips64.go | 10 - 
.../x/sys/unix/zsyscall_linux_mips64le.go | 10 - .../x/sys/unix/zsyscall_linux_mipsle.go | 10 - .../x/sys/unix/zsyscall_linux_ppc.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 10 - .../x/sys/unix/zsyscall_linux_riscv64.go | 10 - .../x/sys/unix/zsyscall_linux_s390x.go | 10 - .../x/sys/unix/zsyscall_linux_sparc64.go | 10 - .../x/sys/unix/zsyscall_netbsd_386.go | 20 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 20 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 20 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 20 +- .../x/sys/unix/zsyscall_openbsd_386.go | 22 +- .../x/sys/unix/zsyscall_openbsd_386.s | 5 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 22 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 - .../x/sys/unix/zsyscall_openbsd_arm.go | 22 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 5 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 22 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 22 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 - .../x/sys/unix/zsyscall_openbsd_ppc64.go | 22 +- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 - .../x/sys/unix/zsyscall_openbsd_riscv64.go | 22 +- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 - .../x/sys/unix/zsyscall_solaris_amd64.go | 26 +- .../x/sys/unix/zsyscall_zos_s390x.go | 12 +- .../x/sys/unix/ztypes_darwin_amd64.go | 11 + .../x/sys/unix/ztypes_darwin_arm64.go | 11 + .../x/sys/unix/ztypes_freebsd_386.go | 2 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 2 +- .../x/sys/unix/ztypes_freebsd_arm.go | 2 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 2 +- .../x/sys/unix/ztypes_freebsd_riscv64.go | 2 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 140 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 2 +- .../x/sys/unix/ztypes_linux_amd64.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2 +- .../x/sys/unix/ztypes_linux_arm64.go | 2 +- .../x/sys/unix/ztypes_linux_loong64.go | 2 +- .../x/sys/unix/ztypes_linux_mips.go | 2 +- .../x/sys/unix/ztypes_linux_mips64.go | 2 +- .../x/sys/unix/ztypes_linux_mips64le.go | 2 +- .../x/sys/unix/ztypes_linux_mipsle.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 2 +- .../x/sys/unix/ztypes_linux_riscv64.go | 2 +- .../x/sys/unix/ztypes_linux_s390x.go | 2 +- .../x/sys/unix/ztypes_linux_sparc64.go | 2 +- .../golang.org/x/sys/windows/env_windows.go | 6 +- .../golang.org/x/sys/windows/exec_windows.go | 7 +- vendor/golang.org/x/sys/windows/service.go | 7 + .../x/sys/windows/syscall_windows.go | 6 +- .../golang.org/x/sys/windows/types_windows.go | 95 +- .../x/sys/windows/zsyscall_windows.go | 36 + .../x/text/encoding/internal/internal.go | 2 +- .../x/text/unicode/norm/forminfo.go | 2 +- .../gomodules.xyz/jsonpatch/v2/jsonpatch.go | 160 +- vendor/kmodules.xyz/client-go/Makefile | 22 +- .../kmodules.xyz/client-go/api/v1/object.go | 30 +- .../client-go/api/v1/object_enum.go | 145 + .../kmodules.xyz/client-go/meta/conditions.go | 164 + .../appcatalog/v1alpha1/appbinding_types.go | 82 +- .../apis/appcatalog/v1alpha1/generated.pb.go | 4362 ---------- .../apis/appcatalog/v1alpha1/generated.proto | 54 +- .../appcatalog/v1alpha1/openapi_generated.go | 820 +- .../apis/appcatalog/v1alpha1/stash_types.go | 16 +- .../v1alpha1/zz_generated.deepcopy.go | 7 + .../appcatalog.appscode.com_appbindings.yaml | 16 + .../offshoot-api/api/v1/generated.pb.go | 7064 +++++++++++------ .../offshoot-api/api/v1/generated.proto | 414 +- 
.../offshoot-api/api/v1/openapi_generated.go | 653 +- .../kmodules.xyz/offshoot-api/api/v1/pvc.go | 273 +- .../kmodules.xyz/offshoot-api/api/v1/types.go | 198 +- .../api/v1/zz_generated.deepcopy.go | 290 +- vendor/kmodules.xyz/resource-metrics/Makefile | 6 +- .../resource-metrics/calculator.go | 1 + .../kubedb.com/v1alpha2/kafka.go | 181 + .../kubedb.com/v1alpha2/redis.go | 3 + .../kubevault.com/v1alpha2/constant.go | 19 + .../kubevault.com/v1alpha2/vaultserver.go | 96 + vendor/modules.txt | 50 +- .../apis/stash/v1alpha1/openapi_generated.go | 635 +- .../stash/v1beta1/backup_session_types.go | 2 +- .../apis/stash/v1beta1/openapi_generated.go | 637 +- .../apis/ui/v1alpha1/openapi_generated.go | 635 +- .../stash.appscode.com_backupsessions.yaml | 2 +- 228 files changed, 13892 insertions(+), 10987 deletions(-) create mode 100644 vendor/github.com/Masterminds/semver/v3/SECURITY.md delete mode 100644 vendor/github.com/Masterminds/semver/v3/fuzz.go delete mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml rename vendor/github.com/PuerkitoBio/{urlesc => purell}/urlesc.go (94%) delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh rename vendor/github.com/fsnotify/fsnotify/{open_mode_bsd.go => system_bsd.go} (57%) rename vendor/github.com/fsnotify/fsnotify/{open_mode_darwin.go => system_darwin.go} (52%) delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (76%) create mode 100644 vendor/kmodules.xyz/client-go/api/v1/object_enum.go create mode 100644 vendor/kmodules.xyz/client-go/meta/conditions.go delete mode 100644 vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.pb.go create mode 100644 vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/kafka.go create mode 100644 vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/constant.go create mode 100644 vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/vaultserver.go diff --git a/go.mod b/go.mod index 9c0fbea6..e9b2c579 100644 --- a/go.mod +++ b/go.mod @@ -5,23 +5,23 @@ go 1.19 require ( github.com/lnquy/cron v1.1.1 github.com/robfig/cron/v3 v3.0.1 - github.com/spf13/cobra v1.6.0 + github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - go.bytebuilders.dev/license-verifier 
v0.12.1 + go.bytebuilders.dev/license-verifier v0.13.2 gomodules.xyz/logs v0.0.6 gomodules.xyz/pointer v0.1.0 - gomodules.xyz/x v0.0.14 + gomodules.xyz/x v0.0.15 k8s.io/api v0.25.1 k8s.io/apimachinery v0.25.3 k8s.io/apiserver v0.25.1 k8s.io/client-go v0.25.1 k8s.io/klog/v2 v2.80.1 kmodules.xyz/authorizer v0.25.0 - kmodules.xyz/client-go v0.25.19 - kmodules.xyz/custom-resources v0.25.0 - kmodules.xyz/resource-metrics v0.25.0 + kmodules.xyz/client-go v0.25.23 + kmodules.xyz/custom-resources v0.25.2 + kmodules.xyz/resource-metrics v0.25.2 sigs.k8s.io/controller-runtime v0.13.1 - stash.appscode.dev/apimachinery v0.29.0 + stash.appscode.dev/apimachinery v0.30.0 ) require ( @@ -33,10 +33,9 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/PuerkitoBio/purell v1.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect @@ -48,7 +47,7 @@ require ( github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect @@ -69,7 +68,7 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect @@ -109,18 +108,18 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.7.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.5.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/term v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.1.0 // indirect gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f // indirect gomodules.xyz/encoding v0.0.7 // indirect gomodules.xyz/flags v0.1.3 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect gomodules.xyz/sets v0.2.1 // indirect gomodules.xyz/wait v0.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect @@ -137,8 +136,8 @@ require ( k8s.io/component-helpers v0.25.1 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73 // indirect - kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2 // indirect 
- kmodules.xyz/offshoot-api v0.25.0 // indirect + kmodules.xyz/objectstore-api v0.25.1 // indirect + kmodules.xyz/offshoot-api v0.25.4 // indirect kmodules.xyz/prober v0.25.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect diff --git a/go.sum b/go.sum index 8a9745d2..9cb4440f 100644 --- a/go.sum +++ b/go.sum @@ -55,14 +55,14 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/purell v1.2.0 h1:/Jdm5QfyM8zdlqT6WVZU4cfP23sot6CEHA4CS49Ezig= +github.com/PuerkitoBio/purell v1.2.0/go.mod h1:OhLRTaaIzhvIyofkJfB24gokC7tM42Px5UhoT32THBk= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -122,7 +122,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= @@ -134,8 +133,8 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.4 
h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -279,8 +278,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= @@ -324,7 +323,7 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -428,8 +427,8 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= -github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -445,7 +444,7 @@ github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= @@ -463,8 +462,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.bytebuilders.dev/license-verifier v0.12.1 h1:2bQJ8naHTv0a04/x+luoabCGQ37VuaccA7Yz5fVPHaU= -go.bytebuilders.dev/license-verifier v0.12.1/go.mod h1:47HPt3RVp9gsujYpOeXi28IsTV5lilZ+EOpuhz/DIJM= +go.bytebuilders.dev/license-verifier v0.13.2 h1:wV1ynl+GR+zKb3dh19WEzuC0uzTdiSGgVg9G78Nh4XU= +go.bytebuilders.dev/license-verifier v0.13.2/go.mod h1:PTTlWgokzoisBezn2zt+JeGkhTJZ0flvLzdhHVBy86M= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= @@ -530,8 +529,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -603,8 +602,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -673,13 +672,13 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -688,8 +687,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -753,8 +752,8 @@ gomodules.xyz/encoding v0.0.7 h1:Y4PaflVS+vkYgkw6FwyF1S0ab4Y1BAdOqB3Uwjcx8qI= gomodules.xyz/encoding v0.0.7/go.mod h1:blYJWy456Bzxj3L2G7G5WV1l5rFXl6uYXrmeFqxxqk8= gomodules.xyz/flags v0.1.3 h1:jQ06+EfmoMv5NvjXvJon03dOhLU+FF0TQMWN7I6qpzs= gomodules.xyz/flags v0.1.3/go.mod h1:e+kvBLnqdEWGG670SKOYag1CXStM2Slrxq01OIK3tFs= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= 
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= +gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gomodules.xyz/logs v0.0.6 h1:8+9Wkud5yBPtIvkVszubyTeFxNII30lWODom0+GZD8U= gomodules.xyz/logs v0.0.6/go.mod h1:Q+fFtZFLEB5q86KmDehXCGuMP72Rv+Rwz0KuVxK+Gi4= gomodules.xyz/pointer v0.1.0 h1:sG2UKrYVSo6E3r4itAjXfPfe4fuXMi0KdyTHpR3vGCg= @@ -762,10 +761,11 @@ gomodules.xyz/pointer v0.1.0/go.mod h1:sPLsC0+yLTRecUiC5yVlyvXhZ6LAGojNCRWNNqopl gomodules.xyz/sets v0.2.0/go.mod h1:jKgNp01/iDs+svOWXaPk5cKP3VXy0mWUoTF/ore+aMc= gomodules.xyz/sets v0.2.1 h1:vK3oUWoGVrZKLDKO/bzEo/ucHFdCE7+DxWPeWxK72KQ= gomodules.xyz/sets v0.2.1/go.mod h1:jKgNp01/iDs+svOWXaPk5cKP3VXy0mWUoTF/ore+aMc= +gomodules.xyz/testing v0.0.4 h1:XGKt4B64mBe7P9kPR0Rz1nCQpWoSpBEFdTGkfU1RLe4= gomodules.xyz/wait v0.2.0 h1:HnRIh+cvIrrKIFaXoYznCVVirv2/2xu3KzjSzsQmYAY= gomodules.xyz/wait v0.2.0/go.mod h1:g/epKzZQuCqgvhzhaoG4cSBNGHqnOrhFR4Q7szDJ1JM= -gomodules.xyz/x v0.0.14 h1:eD1+3YjTEt46dhErbuPEr1Vw+7xbFBNx6T26E7nCRwM= -gomodules.xyz/x v0.0.14/go.mod h1:AgHPywNbRKtogAdk08vCqsfwjtXtXX1/FUPyG7OWdOs= +gomodules.xyz/x v0.0.15 h1:n2aGD3cnpvNTvUALUE30sJMqT5g/G6BS5EnZojzw0tw= +gomodules.xyz/x v0.0.15/go.mod h1:M4tV13Y/0ZxrKEkmUcLkPdh8C4TjCAc5uXchDcqGYrw= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -916,19 +916,19 @@ k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73 h1:H9TCJUUx+2VA0ZiD9lvtaX8fthFsM k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= kmodules.xyz/authorizer v0.25.0 h1:yRrLtMOdlU1p4mLzaSz5pmSLpBLsVXLQHkUfiME12iQ= kmodules.xyz/authorizer v0.25.0/go.mod h1:Jb99YsLRJE4R4d8F5fFtlxEaxk0prdSk2LApZl4JdyI= -kmodules.xyz/client-go v0.25.19 h1:IYWixMIGMqS17LHYyiDxw8U/3ml6igI8QsYAdToYBGc= -kmodules.xyz/client-go v0.25.19/go.mod h1:KWVpgjAj/VNzCzj/BihpJkY39aWIqe8qMp5RuPsuSJA= +kmodules.xyz/client-go v0.25.23 h1:qz5XJYHLVZUowqfRXEJD7JQ4iaLLzQ1O1zPMmsdrkJw= +kmodules.xyz/client-go v0.25.23/go.mod h1:wbdzLEoDYiCPI6dTW0mIAGNwkwFV4lC5BN1FJxiDsbw= kmodules.xyz/crd-schema-fuzz v0.25.0 h1:c5ZxNRqJak1bkGhECmyrKpzKGThFMB4088Kynyvngbc= -kmodules.xyz/custom-resources v0.25.0 h1:5JQTEuiv6lC/+VVFNKqzfX4YtJCmN5E7mcNtGnHGVQM= -kmodules.xyz/custom-resources v0.25.0/go.mod h1:ULwzvLmOqZJcPSXKI7iLclYL5eYRlKx8Nbex28Ht19E= -kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2 h1:efc0glYeBw+ok5s5ZecKdB9zgnRo/IvsLlSaQUPQjZE= -kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2/go.mod h1:X5aCkyU91p9TOn4jcWw0cfcJL0HCKd/Z6FJHdzKz1ZU= -kmodules.xyz/offshoot-api v0.25.0 h1:Svq9da/+sg5afOjpgo9vx2J/Lu90Mo0aFxkdQmgKnGI= -kmodules.xyz/offshoot-api v0.25.0/go.mod h1:ysEBn7LJuT3+s8ynAQA/OG0BSsJugXa6KGtDLMRjlKo= +kmodules.xyz/custom-resources v0.25.2 h1:+PJgUZvbbSgyNT7EX9gUZ3PIzY2LAW03TDW8cevvXqo= +kmodules.xyz/custom-resources v0.25.2/go.mod h1:b9XjjKQMZ6KrLHXKqQz7YwV3M3BK8Hwi4KEwu5RadCo= +kmodules.xyz/objectstore-api v0.25.1 h1:lYQlxk+edgZYakhq+OoRBXTbHbZTGKhatGZWnKixgEQ= +kmodules.xyz/objectstore-api v0.25.1/go.mod h1:6wBtktN7/EXyE429OTCB9nwEe+d0ADaoCtm6+IZnJso= +kmodules.xyz/offshoot-api v0.25.4 h1:IjJNvkphcdYUG8XO/pBwXpuP8W+jxAWJZ3yH8vgI/as= +kmodules.xyz/offshoot-api v0.25.4/go.mod h1:PUk4EuJFhhyQykCflHj7EgXcljGIqs9vi0IN0RpxtY4= 
kmodules.xyz/prober v0.25.0 h1:R5uRLHJEvEtEoogj+vaTAob0Btph6+PX5IlS6hPh8PA= kmodules.xyz/prober v0.25.0/go.mod h1:z4RTnjaajNQa/vPltsiOnO3xI716I/ziD2ac2Exm+1M= -kmodules.xyz/resource-metrics v0.25.0 h1:wj3npVZZQr+Yysg/XdGn4LdT5kQizWeO0zq7QlSVx50= -kmodules.xyz/resource-metrics v0.25.0/go.mod h1:4a49npnu73c9LGDWHWQsPWoXWXU9rpCcknoH1+HHesI= +kmodules.xyz/resource-metrics v0.25.2 h1:BwCb6qyunvQBa0u8UUkw+wYG5/T4qtNtAKcHjSsk0JU= +kmodules.xyz/resource-metrics v0.25.2/go.mod h1:ZK/52NLuwMk+Jt0bmUtGQHtSxPLYYpsFILG7SJhYPg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -947,5 +947,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ih sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -stash.appscode.dev/apimachinery v0.29.0 h1:36ijvg31s2UDQIAN2lnYY76E9dydUADutTkkErqokAQ= -stash.appscode.dev/apimachinery v0.29.0/go.mod h1:7ao2U0Jgntc10uRf9KRmzXuabO12Nm7+2WpcRE/ZXWo= +stash.appscode.dev/apimachinery v0.30.0 h1:imc3OuXaCI8B9ImB1e4QtDYhZCc2VyiXQHW5gheKG/s= +stash.appscode.dev/apimachinery v0.30.0/go.mod h1:xEWpZn0wmhP0Acpq7cpebNbMXwIui8Crh4dpuE9FHSQ= diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml index fdbdf144..fbc63325 100644 --- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -4,23 +4,24 @@ run: linters: disable-all: true enable: - - deadcode - - dupl - - errcheck - - gofmt - - goimports - - golint - - gosimple + - misspell - govet + - staticcheck + - errcheck + - unparam - ineffassign - - misspell - nakedret - - structcheck + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck - unused - - varcheck linters-settings: gofmt: simplify: true dupl: - threshold: 400 + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md index 1f90c38d..f1262642 100644 --- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + ## 3.1.1 (2020-11-23) ### Fixed diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile index eac19178..0e7b5c71 100644 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -1,7 +1,5 @@ GOPATH=$(shell go env GOPATH) GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint 
-GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build -GOFUZZ = $(GOPATH)/bin/go-fuzz .PHONY: lint lint: $(GOLANGCI_LINT) @@ -19,19 +17,14 @@ test-cover: GO111MODULE=on go test -cover . .PHONY: fuzz -fuzz: $(GOFUZZBUILD) $(GOFUZZ) - @echo "==> Fuzz testing" - $(GOFUZZBUILD) - $(GOFUZZ) -workdir=_fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . $(GOLANGCI_LINT): # Install golangci-lint. The configuration for it is in the .golangci.yml # file in the root of the repository echo ${GOPATH} curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 - -$(GOFUZZBUILD): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build - -$(GOFUZZ): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index d8f54dcb..eab8cac3 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -18,18 +18,20 @@ If you are looking for a command line tool for version comparisons please see ## Package Versions +Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version. + There are three major versions fo the `semver` package. -* 3.x.x is the new stable and active version. This version is focused on constraint +* 3.x.x is the stable and active version. This version is focused on constraint compatibility for range handling in other tools from other languages. It has a similar API to the v1 releases. The development of this version is on the master branch. The documentation for this version is below. * 2.x was developed primarily for [dep](https://github.com/golang/dep). There are no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). -* 1.x.x is the most widely used version with numerous tagged releases. This is the - previous stable and is still maintained for bug fixes. The development, to fix - bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). ## Parsing Semantic Versions @@ -242,3 +244,15 @@ for _, m := range msgs { If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://github.com/Masterminds/semver) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). 
diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 00000000..a30a66b1 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHubs +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go index 547613f0..8461c7ed 100644 --- a/vendor/github.com/Masterminds/semver/v3/constraints.go +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -134,6 +134,23 @@ func (cs Constraints) String() string { return strings.Join(buf, " || ") } +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + var constraintOps map[string]cfunc var constraintRegex *regexp.Regexp var constraintRangeRegex *regexp.Regexp @@ -180,8 +197,13 @@ func init() { ops, cvRegex)) + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. validConstraintRegex = regexp.MustCompile(fmt.Sprintf( - `^(\s*(%s)\s*(%s)\s*\,?)+$`, + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, ops, cvRegex)) } @@ -233,7 +255,7 @@ func parseConstraint(c string) (*constraint, error) { patchDirty := false dirty := false if isX(m[3]) || m[3] == "" { - ver = "0.0.0" + ver = fmt.Sprintf("0.0.0%s", m[6]) dirty = true } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { minorDirty = true @@ -534,6 +556,10 @@ func constraintCaret(v *Version, c *constraint) (bool, error) { } return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } // At this point the major is 0 and the minor is 0 and not dirty. The patch // is not dirty so we need to check if they are equal. 
If they are not equal @@ -560,7 +586,7 @@ func rewriteRange(i string) string { } o := i for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) o = strings.Replace(o, v[0], t, 1) } diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go index 391aa46b..74f97caa 100644 --- a/vendor/github.com/Masterminds/semver/v3/doc.go +++ b/vendor/github.com/Masterminds/semver/v3/doc.go @@ -3,12 +3,12 @@ Package semver provides the ability to work with Semantic Versions (http://semve Specifically it provides the ability to: - * Parse semantic versions - * Sort semantic versions - * Check if a semantic version fits within a set of constraints - * Optionally work with a `v` prefix + - Parse semantic versions + - Sort semantic versions + - Check if a semantic version fits within a set of constraints + - Optionally work with a `v` prefix -Parsing Semantic Versions +# Parsing Semantic Versions There are two functions that can parse semantic versions. The `StrictNewVersion` function only parses valid version 2 semantic versions as outlined in the @@ -21,48 +21,48 @@ that can be sorted, compared, and used in constraints. When parsing a version an optional error can be returned if there is an issue parsing the version. For example, - v, err := semver.NewVersion("1.2.3-beta.1+b345") + v, err := semver.NewVersion("1.2.3-beta.1+b345") The version object has methods to get the parts of the version, compare it to other versions, convert the version back into a string, and get the original string. For more details please see the documentation at https://godoc.org/github.com/Masterminds/semver. -Sorting Semantic Versions +# Sorting Semantic Versions A set of versions can be sorted using the `sort` package from the standard library. For example, - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } - vs[i] = v - } + vs[i] = v + } - sort.Sort(semver.Collection(vs)) + sort.Sort(semver.Collection(vs)) -Checking Version Constraints and Comparing Versions +# Checking Version Constraints and Comparing Versions There are two methods for comparing versions. One uses comparison methods on `Version` instances and the other is using Constraints. There are some important differences to notes between these two methods of comparison. -1. When two versions are compared using functions such as `Compare`, `LessThan`, - and others it will follow the specification and always include prereleases - within the comparison. It will provide an answer valid with the comparison - spec section at https://semver.org/#spec-item-11 -2. When constraint checking is used for checks or validation it will follow a - different set of rules that are common for ranges with tools like npm/js - and Rust/Cargo. This includes considering prereleases to be invalid if the - ranges does not include on. If you want to have it include pre-releases a - simple solution is to include `-0` in your range. -3. Constraint ranges can have some complex rules including the shorthard use of - ~ and ^. For more details on those see the options below. + 1. 
When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer valid with the comparison + spec section at https://semver.org/#spec-item-11 + 2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include on. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. + 3. Constraint ranges can have some complex rules including the shorthard use of + ~ and ^. For more details on those see the options below. There are differences between the two methods or checking versions because the comparison methods on `Version` follow the specification while comparison ranges @@ -76,19 +76,19 @@ patters with their versions. Checking a version against version constraints is one of the most featureful parts of the package. - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parsable. - } + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parsable. + } - v, err := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parsable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parsable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) -Basic Comparisons +# Basic Comparisons There are two elements to the comparisons. First, a comparison string is a list of comma or space separated AND comparisons. These are then separated by || (OR) @@ -99,31 +99,31 @@ greater than or equal to 4.2.3. This can also be written as The basic comparisons are: - * `=`: equal (aliased to no operator) - * `!=`: not equal - * `>`: greater than - * `<`: less than - * `>=`: greater than or equal to - * `<=`: less than or equal to + - `=`: equal (aliased to no operator) + - `!=`: not equal + - `>`: greater than + - `<`: less than + - `>=`: greater than or equal to + - `<=`: less than or equal to -Hyphen Range Comparisons +# Hyphen Range Comparisons There are multiple methods to handle ranges and the first is hyphens ranges. These look like: - * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` - * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` -Wildcards In Comparisons +# Wildcards In Comparisons The `x`, `X`, and `*` characters can be used as a wildcard character. This works for all comparison operators. When used on the `=` operator it falls back to the tilde operation. For example, - * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` - * `>= 1.2.x` is equivalent to `>= 1.2.0` - * `<= 2.x` is equivalent to `<= 3` - * `*` is equivalent to `>= 0.0.0` + - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + - `>= 1.2.x` is equivalent to `>= 1.2.0` + - `<= 2.x` is equivalent to `<= 3` + - `*` is equivalent to `>= 0.0.0` Tilde Range Comparisons (Patch) @@ -131,11 +131,11 @@ The tilde (`~`) comparison operator is for patch level ranges when a minor version is specified and major level changes when the minor number is missing. 
For example, - * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` - * `~1` is equivalent to `>= 1, < 2` - * `~2.3` is equivalent to `>= 2.3 < 2.4` - * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` - * `~1.x` is equivalent to `>= 1 < 2` + - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` + - `~1` is equivalent to `>= 1, < 2` + - `~2.3` is equivalent to `>= 2.3 < 2.4` + - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + - `~1.x` is equivalent to `>= 1 < 2` Caret Range Comparisons (Major) @@ -144,41 +144,41 @@ The caret (`^`) comparison operator is for major level changes once a stable as the API stability level. This is useful when comparisons of API versions as a major change is API breaking. For example, - * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` - * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` - * `^2.3` is equivalent to `>= 2.3, < 3` - * `^2.x` is equivalent to `>= 2.0.0, < 3` - * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` - * `^0.2` is equivalent to `>=0.2.0 <0.3.0` - * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` - * `^0.0` is equivalent to `>=0.0.0 <0.1.0` - * `^0` is equivalent to `>=0.0.0 <1.0.0` + - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + - `^2.3` is equivalent to `>= 2.3, < 3` + - `^2.x` is equivalent to `>= 2.0.0, < 3` + - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + - `^0.2` is equivalent to `>=0.2.0 <0.3.0` + - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + - `^0.0` is equivalent to `>=0.0.0 <0.1.0` + - `^0` is equivalent to `>=0.0.0 <1.0.0` -Validation +# Validation In addition to testing a version against a constraint, a version can be validated against a constraint. When validation fails a slice of errors containing why a version didn't meet the constraint is returned. For example, - c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - - // Validate a version against a constraint. - a, msgs := c.Validate(v) - // a is false - for _, m := range msgs { - fmt.Println(m) - - // Loops over the errors which would read - // "1.3 is greater than 1.2.3" - // "1.3 is less than 1.4" - } + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. + a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } */ package semver diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go deleted file mode 100644 index a242ad70..00000000 --- a/vendor/github.com/Masterminds/semver/v3/fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build gofuzz - -package semver - -func Fuzz(data []byte) int { - d := string(data) - - // Test NewVersion - _, _ = NewVersion(d) - - // Test StrictNewVersion - _, _ = StrictNewVersion(d) - - // Test NewConstraint - _, _ = NewConstraint(d) - - // The return value should be 0 normally, 1 if the priority in future tests - // should be increased, and -1 if future tests should skip passing in that - // data. We do not have a reason to change priority so 0 is always returned. - // There are example tests that do this. 
- return 0 -} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go index d6b9cda3..7c4bed33 100644 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -55,14 +55,16 @@ func init() { versionRegex = regexp.MustCompile("^" + semVerRegex + "$") } -const num string = "0123456789" -const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) // StrictNewVersion parses a given version and returns an instance of Version or // an error if unable to parse the version. Only parses valid semantic versions. // Performs checking that can find errors within the version. -// If you want to coerce a version, such as 1 or 1.2, and perse that as the 1.x -// releases of semver provided use the NewSemver() function. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. func StrictNewVersion(v string) (*Version, error) { // Parsing here does not use RegEx in order to increase performance and reduce // allocations. @@ -207,6 +209,23 @@ func NewVersion(v string) (*Version, error) { return sv, nil } +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + // MustParse parses a given version and panics on error. func MustParse(v string) *Version { sv, err := NewVersion(v) @@ -267,7 +286,6 @@ func (v Version) Metadata() string { // originalVPrefix returns the original 'v' prefix if any. func (v Version) originalVPrefix() string { - // Note, only lowercase v is supported as a prefix by the parser. if v.original != "" && v.original[:1] == "v" { return v.original[:1] @@ -436,6 +454,23 @@ func (v Version) MarshalJSON() ([]byte, error) { return json.Marshal(v.String()) } +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(text []byte) error { + temp, err := NewVersion(string(text)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (v Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + // Scan implements the SQL.Scanner interface. func (v *Version) Scan(value interface{}) error { var s string @@ -470,7 +505,6 @@ func compareSegment(v, o uint64) int { } func comparePrerelease(v, o string) int { - // split the prelease versions by their part. The separator, per the spec, // is a . sparts := strings.Split(v, ".") @@ -562,7 +596,6 @@ func comparePrePart(s, o string) int { return 1 } return -1 - } // Like strings.ContainsAny but does an only instead of any. 
diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af..00000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE index 4b9986de..640c3ac5 100644 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ b/vendor/github.com/PuerkitoBio/purell/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2012, Martin Angers +Copyright (c) 2012-2022, The Go Authors +Copyright (c) 2012-2022, Martin Angers, Yuki Okushi & Contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md index 07de0c49..84612790 100644 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -4,7 +4,7 @@ Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) +[![CI](https://github.com/PuerkitoBio/purell/actions/workflows/ci.yml/badge.svg)](https://github.com/PuerkitoBio/purell/actions/workflows/ci.yml) ## Install diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go index 6d0fc190..74c82724 100644 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -13,7 +13,6 @@ import ( "strconv" "strings" - "github.com/PuerkitoBio/urlesc" "golang.org/x/net/idna" "golang.org/x/text/unicode/norm" "golang.org/x/text/width" @@ -180,7 +179,7 @@ func NormalizeURL(u *url.URL, f NormalizationFlags) string { flags[k](u) } } - return urlesc.Escape(u) + return escapeURL(u) } func lowercaseScheme(u *url.URL) { @@ -311,7 +310,7 @@ func sortQuery(u *url.URL) { if buf.Len() > 0 { buf.WriteRune('&') } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) + buf.WriteString(fmt.Sprintf("%s=%s", k, url.QueryEscape(v))) } } diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/purell/urlesc.go similarity index 94% rename from vendor/github.com/PuerkitoBio/urlesc/urlesc.go rename to vendor/github.com/PuerkitoBio/purell/urlesc.go index 1b846245..53d815fd 100644 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ b/vendor/github.com/PuerkitoBio/purell/urlesc.go @@ -2,11 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package urlesc implements query escaping as per RFC 3986. +// This file implements query escaping as per RFC 3986. // It contains some parts of the net/url package, modified so as to allow // some reserved characters incorrectly escaped by net/url. // See https://github.com/golang/go/issues/5684 -package urlesc +package purell import ( "bytes" @@ -69,12 +69,6 @@ func shouldEscape(c byte, mode encoding) bool { return true } -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. 
-func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - func escape(s string, mode encoding) string { spaceCount, hexCount := 0, 0 for i := 0; i < len(s); i++ { @@ -144,7 +138,7 @@ func unescapeUserinfo(s string) string { // the form host/path does not add its own /. // - if u.RawQuery is empty, ?query is omitted. // - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { +func escapeURL(u *url.URL) string { var buf bytes.Buffer if u.Scheme != "" { buf.WriteString(u.Scheme) diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a5..00000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). 
- -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf..1d89d85c 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,6 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5e..00000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index cc01c08f..77f9593b 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -7,6 +7,95 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. 
+ +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. + +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + ## [1.5.4] - 2022-04-25 * Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) @@ -40,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#385](https://github.com/fsnotify/fsnotify/pull/385) * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. 
+ +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + ## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 8a642563..ea379759 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,60 +1,26 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discus changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. +- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. -### How fsnotify is Developed - -* Development is done on feature branches. 
-* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb..fb03ade7 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. 
nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 0731c5ef..d4e6080f 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,120 +1,161 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. -[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** -Cross platform: Windows, Linux, BSD and macOS. 
+--- -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +Platform support: -\* Android and iOS are untested. +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | -Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +Linux and macOS should include Android and iOS, but these are currently untested. -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). - -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. 
+ <-make(chan struct{}) } ``` -## Contributing +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: -Please refer to [CONTRIBUTING][] before opening an issue or pull request. + % go run ./cmd/fsnotify -## FAQ +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. -**When a file is moved to another directory is it still being watched?** +### Are subdirectories watched too? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). -No (it shouldn't be, unless you are watching where it was moved to). +[#18]: https://github.com/fsnotify/fsnotify/issues/18 -**When I watch a directory, are all subdirectories watched as well?** +### Do I have to watch the Error and Event channels in a goroutine? +As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. -**Do I have to watch the Error and Event channels in a separate goroutine?** +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) +[#9]: https://github.com/fsnotify/fsnotify/issues/9 -**Why am I receiving multiple events for the same file on OS X?** +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE -**How many files can be watched at once?** +This is the event that inotify sends, so not much can be changed about this. -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
+To increase them you can use `sysctl` or write the value to proc file: -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md +Reaching the limit will result in a "no space left on device" or "too many open +files" error. -## Related Projects +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 00000000..1a95ad8e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. 
+// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 00000000..54c77fbb --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). 
See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. 
There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). 
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 00000000..29087469 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. 
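+ // Close() later closes the write end of this pipe; that makes the read end readable, which wakes the blocking kevent() call in the reader goroutine so it can exit without a timeout or polling loop.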
+ err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
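The Add documentation above suggests watching the parent directory and filtering on Event.Name rather than watching a single file, because atomic-save tools replace the file and a per-file watch is silently lost. A minimal sketch of that pattern, using only the exported API in this patch (NewWatcher, Add, Events, Errors, Event.Has); the directory and file names are placeholders:

    package main

    import (
        "log"
        "path/filepath"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        // Watch the directory, not the file itself, so atomic saves
        // (write temp file, rename over the original) keep producing events.
        if err := w.Add("/path/to/dir"); err != nil {
            log.Fatal(err)
        }

        for {
            select {
            case ev, ok := <-w.Events:
                if !ok {
                    return
                }
                // Ignore everything except the one file we care about.
                if filepath.Base(ev.Name) != "config.yaml" {
                    continue
                }
                if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
                    log.Println("config changed:", ev)
                }
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("watch error:", err)
            }
        }
    }

Create is checked alongside Write because, per the event table above, a rename into the watched directory (the atomic-save case) surfaces as a Create of the new name.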
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. 
+ for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 00000000..a9bb1c3c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 00000000..ae392867 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. 
+// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. 
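The Events documentation above warns that a single logical save can arrive as many Write events and suggests waiting until they stop (the dedup example in cmd/fsnotify). A rough sketch of one way to debounce such bursts with per-path timers; the package name, delay, and callback are illustrative choices, not part of this patch:

    package watchutil

    import (
        "log"
        "sync"
        "time"

        "github.com/fsnotify/fsnotify"
    )

    // DebounceWrites calls fn once per path after writes to that path have been
    // quiet for delay, collapsing a burst of Write events into a single call.
    func DebounceWrites(w *fsnotify.Watcher, delay time.Duration, fn func(path string)) {
        var (
            mu     sync.Mutex
            timers = map[string]*time.Timer{}
        )
        for {
            select {
            case ev, ok := <-w.Events:
                if !ok {
                    return
                }
                if !ev.Has(fsnotify.Write) {
                    continue
                }
                name := ev.Name
                mu.Lock()
                if t, ok := timers[name]; ok {
                    t.Stop() // another write arrived; push the deadline back
                }
                timers[name] = time.AfterFunc(delay, func() {
                    mu.Lock()
                    delete(timers, name)
                    mu.Unlock()
                    fn(name)
                })
                mu.Unlock()
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("watch error:", err)
            }
        }
    }

It could be driven as, for example, go watchutil.DebounceWrites(w, 100*time.Millisecond, reload), where reload is whatever action should follow a settled write.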
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("watcher already closed") + } + w.mu.Unlock() + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". 
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + // This error is handled after the watch == nil check below. NOTE: this + // seems odd, note sure if it's correct. + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. + n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.sendError(errors.New("short read in readEvents()")) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0f4ee52e..30a5bf0f 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,29 +1,37 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build !plan9 // +build !plan9 -// Package fsnotify provides a platform-independent interface for file system notifications. +// Package fsnotify provides a cross-platform interface for file system +// notifications. package fsnotify import ( - "bytes" "errors" "fmt" + "strings" ) -// Event represents a single file system notification. +// Event represents a file system notification. type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. + // Path to the file or directory. + // + // Paths are relative to the input; for example with Add("dir") the Name + // will be set to "dir/file" if you create that file, but if you use + // Add("/path/to/dir") it will be "/path/to/dir/file". + Name string + + // File operation that triggered the event. + // + // This is a bitmask and some systems may send multiple operations at once. + // Use the Event.Has() method instead of comparing with ==. + Op Op } // Op describes a set of file operations. type Op uint32 -// These are the generalized file operations that can trigger a notification. +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. const ( Create Op = 1 << iota Write @@ -32,38 +40,42 @@ const ( Chmod ) -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer +// Common errors that can be reported by a watcher +var ( + ErrNonExistentWatch = errors.New("can't remove non-existent watcher") + ErrEventOverflow = errors.New("fsnotify queue overflow") +) - if op&Create == Create { - buffer.WriteString("|CREATE") +func (op Op) String() string { + var b strings.Builder + if op.Has(Create) { + b.WriteString("|CREATE") } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") + if op.Has(Remove) { + b.WriteString("|REMOVE") } - if op&Write == Write { - buffer.WriteString("|WRITE") + if op.Has(Write) { + b.WriteString("|WRITE") } - if op&Rename == Rename { - buffer.WriteString("|RENAME") + if op.Has(Rename) { + b.WriteString("|RENAME") } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") + if op.Has(Chmod) { + b.WriteString("|CHMOD") } - if buffer.Len() == 0 { - return "" + if b.Len() == 0 { + return "[no events]" } - return buffer.String()[1:] // Strip leading pipe + return b.String()[1:] } -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h == h } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. 
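With this change Remove wraps ErrNonExistentWatch (the kqueue and Windows backends above use %w for this), and Op and Event gain Has methods, so callers can test errors and operation bits without string comparison or ==. Two small illustrative helpers; the package and function names are placeholders:

    package example

    import (
        "errors"
        "log"

        "github.com/fsnotify/fsnotify"
    )

    // StopWatching removes a watch but treats "was never watched" as a no-op,
    // relying on Remove wrapping ErrNonExistentWatch.
    func StopWatching(w *fsnotify.Watcher, path string) {
        err := w.Remove(path)
        if errors.Is(err, fsnotify.ErrNonExistentWatch) {
            return // never added, or already removed
        }
        if err != nil {
            log.Println("remove failed:", err)
        }
    }

    // Handle treats Op as the bitmask it is: a single event can carry several
    // operations, so Has is preferred over comparing with ==.
    func Handle(ev fsnotify.Event) {
        if ev.Has(fsnotify.Rename) || ev.Has(fsnotify.Remove) {
            log.Println("path went away:", ev.Name)
        }
    }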
func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go deleted file mode 100644 index 59688559..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index a6d0e0ec..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. 
-func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b572a37c..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 6fb8d853..00000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 00000000..b09ef768 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. 
Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go similarity index 57% rename from vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/system_bsd.go index 36cc3845..4322b0b8 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go similarity index 52% rename from vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/system_darwin.go index 98cd8476..5da5ffa7 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build darwin // +build darwin diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 02ce7deb..00000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = 
filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - // TODO: Consider using unsafe.Slice that is available from go1.17 - // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang - // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := syscall.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go index 9d2d8a4b..06a91f08 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package mousetrap diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go index 336142a5..0c568802 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -1,81 +1,32 @@ -// +build windows -// +build !go1.4 - package mousetrap import ( - "fmt" - "os" "syscall" "unsafe" ) -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return + defer syscall.CloseHandle(snapshot) + var procEntry 
syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err } - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err } } } -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - // StartedByExplorer returns true if the program was invoked by the user double-clicking // on the executable from explorer.exe // @@ -83,16 +34,9 @@ func getppid() (pid int, err error) { // It does not guarantee that the program was run from a terminal. It only can tell you // whether it was launched from explorer.exe func StartedByExplorer() bool { - ppid, err := getppid() + pe, err := getProcessEntry(syscall.Getppid()) if err != nil { return false } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) } diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c..00000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 439d3e1d..2578d94b 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -1,4 +1,4 @@ -# Copyright 2013-2022 The Cobra Authors +# Copyright 2013-2023 The Cobra Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
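
The trap_windows.go rewrite above drops the hand-rolled kernel32 bindings and walks the Toolhelp32 process snapshot through the standard library (syscall.CreateToolhelp32Snapshot / Process32First / Process32Next), comparing the parent process's executable name against explorer.exe. The following is a minimal sketch, not part of this patch, of how a caller might use mousetrap.StartedByExplorer; the message text and the time-based pause are purely illustrative assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// Illustrative only: when a binary is launched by double-clicking it in
	// explorer.exe, its console window closes as soon as main returns, so
	// pause long enough for the user to read the message.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command-line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second)
		return
	}
	fmt.Println("Started from a shell; proceeding normally.")
}

Cobra's Windows build performs the same check (see command_win.go below) to warn users who double-click a CLI binary, which is why this vendored module is updated alongside it.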
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index c433a01b..0da8d7aa 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -5,10 +5,6 @@ ifeq (, $(shell which golangci-lint)) $(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") endif -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go install github.com/kyoh86/richgo@latest") -endif - .PHONY: fmt lint test install_deps clean default: all @@ -25,6 +21,10 @@ lint: test: install_deps $(info ******************** running tests ********************) + go test -v ./... + +richtest: install_deps + $(info ******************** running tests with kyoh86/richgo ********************) richgo test -v ./... install_deps: diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 7cc726be..592c0b8a 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,4 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) +![cobra logo](assets/CobraMain.png) Cobra is a library for creating powerful modern CLI applications. @@ -6,7 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. -[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) +[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 95e03aec..2d023943 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 2c1f99e7..e79ec33a 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ import ( type PositionalArgs func(cmd *Command, args []string) error -// Legacy arg validation has the following behaviour: +// legacyArgs validation has the following behaviour: // - root commands with no subcommands can take arbitrary arguments // - root commands with subcommands will do subcommand validity checking // - subcommands will always accept arbitrary arguments diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 3acdb279..10c78847 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -532,7 +532,7 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { } } -// Setup annotations for go completions for registered flags +// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags func prepareCustomAnnotationsForFlags(cmd *Command) { flagCompletionMutex.RLock() defer flagCompletionMutex.RUnlock() diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index bb4b7189..19b09560 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,7 +38,7 @@ func genBashComp(buf io.StringWriter, name string, includeDesc bool) { __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -65,7 +65,7 @@ __%[1]s_get_completion_results() { lastChar=${lastParam:$((${#lastParam}-1)):1} __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + if [[ -z ${cur} && ${lastChar} != = ]]; then # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. __%[1]s_debug "Adding extra empty parameter" @@ -75,7 +75,7 @@ __%[1]s_get_completion_results() { # When completing a flag with an = (e.g., %[1]s -n=) # bash focuses on the part after the =, so we need to remove # the flag part from $cur - if [[ "${cur}" == -*=* ]]; then + if [[ ${cur} == -*=* ]]; then cur="${cur#*=}" fi @@ -87,7 +87,7 @@ __%[1]s_get_completion_results() { directive=${out##*:} # Remove the directive out=${out%%:*} - if [ "${directive}" = "${out}" ]; then + if [[ ${directive} == "${out}" ]]; then # There is not directive specified directive=0 fi @@ -101,22 +101,36 @@ __%[1]s_process_completion_results() { local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + if (((directive & shellCompDirectiveError) != 0)); then # Error code. No completion. 
__%[1]s_debug "Received error from custom completion go code" return else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveNoSpace) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no space" compopt -o nospace else __%[1]s_debug "No space directive not supported in this version of bash" fi fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveKeepOrder) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + # no sort isn't supported for bash less than < 4.4 + if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then + __%[1]s_debug "No sort directive not supported in this version of bash" + else + __%[1]s_debug "Activating keep order" + compopt -o nosort + fi + else + __%[1]s_debug "No sort directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveNoFileComp) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no file completion" compopt +o default else @@ -130,7 +144,7 @@ __%[1]s_process_completion_results() { local activeHelp=() __%[1]s_extract_activeHelp - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering local fullFilter filter filteringCmd @@ -143,13 +157,12 @@ __%[1]s_process_completion_results() { filteringCmd="_filedir $fullFilter" __%[1]s_debug "File filtering command: $filteringCmd" $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + elif (((directive & shellCompDirectiveFilterDirs) != 0)); then # File completion for directories only - # Use printf to strip any trailing newline local subdir - subdir=$(printf "%%s" "${completions[0]}") - if [ -n "$subdir" ]; then + subdir=${completions[0]} + if [[ -n $subdir ]]; then __%[1]s_debug "Listing directories in $subdir" pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return else @@ -164,7 +177,7 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish - if [ ${#activeHelp[*]} -ne 0 ]; then + if ((${#activeHelp[*]} != 0)); then printf "\n"; printf "%%s\n" "${activeHelp[@]}" printf "\n" @@ -184,21 +197,21 @@ __%[1]s_process_completion_results() { # Separate activeHelp lines from real completions. # Fills the $activeHelp and $completions arrays. 
__%[1]s_extract_activeHelp() { - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do - if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" - if [ -n "$comp" ]; then + if [[ -n $comp ]]; then activeHelp+=("$comp") fi else # Not an activeHelp line but a normal completion completions+=("$comp") fi - done < <(printf "%%s\n" "${out}") + done <<<"${out}" } __%[1]s_handle_completion_types() { @@ -254,7 +267,7 @@ __%[1]s_handle_standard_completion_case() { done < <(printf "%%s\n" "${completions[@]}") # If there is a single completion left, remove the description text - if [ ${#COMPREPLY[*]} -eq 1 ]; then + if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" comp="${COMPREPLY[0]%%%%$tab*}" __%[1]s_debug "Removed description from single completion, which is now: ${comp}" @@ -271,8 +284,8 @@ __%[1]s_handle_special_char() if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then local word=${comp%%"${comp##*${char}}"} local idx=${#COMPREPLY[*]} - while [[ $((--idx)) -ge 0 ]]; do - COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + while ((--idx >= 0)); do + COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} done fi } @@ -298,7 +311,7 @@ __%[1]s_format_comp_descriptions() # Make sure we can fit a description of at least 8 characters # if we are to align the descriptions. - if [[ $maxdesclength -gt 8 ]]; then + if ((maxdesclength > 8)); then # Add the proper number of spaces to align the descriptions for ((i = ${#comp} ; i < longest ; i++)); do comp+=" " @@ -310,8 +323,8 @@ __%[1]s_format_comp_descriptions() # If there is enough space for any description text, # truncate the descriptions that are too long for the shell width - if [ $maxdesclength -gt 0 ]; then - if [ ${#desc} -gt $maxdesclength ]; then + if ((maxdesclength > 0)); then + if ((${#desc} > maxdesclength)); then desc=${desc:0:$(( maxdesclength - 1 ))} desc+="…" fi @@ -332,9 +345,9 @@ __start_%[1]s() # Call _init_completion from the bash-completion package # to prepare the arguments properly if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -n "=:" || return + _init_completion -n =: || return else - __%[1]s_init_completion -n "=:" || return + __%[1]s_init_completion -n =: || return fi __%[1]s_debug @@ -361,7 +374,7 @@ fi # ex: ts=4 sw=4 et filetype=sh `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index fe44bc8a..b07b44a0 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -167,8 +167,8 @@ func appendIfNotPresent(s, stringToAppend string) string { // rpad adds padding to the right of a string. 
func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) + formattedString := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(formattedString, s) } // tmpl executes the given template text on data, writing the result to w. diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 9d5e9cf5..01f7c6f1 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist -// Structure to manage groups for commands +// Group Structure to manage groups for commands type Group struct { ID string Title string @@ -47,7 +47,7 @@ type Group struct { // definition to ensure usability. type Command struct { // Use is the one-line usage message. - // Recommended syntax is as follow: + // Recommended syntax is as follows: // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. // ... indicates that you can specify multiple values for the previous argument. // | indicates mutually exclusive information. You can use the argument to the left of the separator or the @@ -321,7 +321,7 @@ func (c *Command) SetHelpCommand(cmd *Command) { c.helpCommand = cmd } -// SetHelpCommandGroup sets the group id of the help command. +// SetHelpCommandGroupID sets the group id of the help command. func (c *Command) SetHelpCommandGroupID(groupID string) { if c.helpCommand != nil { c.helpCommand.GroupID = groupID @@ -330,7 +330,7 @@ func (c *Command) SetHelpCommandGroupID(groupID string) { c.helpCommandGroupID = groupID } -// SetCompletionCommandGroup sets the group id of the completion command. +// SetCompletionCommandGroupID sets the group id of the completion command. func (c *Command) SetCompletionCommandGroupID(groupID string) { // completionCommandGroupID is used if no completion command is defined by the user c.Root().completionCommandGroupID = groupID @@ -655,20 +655,44 @@ Loop: // argsMinusFirstX removes only the first x from args. Otherwise, commands that look like // openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret +// Special care needs to be taken not to remove a flag value. +func (c *Command) argsMinusFirstX(args []string, x string) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + flags := c.Flags() + +Loop: + for pos := 0; pos < len(args); pos++ { + s := args[pos] + switch { + case s == "--": + // -- means we have reached the end of the parseable args. Break out of the loop now. + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + fallthrough + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // This is a flag without a default value, and an equal sign is not used. 
Increment pos in order to skip + // over the next arg, because that is the value of this flag. + pos++ + continue + case !strings.HasPrefix(s, "-"): + // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, + // return the args, excluding the one at this position. + if s == x { + ret := []string{} + ret = append(ret, args[:pos]...) + ret = append(ret, args[pos+1:]...) + return ret + } } } return args } func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || + return ((len(arg) >= 3 && arg[0:2] == "--") || (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) } @@ -686,7 +710,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) { cmd := c.findNext(nextSubCmd) if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) } return c, innerArgs } @@ -998,6 +1022,10 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize completion at the last point to allow for user overriding c.InitDefaultCompletionCmd() + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + args := c.args // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 @@ -1092,6 +1120,19 @@ func (c *Command) ValidateRequiredFlags() error { return nil } +// checkCommandGroups checks if a command has been added to a group that does not exists. +// If so, we panic because it indicates a coding error that should be corrected. +func (c *Command) checkCommandGroups() { + for _, sub := range c.commands { + // if Group is not defined let the developer know right away + if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) { + panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath())) + } + + sub.checkCommandGroups() + } +} + // InitDefaultHelpFlag adds default help flag to c. // It is called automatically by executing the c or by calling help and usage. // If c already has help flag, it will do nothing. @@ -1218,10 +1259,6 @@ func (c *Command) AddCommand(cmds ...*Command) { panic("Command can't be a child of itself") } cmds[i].parent = c - // if Group is not defined let the developer know right away - if x.GroupID != "" && !c.ContainsGroup(x.GroupID) { - panic(fmt.Sprintf("Group id '%s' is not defined for subcommand '%s'", x.GroupID, cmds[i].CommandPath())) - } // update max lengths usageLen := len(x.Use) if usageLen > c.commandsMaxUseLen { @@ -1259,7 +1296,7 @@ func (c *Command) AllChildCommandsHaveGroup() bool { return true } -// ContainGroups return if groupID exists in the list of command groups. +// ContainsGroup return if groupID exists in the list of command groups. func (c *Command) ContainsGroup(groupID string) bool { for _, x := range c.commandgroups { if x.ID == groupID { diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go index 2b77f8f0..307f0c12 100644 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
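
With the command.go changes above, the unknown-group panic moves out of AddCommand and into checkCommandGroups, which ExecuteC runs once the whole command tree has been assembled, so a group may be registered before or after the subcommands that reference it. A small sketch follows; it is not taken from this patch and assumes Cobra's AddGroup helper together with the Group/GroupID fields shown in the hunks above, with "app", "start" and "mgmt" as hypothetical names:

package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "app"}

	// Registration order no longer matters: the subcommand can reference a
	// group that is declared later, because the consistency check only runs
	// when the tree is executed.
	root.AddCommand(&cobra.Command{
		Use:     "start",
		GroupID: "mgmt", // must match a group registered on the parent, or ExecuteC panics
		Run:     func(cmd *cobra.Command, args []string) {},
	})
	root.AddGroup(&cobra.Group{ID: "mgmt", Title: "Management Commands:"})

	_ = root.Execute()
}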
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 520f23ab..adbef395 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index e8a0206d..ee38c4d0 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,10 @@ const ( // obtain the same behavior but only for flags. ShellCompDirectiveFilterDirs + // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order + // in which the completions are provided + ShellCompDirectiveKeepOrder + // =========================================================================== // All directives using iota should be above this one. @@ -159,6 +163,9 @@ func (d ShellCompDirective) string() string { if d&ShellCompDirectiveFilterDirs != 0 { directives = append(directives, "ShellCompDirectiveFilterDirs") } + if d&ShellCompDirectiveKeepOrder != 0 { + directives = append(directives, "ShellCompDirectiveKeepOrder") + } if len(directives) == 0 { directives = append(directives, "ShellCompDirectiveDefault") } @@ -169,7 +176,7 @@ func (d ShellCompDirective) string() string { return strings.Join(directives, ", ") } -// Adds a special hidden command that can be used to request custom completions. +// initCompleteCmd adds a special hidden command that can be used to request custom completions. func (c *Command) initCompleteCmd(args []string) { completeCmd := &Command{ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), @@ -727,7 +734,7 @@ to enable it. You can execute the following once: To load completions in your current shell session: - source <(%[1]s completion zsh); compdef _%[1]s %[1]s + source <(%[1]s completion zsh) To load completions for every new session, execute once: diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 97112a17..12ca0d2b 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
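
The completions.go hunk above introduces ShellCompDirectiveKeepOrder, which the regenerated bash script maps to compopt -o nosort and the fish script maps to complete -k. The sketch below is not part of this patch; it assumes Cobra's ValidArgsFunction hook, and the command and environment names are hypothetical:

package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "deploy [environment]",
		// Return candidates in promotion order; ShellCompDirectiveKeepOrder asks
		// the shell not to re-sort them (bash: compopt -o nosort, fish: complete -k,
		// as wired up in the completion scripts in this patch).
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"dev", "staging", "prod"},
				cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	_ = cmd.Execute()
}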
@@ -53,7 +53,7 @@ function __%[1]s_perform_completion __%[1]s_debug "last arg: $lastArg" # Disable ActiveHelp which is not supported for fish shell - set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg" + set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" __%[1]s_debug "Calling $requestComp" set -l results (eval $requestComp 2> /dev/null) @@ -89,6 +89,60 @@ function __%[1]s_perform_completion printf "%%s\n" "$directiveLine" end +# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result +function __%[1]s_perform_completion_once + __%[1]s_debug "Starting __%[1]s_perform_completion_once" + + if test -n "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" + return 0 + end + + set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completions, probably due to a failure" + return 1 + end + + __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" + return 0 +end + +# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run +function __%[1]s_clear_perform_completion_once_result + __%[1]s_debug "" + __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" + set --erase __%[1]s_perform_completion_once_result + __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" +end + +function __%[1]s_requires_order_preservation + __%[1]s_debug "" + __%[1]s_debug "========= checking if order preservation is required ==========" + + __%[1]s_perform_completion_once + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Error determining if order preservation is required" + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveKeepOrder %[9]d + set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) + __%[1]s_debug "Keeporder is: $keeporder" + + if test $keeporder -ne 0 + __%[1]s_debug "This does require order preservation" + return 0 + end + + __%[1]s_debug "This doesn't require order preservation" + return 1 +end + + # This function does two things: # - Obtain the completions and store them in the global __%[1]s_comp_results # - Return false if file completion should be performed @@ -99,17 +153,17 @@ function __%[1]s_prepare_completions # Start fresh set --erase __%[1]s_comp_results - set -l results (__%[1]s_perform_completion) - __%[1]s_debug "Completion results: $results" + __%[1]s_perform_completion_once + __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" - if test -z "$results" + if test -z "$__%[1]s_perform_completion_once_result" __%[1]s_debug "No completion, probably due to a failure" # Might as well do file completion, in case it helps return 1 end - set -l directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] __%[1]s_debug "Completions are: $__%[1]s_comp_results" __%[1]s_debug "Directive is: $directive" @@ -205,13 +259,17 @@ end # Remove any pre-existing 
completions for the program since we will be handling all of them. complete -c %[2]s -e +# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global +complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' # The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results # which provides the program's completion choices. -complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - +# If this doesn't require order preservation, we don't use the -k flag +complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +# otherwise we use the -k flag +complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' `, nameForVar, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } // GenFishCompletion generates fish completion file and writes to the passed writer. diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 9c377aaf..b35fde15 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 004de42e..177d2755 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,7 @@ filter __%[1]s_escapeStringWithSpecialChars { $ShellCompDirectiveNoFileComp=%[6]d $ShellCompDirectiveFilterFileExt=%[7]d $ShellCompDirectiveFilterDirs=%[8]d + $ShellCompDirectiveKeepOrder=%[9]d # Prepare the command to request completions for the program. # Split the command at the first space to separate the program and arguments. @@ -106,13 +107,22 @@ filter __%[1]s_escapeStringWithSpecialChars { # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. 
__%[1]s_debug "Adding extra empty parameter" -`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + # PowerShell 7.2+ changed the way how the arguments are passed to executables, + # so for pre-7.2 or when Legacy argument passing is enabled we need to use +`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` + if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or + ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or + (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and + $PSNativeCommandArgumentPassing -eq 'Legacy')) { +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } else { + $RequestComp="$RequestComp" + ' ""' + } } __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[9]s=0 + $env:%[10]s=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -137,7 +147,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } $Longest = 0 - $Values = $Out | ForEach-Object { + [Array]$Values = $Out | ForEach-Object { #Split the output in name and description `+" $Name, $Description = $_.Split(\"`t\",2)"+` __%[1]s_debug "Name: $Name Description: $Description" @@ -182,6 +192,11 @@ filter __%[1]s_escapeStringWithSpecialChars { } } + # we sort the values in ascending order by name if keep order isn't passed + if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { + $Values = $Values | Sort-Object -Property Name + } + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { __%[1]s_debug "ShellCompDirectiveNoFileComp is called" @@ -267,7 +282,7 @@ filter __%[1]s_escapeStringWithSpecialChars { Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md index 6865f88e..8a291eb2 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -1,11 +1,13 @@ ## Projects using Cobra - [Allero](https://github.com/allero-io/allero) +- [Arewefastyet](https://benchmark.vitess.io) - [Arduino CLI](https://github.com/arduino/arduino-cli) - [Bleve](https://blevesearch.com/) - [Cilium](https://cilium.io/) - [CloudQuery](https://github.com/cloudquery/cloudquery) - [CockroachDB](https://www.cockroachlabs.com/) +- [Constellation](https://github.com/edgelesssys/constellation) - [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) - [Datree](https://github.com/datreeio/datree) - [Delve](https://github.com/derekparker/delve) @@ -25,7 +27,7 @@ - [Istio](https://istio.io) - [Kool](https://github.com/kool-dev/kool) - [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/armosec/kubescape) +- [Kubescape](https://github.com/kubescape/kubescape) - [KubeVirt](https://github.com/kubevirt/kubevirt) - 
[Linkerd](https://linkerd.io/) - [Mattermost-server](https://github.com/mattermost/mattermost-server) @@ -51,10 +53,12 @@ - [Random](https://github.com/erdaltsksn/random) - [Rclone](https://rclone.org/) - [Scaleway CLI](https://github.com/scaleway/scaleway-cli) +- [Sia](https://github.com/SiaFoundation/siad) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) - [Twitch CLI](https://github.com/twitchdev/twitch-cli) - [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) +- [Vitess](https://vitess.io) - VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) - [Werf](https://werf.io/) - [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go index 126e83c3..b035742d 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index 553ee5df..065c0621 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -71,7 +71,7 @@ PowerShell: `,cmd.Root().Name()), DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": @@ -162,16 +162,7 @@ cmd := &cobra.Command{ } ``` -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -$ kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of -replication controllers following `rc`. +The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. ### Dynamic completion of nouns @@ -237,6 +228,10 @@ ShellCompDirectiveFilterFileExt // return []string{"themes"}, ShellCompDirectiveFilterDirs // ShellCompDirectiveFilterDirs + +// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order +// in which the completions are provided +ShellCompDirectiveKeepOrder ``` ***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. 
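As an illustrative aside to the completion changes in this patch (the new ShellCompDirectiveKeepOrder constant and the note above about ValidArgsFunction running after flag parsing), the sketch below shows how a dynamic completion function might request order preservation. The "status" command and the suggestion values are hypothetical, not taken from the patch; only the directive names come from the hunks above.

```go
package main

import "github.com/spf13/cobra"

func newStatusCmd() *cobra.Command {
	return &cobra.Command{
		Use: "status",
		// ValidArgsFunction is invoked after Cobra has parsed all flags,
		// so flag values typed before [tab][tab] are already available.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			// Return suggestions in a deliberate order and ask the shell to
			// preserve it with ShellCompDirectiveKeepOrder, combined with
			// NoFileComp to suppress the file-name fallback.
			return []string{"pending", "running", "succeeded", "failed"},
				cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
}

func main() {
	root := &cobra.Command{Use: "app"}
	root.AddCommand(newStatusCmd())
	_ = root.Execute()
}
```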
@@ -385,6 +380,19 @@ or ```go ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} ``` + +If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: + +```bash +$ source <(helm completion bash) +$ helm completion [tab][tab] +bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) +fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) + +$ source <(helm completion bash --no-descriptions) +$ helm completion [tab][tab] +bash fish powershell zsh +``` ## Bash completions ### Dependencies diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md index 977306aa..85201d84 100644 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -188,6 +188,37 @@ var versionCmd = &cobra.Command{ } ``` +### Organizing subcommands + +A command may have subcommands which in turn may have other subcommands. This is achieved by using +`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in +its own go package. + +The suggested approach is for the parent command to use `AddCommand` to add its most immediate +subcommands. For example, consider the following directory structure: + +```text +├── cmd +│   ├── root.go +│   └── sub1 +│   ├── sub1.go +│   └── sub2 +│   ├── leafA.go +│   ├── leafB.go +│   └── sub2.go +└── main.go +``` + +In this case: + +* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. +* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. +* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the + sub2 command. + +This approach ensures the subcommands are always included at compile time while avoiding cyclic +references. + ### Returning and handling errors If you wish to return an error to the caller of a command, `RunE` can be used. @@ -313,8 +344,8 @@ rootCmd.MarkFlagsRequiredTogether("username", "password") You can also prevent different flags from being provided together if they represent mutually exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: ```go -rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML") +rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") +rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") ``` @@ -349,7 +380,7 @@ shown below: ```go var cmd = &cobra.Command{ Short: "hello", - Args: MatchAll(ExactArgs(2), OnlyValidArgs), + Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { fmt.Println("Hello, World!") }, @@ -492,10 +523,11 @@ around it. In fact, you can provide your own if you want. ### Grouping commands in help -Cobra supports grouping of available commands. Groups must be explicitly defined by `AddGroup` and set by -the `GroupId` element of a subcommand. The groups will appear in the same order as they are defined. -If you use the generated `help` or `completion` commands, you can set the group ids by `SetHelpCommandGroupId` -and `SetCompletionCommandGroupId`, respectively. +Cobra supports grouping of available commands in the help output. 
To group commands, each group must be explicitly +defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element +of that subcommand. The groups will appear in the help output in the same order as they are defined using different +calls to `AddGroup()`. If you use the generated `help` or `completion` commands, you can set their group ids using +`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. ### Defining your own help diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 84cec76f..1856e4c7 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -90,6 +90,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) { compCmd = ShellCompNoDescRequestCmd } WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s +compdef _%[1]s %[1]s # zsh completion for %-36[1]s -*- shell-script -*- @@ -108,8 +109,9 @@ _%[1]s() local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder local -a completions __%[1]s_debug "\n========= starting completion logic ==========" @@ -177,7 +179,7 @@ _%[1]s() return fi - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} local startIndex=$((${#activeHelpMarker}+1)) local hasActiveHelp=0 @@ -227,6 +229,11 @@ _%[1]s() noSpace="-S ''" fi + if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then + __%[1]s_debug "Activating keep order." 
+ keepOrder="-V" + fi + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd @@ -262,7 +269,7 @@ _%[1]s() return $result else __%[1]s_debug "Calling _describe" - if eval _describe "completions" completions $flagPrefix $noSpace; then + if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then __%[1]s_debug "_describe found some completions" # Return the success of having called _describe @@ -296,6 +303,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then fi `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/go.bytebuilders.dev/license-verifier/info/lib.go b/vendor/go.bytebuilders.dev/license-verifier/info/lib.go index 15d2d73a..12b50db8 100644 --- a/vendor/go.bytebuilders.dev/license-verifier/info/lib.go +++ b/vendor/go.bytebuilders.dev/license-verifier/info/lib.go @@ -31,6 +31,7 @@ import ( "go.bytebuilders.dev/license-verifier/apis/licenses" + "github.com/PuerkitoBio/purell" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" @@ -46,8 +47,9 @@ var ( ProductName string // This has been renamed to Features ProductUID string - prodAddress = "https://byte.builders" - qaAddress = "https://appscode.ninja" + prodDomain = "byte.builders" + qaDomain = "appscode.ninja" + registrationAPIPath = "api/v1/register" LicenseIssuerAPIPath = "api/v1/license/issue" ) @@ -68,27 +70,84 @@ func SkipLicenseVerification() bool { return !v } -func RegistrationAPIEndpoint() string { - u := APIServerAddress() +func MustRegistrationAPIEndpoint() string { + r, err := RegistrationAPIEndpoint() + if err != nil { + panic(err) + } + return r +} + +func RegistrationAPIEndpoint(override ...string) (string, error) { + u, err := APIServerAddress(override...) + if err != nil { + return "", err + } u.Path = path.Join(u.Path, registrationAPIPath) - return u.String() + return u.String(), nil } -func LicenseIssuerAPIEndpoint() string { - u := APIServerAddress() +func MustLicenseIssuerAPIEndpoint() string { + r, err := LicenseIssuerAPIEndpoint() + if err != nil { + panic(err) + } + return r +} + +func LicenseIssuerAPIEndpoint(override ...string) (string, error) { + u, err := APIServerAddress(override...) + if err != nil { + return "", err + } u.Path = path.Join(u.Path, LicenseIssuerAPIPath) - return u.String() + return u.String(), nil } -func APIServerAddress() *url.URL { - if SkipLicenseVerification() { - u, _ := url.Parse(qaAddress) - return u +func MustAPIServerAddress() *url.URL { + u, err := APIServerAddress() + if err != nil { + panic(err) } - u, _ := url.Parse(prodAddress) return u } +func APIServerAddress(override ...string) (*url.URL, error) { + if len(override) > 0 && override[0] != "" { + nu, err := purell.NormalizeURLString(override[0], + purell.FlagsUsuallySafeGreedy|purell.FlagRemoveDuplicateSlashes) + if err != nil { + return nil, err + } + return url.Parse(nu) + } + + if SkipLicenseVerification() { + return url.Parse("https://api." + qaDomain) + } + return url.Parse("https://api." 
+ prodDomain) +} + +func HostedEndpoint(u string) (bool, error) { + nu, err := purell.NormalizeURLString(u, + purell.FlagsUsuallySafeGreedy|purell.FlagRemoveDuplicateSlashes) + if err != nil { + return false, err + } + u2, err := url.Parse(nu) + if err != nil { + return false, err + } + return HostedDomain(u2.Hostname()), nil +} + +func HostedDomain(d string) bool { + return d == prodDomain || + d == qaDomain || + strings.HasSuffix(d, "."+prodDomain) || + strings.HasSuffix(d, "."+qaDomain) +} + func LoadLicenseCA() ([]byte, error) { if LicenseCA != "" { return []byte(LicenseCA), nil diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 3141a7f1..6fc2838a 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -431,6 +431,14 @@ func (s *String) readBase128Int(out *int) bool { } ret <<= 7 b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + ret |= int(b & 0x7f) if b&0x80 == 0 { *out = ret diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index c15b8a77..684d984f 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { p.c.L = &p.mu } defer p.c.Signal() - if p.err != nil { + if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } - if p.breakErr != nil { - p.unread += len(d) - return len(d), nil // discard when there is no reader - } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 8cb14f3c..cd057f39 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error { } if len(data) > 0 { + st.bodyBytes += int64(len(data)) wrote, err := st.body.Write(data) if err != nil { + // The handler has closed the request body. + // Return the connection-level flow control for the discarded data, + // but not the stream-level flow control. sc.sendWindowUpdate(nil, int(f.Length)-wrote) - return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) + return nil } if wrote != len(data) { panic("internal error: bad Writer") } - st.bodyBytes += int64(len(data)) } // Return any padded flow control now, since we won't diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 05ba23d3..ac90a263 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { + roundTripErr := err if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. 
if retry == 0 { - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue } backoff := float64(uint(1) << (uint(retry) - 1)) @@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res timer := backoffNewTimer(d) select { case <-timer.C: - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): timer.Stop() @@ -1265,6 +1266,27 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return res, nil } + cancelRequest := func(cs *clientStream, err error) error { + cs.cc.mu.Lock() + defer cs.cc.mu.Unlock() + cs.abortStreamLocked(err) + if cs.ID != 0 { + // This request may have failed because of a problem with the connection, + // or for some unrelated reason. (For example, the user might have canceled + // the request without waiting for a response.) Mark the connection as + // not reusable, since trying to reuse a dead connection is worse than + // unnecessarily creating a new one. + // + // If cs.ID is 0, then the request was never allocated a stream ID and + // whatever went wrong was unrelated to the connection. We might have + // timed out waiting for a stream slot when StrictMaxConcurrentStreams + // is set, for example, in which case retrying on a different connection + // will not help. + cs.cc.doNotReuse = true + } + return err + } + for { select { case <-cs.respHeaderRecv: @@ -1279,15 +1301,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return handleResponseHeaders() default: waitDone() - return nil, cs.abortErr + return nil, cancelRequest(cs, cs.abortErr) } case <-ctx.Done(): - err := ctx.Err() - cs.abortStream(err) - return nil, err + return nil, cancelRequest(cs, ctx.Err()) case <-cs.reqCancel: - cs.abortStream(errRequestCanceled) - return nil, errRequestCanceled + return nil, cancelRequest(cs, errRequestCanceled) } } } @@ -2555,6 +2574,9 @@ func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc + cs.bufPipe.BreakWithError(errClosedResponseBody) + cs.abortStream(errClosedResponseBody) + unread := cs.bufPipe.Len() if unread > 0 { cc.mu.Lock() @@ -2573,9 +2595,6 @@ func (b transportResponseBody) Close() error { cc.wmu.Unlock() } - cs.bufPipe.BreakWithError(errClosedResponseBody) - cs.abortStream(errClosedResponseBody) - select { case <-cs.donec: case <-cs.ctx.Done(): diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go index f3baa379..1d9d91f3 100644 --- a/vendor/golang.org/x/sys/cpu/hwcap_linux.go +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -24,6 +24,21 @@ var hwCap uint var hwCap2 uint func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + buf, err := ioutil.ReadFile(procAuxv) if err != nil { // e.g. on android /proc/self/auxv is not accessible, so silently diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 00000000..5f92ac9a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 00000000..b975ea2a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 00000000..7def9580 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris +// +build aix solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. 
+func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go similarity index 76% rename from vendor/golang.org/x/sys/unix/ioctl.go rename to vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 1c51b0ec..649913d1 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -2,13 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix import ( - "runtime" "unsafe" ) @@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error { // passing the integer value directly. func IoctlSetPointerInt(fd int, req uint, value int) error { v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. @@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlGetInt performs an ioctl operation which gets an integer value @@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. 
func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } func IoctlGetTermios(fd int, req uint) (*Termios, error) { var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index 5384e7d9..cdc21bf7 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -17,25 +17,23 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -49,22 +47,22 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } // IoctlGetTermios performs an ioctl on fd with a *Termios. 
// // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 7456d9dd..be0423e6 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -203,6 +204,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -517,10 +519,11 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff..39dba6ca 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { + return ptrace1Ptr(request, pid, addr, data) +} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a0..9ea66330 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + return ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 2db1b51e..c406ae00 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -410,7 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be1..f2871fa9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -8,7 +8,6 @@ 
package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed170..75718ec0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index eda42671..7705c327 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 192b071b..20692150 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -14,7 +14,6 @@ package unix import ( "fmt" - "runtime" "syscall" "unsafe" ) @@ -376,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { - err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) - runtime.KeepAlive(ctlInfo) - return err + return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo)) } // IfreqMTU is struct ifreq used to get or set a network device's MTU. @@ -394,16 +392,14 @@ type IfreqMTU struct { func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { var ifreq IfreqMTU copy(ifreq.Name[:], ifname) - err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq)) return &ifreq, err } // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU // of the network device specified by ifreq.Name. 
func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { - err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) - runtime.KeepAlive(ifreq) - return err + return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL @@ -617,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -626,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -680,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Kqueue_from_portset_np // Kqueue_portset // Getattrlist -// Setattrlist // Getdirentriesattr // Searchfs // Delete diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce..9fa87980 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec996..f17b8c52 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index a41111a7..d4ce988e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg 
unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -325,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d50b9dc2..afb10106 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -161,7 +161,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -253,6 +254,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ptrace(request int, pid int, addr uintptr, data int) (err error) +//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) @@ -267,19 +269,36 @@ func PtraceDetach(pid int) (err error) { } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0) +} + +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + } + if countin > 0 { + _ = out[:countin] // check bounds + ioDesc.Addr = &out[0] + } else if out != nil { + ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero)) + } + ioDesc.SetLen(countin) + + err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0) + return int(ioDesc.Len), err } func PtraceLwpEvents(pid int, enable int) (err error) { return ptrace(PT_LWP_EVENTS, pid, 0, enable) } -func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) { + return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -299,13 +318,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0) } func PtraceSingleStep(pid int) (err error) { return ptrace(PT_STEP, pid, 1, 0) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, 
how, newfd) + return err +} + /* * Exposed directly */ @@ -402,7 +433,6 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 6a91d471..b8da5100 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 48110a0a..47155c48 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 52f1d4b7..08932093 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index 5537ee4f..d151a0d0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index 164abd5d..d5cd64b3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 4ffb6480..381fd467 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -20,3 +20,11 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { } return } + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg))) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5443dddd..fbaeb5ff 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1015,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1365,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) } +func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error { + return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1579,6 +1582,7 @@ func BindToDevice(fd int, device string) (err error) { } //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { // The peek requests are machine-size oriented, so we wrap it @@ -1596,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary. 
n := 0 if addr%SizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1608,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro for len(out) > 0 { // We use an internal buffer to guarantee alignment. // It's not documented if this is necessary, but we're paranoid. - err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1640,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c n := 0 if addr%SizeofPtr != 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1667,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Trailing edge. if len(data) > 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1696,11 +1700,11 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1709,7 +1713,7 @@ func PtraceSetOptions(pid int, options int) (err error) { func PtraceGetEventMsg(pid int) (msg uint, err error) { var data _C_long - err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data)) msg = uint(data) return } @@ -1869,7 +1873,6 @@ func Getpgrp() (pid int) { //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -1883,6 +1886,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. 
+ return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. @@ -2154,6 +2166,14 @@ func isGroupMember(gid int) bool { return false } +func isCapDacOverrideSet() bool { + hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3} + data := [2]CapUserData{} + err := Capget(&hdr, &data[0]) + + return err == nil && data[0].Effective&(1< 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2115,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2502,6 +2521,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } +func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 95fe4c0e..4baaed0b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - 
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 26a0fdc5..51d6f3fb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -725,6 +725,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -1984,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2115,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2502,6 +2521,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } +func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index efa5b4c9..c3b82c03 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 54749f9c..0eabac7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -436,6 +436,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1400,16 +1410,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 77479d45..ee313eb0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := 
RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 2e966d4d..4c986e44 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d65a7c0f..55521694 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 6f0b97c6..67a226fb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index e1c23b52..f0b9ddaa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), 
uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 36ea3a55..da63d9d7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -379,6 +379,16 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(arg) @@ -1336,16 +1346,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c81b0ad4..07b549cc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -411,16 +411,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2206bce7..5f481bf8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,16 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index edf6b39f..824cd52c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -578,16 +578,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 190609f2..e77aecfe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,16 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 5f984cbb..961a3afb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 46fc380a..ed05005e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cbd0d4da..d365b718 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0c13d15f..c3f1b8bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index e01432ae..a6574cf9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -624,16 +624,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 13c7ee7b..f4099026 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 02d0c0fd..9dfcc299 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9fee3b1d..0b292395 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -269,16 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 647bbfec..6cde3223 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,16 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index ada057f8..5253d65b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,16 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 79f73899..cdb2af5a 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fb161f3a..9d25f76b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 4c8ac993..d3f80351 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if 
e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 76dd8ec4..887188a5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index caeb807b..6699a783 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 08744425..04f0de34 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT 
libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index a05e5f4f..1e775fe0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 5782cd10..27b6f4df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index b2da8e50..7f642789 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := 
syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index cf310420..b797045f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 048b2655..756ef7b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 484bb42e..a8712662 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 6f33e37e..7bc2e24e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, 
uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 55af2726..05d4bffd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 330cf7f7..739be621 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 4028255b..74a25f8d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -687,12 +687,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - CALL 
libc_setrlimit(SB) - RET -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_setrtable(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 5f24de0d..7d95a197 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index e1fbd4df..990be245 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 78d4a424..609d1c59 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -110,7 +110,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -250,7 +249,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit //go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -391,7 +389,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - procSetrlimit, procSetsid, procSetuid, 
procshutdown, @@ -646,7 +643,18 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -1639,16 +1647,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index f2079457..c3168174 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -257,7 +257,17 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e2a64f09..690cefc3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 34aa7752..5bffc10e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index d9c78cdc..29dc4833 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -362,7 +362,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 26991b16..0a89b289 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -367,7 +367,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index f8324e7e..c8666bb1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -350,7 +350,7 @@ type FpExtendedPrecision struct { type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 4220411f..88fb48a8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -347,7 +347,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 0660fd45..698dc975 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -348,7 +348,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7d9fc8f1..ca84727c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -456,36 +456,60 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + 
Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 } type CanFilter struct { @@ -528,7 +552,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0x68 + SizeofTCPInfo = 0xf0 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1043,6 +1067,7 @@ const ( PerfBitCommExec = CBitFieldMaskBit24 PerfBitUseClockID = CBitFieldMaskBit25 PerfBitContextSwitch = CBitFieldMaskBit26 + PerfBitWriteBackward = CBitFieldMaskBit27 ) const ( @@ -1239,7 +1264,7 @@ type TCPMD5Sig struct { Flags uint8 Prefixlen uint8 Keylen uint16 - _ uint32 + Ifindex int32 Key [80]uint8 } @@ -1939,7 +1964,11 @@ const ( NFT_MSG_GETOBJ = 0x13 NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 + NFT_MSG_NEWFLOWTABLE = 0x16 + NFT_MSG_GETFLOWTABLE = 0x17 + NFT_MSG_DELFLOWTABLE = 0x18 + NFT_MSG_GETRULE_RESET = 0x19 + NFT_MSG_MAX = 0x1a NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2443,9 +2472,11 @@ const ( SOF_TIMESTAMPING_OPT_STATS = 0x1000 SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + SOF_TIMESTAMPING_BIND_PHC = 0x8000 + SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x8000 - SOF_TIMESTAMPING_MASK = 0xffff + SOF_TIMESTAMPING_LAST = 0x10000 + SOF_TIMESTAMPING_MASK = 0x1ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3265,7 +3296,7 @@ const ( DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae DEVLINK_ATTR_NESTED_DEVLINK = 0xaf DEVLINK_ATTR_SELFTESTS = 0xb0 - DEVLINK_ATTR_MAX = 0xb0 + DEVLINK_ATTR_MAX = 0xb3 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3281,7 +3312,8 @@ const ( DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 + DEVLINK_PORT_FN_ATTR_CAPS = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 ) type FsverityDigest struct { @@ -3572,7 +3604,8 @@ const ( ETHTOOL_MSG_MODULE_SET = 0x23 ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 - ETHTOOL_MSG_USER_MAX = 0x25 + ETHTOOL_MSG_RSS_GET = 0x26 + ETHTOOL_MSG_USER_MAX = 0x26 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3611,7 +3644,8 @@ const ( ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 - ETHTOOL_MSG_KERNEL_MAX = 0x25 + ETHTOOL_MSG_RSS_GET_REPLY = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x26 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3679,7 +3713,8 @@ const ( ETHTOOL_A_LINKSTATE_SQI_MAX = 0x4 ETHTOOL_A_LINKSTATE_EXT_STATE = 0x5 ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 0x6 - ETHTOOL_A_LINKSTATE_MAX = 0x6 + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 0x7 + ETHTOOL_A_LINKSTATE_MAX = 0x7 ETHTOOL_A_DEBUG_UNSPEC = 0x0 ETHTOOL_A_DEBUG_HEADER = 0x1 ETHTOOL_A_DEBUG_MSGMASK = 0x2 @@ -4409,7 +4444,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x140 + NL80211_ATTR_MAX = 0x141 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4552,6 +4587,7 @@ const ( 
NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 NL80211_ATTR_SURVEY_INFO = 0x54 NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TD_BITMAP = 0x141 NL80211_ATTR_TDLS_ACTION = 0x88 NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c @@ -5752,3 +5788,25 @@ const ( AUDIT_NLGRP_NONE = 0x0 AUDIT_NLGRP_READLOG = 0x1 ) + +const ( + TUN_F_CSUM = 0x1 + TUN_F_TSO4 = 0x2 + TUN_F_TSO6 = 0x4 + TUN_F_TSO_ECN = 0x8 + TUN_F_UFO = 0x10 +) + +const ( + VIRTIO_NET_HDR_F_NEEDS_CSUM = 0x1 + VIRTIO_NET_HDR_F_DATA_VALID = 0x2 + VIRTIO_NET_HDR_F_RSC_INFO = 0x4 +) + +const ( + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_ECN = 0x80 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 89c516a2..4ecc1495 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -414,7 +414,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 62b4fb26..34fddff9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -427,7 +427,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index e86b3589..3b14a603 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -405,7 +405,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6c6be4c9..0517651a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -406,7 +406,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 4982ea35..3b0c5181 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -407,7 +407,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 173141a6..fccdf4dd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 93ae4c51..500de8fc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 4e4e510c..d0434cd2 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 3f5ba013..84206ba5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 71dfe7cd..ab078cf1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -417,7 +417,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 3a2b7f0a..42eb2c4c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -416,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index a52d6275..31304a4e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -416,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index dfc007d8..c311f961 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -434,7 +434,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index b53cb910..bba3cefa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -429,7 +429,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index fe0aa354..ad8a0138 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -411,7 +411,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index 92ac05ff..b8ad1925 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,14 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) + blockp := unsafe.Pointer(block) for { - entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) + entry := UTF16PtrToString((*uint16)(blockp)) if len(entry) == 0 { break } env = append(env, entry) - blockp += 2 * 
(uintptr(len(entry)) + 1) + blockp = unsafe.Add(blockp, 2*(len(entry)+1)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 75980fd4..a52e0331 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -95,12 +95,17 @@ func ComposeCommandLine(args []string) string { // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. +// DecomposeCommandLine returns error if commandLine contains NUL. func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil } + utf16CommandLine, err := UTF16FromString(commandLine) + if err != nil { + return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") + } var argc int32 - argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index f8deca83..c964b684 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -141,6 +141,12 @@ const ( SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) +type ENUM_SERVICE_STATUS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatus SERVICE_STATUS +} + type SERVICE_STATUS struct { ServiceType uint32 CurrentState uint32 @@ -245,3 +251,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? //sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW //sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? 
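The Token.Environ hunk above swaps arithmetic on a uintptr copy of the block pointer for unsafe.Add, so the cursor stays an unsafe.Pointer the whole time instead of round-tripping through an integer. A minimal standalone sketch of the same walking pattern over a double-NUL-terminated UTF-16 block (not the vendored code itself, just an illustration of unsafe.Add, available since Go 1.17):

package main

import (
	"fmt"
	"unicode/utf16"
	"unsafe"
)

func main() {
	// A double-NUL-terminated UTF-16 block, shaped like a Windows environment block.
	block := utf16.Encode([]rune("FOO=1\x00BAR=2\x00\x00"))

	p := unsafe.Pointer(&block[0])
	for {
		// Collect one NUL-terminated entry starting at p.
		var units []uint16
		for q := p; *(*uint16)(q) != 0; q = unsafe.Add(q, 2) {
			units = append(units, *(*uint16)(q))
		}
		if len(units) == 0 {
			break // an empty entry marks the end of the block
		}
		fmt.Println(string(utf16.Decode(units)))

		// Advance past the entry and its NUL terminator; unsafe.Add keeps the
		// value a pointer instead of converting it to a uintptr and back.
		p = unsafe.Add(p, 2*(len(units)+1))
	}
}

Arithmetic performed on a uintptr snapshot is invisible to the garbage collector and is flagged by go vet's unsafeptr check; unsafe.Add sidesteps both problems.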
+//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 41cb3c01..3723b2c2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -824,6 +824,9 @@ const socket_error = uintptr(^uint32(0)) //sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup //sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup //sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW +//sys WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW +//sys WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd //sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket //sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto //sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom @@ -1019,8 +1022,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 0c4add97..88e62a63 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1243,6 +1243,51 @@ const ( DnsSectionAdditional = 0x0003 ) +const ( + // flags of WSALookupService + LUP_DEEP = 0x0001 + LUP_CONTAINERS = 0x0002 + LUP_NOCONTAINERS = 0x0004 + LUP_NEAREST = 0x0008 + LUP_RETURN_NAME = 0x0010 + LUP_RETURN_TYPE = 0x0020 + LUP_RETURN_VERSION = 0x0040 + LUP_RETURN_COMMENT = 0x0080 + LUP_RETURN_ADDR = 0x0100 + LUP_RETURN_BLOB = 0x0200 + LUP_RETURN_ALIASES = 0x0400 + LUP_RETURN_QUERY_STRING = 0x0800 + LUP_RETURN_ALL = 0x0FF0 + LUP_RES_SERVICE = 0x8000 + + LUP_FLUSHCACHE = 0x1000 + LUP_FLUSHPREVIOUS = 0x2000 + + LUP_NON_AUTHORITATIVE = 0x4000 + LUP_SECURE = 0x8000 + LUP_RETURN_PREFERRED_NAMES = 0x10000 + LUP_DNS_ONLY = 0x20000 + + LUP_ADDRCONFIG = 0x100000 + LUP_DUAL_ADDR = 0x200000 + LUP_FILESERVER = 0x400000 + LUP_DISABLE_IDN_ENCODING = 0x00800000 + LUP_API_ANSI = 0x01000000 + + LUP_RESOLUTION_HANDLE = 0x80000000 +) + +const ( + // values of WSAQUERYSET's namespace + NS_ALL = 0 + NS_DNS = 12 + NS_NLA = 15 + NS_BTH = 16 + NS_EMAIL = 37 + NS_PNRPNAME = 38 + NS_PNRPCLOUD = 39 +) + type DNSSRVData struct { Target *uint16 Priority uint16 @@ -2175,19 +2220,23 @@ type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { } const ( - // JobObjectInformationClass + // JobObjectInformationClass for 
QueryInformationJobObject and SetInformationJobObject JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicAccountingInformation = 1 + JobObjectBasicAndIoAccountingInformation = 8 JobObjectBasicLimitInformation = 2 + JobObjectBasicProcessIdList = 3 JobObjectBasicUIRestrictions = 4 JobObjectCpuRateControlInformation = 15 JobObjectEndOfJobTimeInformation = 6 JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 + JobObjectLimitViolationInformation = 13 + JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 + JobObjectNotificationLimitInformation2 = 33 JobObjectSecurityLimitInformation = 5 ) @@ -3258,3 +3307,43 @@ const ( DWMWA_TEXT_COLOR = 36 DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 ) + +type WSAQUERYSET struct { + Size uint32 + ServiceInstanceName *uint16 + ServiceClassId *GUID + Version *WSAVersion + Comment *uint16 + NameSpace uint32 + NSProviderId *GUID + Context *uint16 + NumberOfProtocols uint32 + AfpProtocols *AFProtocols + QueryString *uint16 + NumberOfCsAddrs uint32 + SaBuffer *CSAddrInfo + OutputFlags uint32 + Blob *BLOB +} + +type WSAVersion struct { + Version uint32 + EnumerationOfComparison int32 +} + +type AFProtocols struct { + AddressFamily int32 + Protocol int32 +} + +type CSAddrInfo struct { + LocalAddr SocketAddress + RemoteAddr SocketAddress + SocketType int32 + Protocol int32 +} + +type BLOB struct { + Size uint32 + BlobData *byte +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index ac60052e..a81ea2c7 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -86,6 +86,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") @@ -474,6 +475,9 @@ var ( procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSALookupServiceBeginW = modws2_32.NewProc("WSALookupServiceBeginW") + procWSALookupServiceEnd = modws2_32.NewProc("WSALookupServiceEnd") + procWSALookupServiceNextW = modws2_32.NewProc("WSALookupServiceNextW") procWSARecv = modws2_32.NewProc("WSARecv") procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") procWSASend = modws2_32.NewProc("WSASend") @@ -731,6 +735,14 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes return } +func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, 
serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) if r1 == 0 { @@ -4067,6 +4079,30 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo return } +func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceEnd(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { + r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) if r1 == socket_error { diff --git a/vendor/golang.org/x/text/encoding/internal/internal.go b/vendor/golang.org/x/text/encoding/internal/internal.go index 75a5fd16..413e6fc6 100644 --- a/vendor/golang.org/x/text/encoding/internal/internal.go +++ b/vendor/golang.org/x/text/encoding/internal/internal.go @@ -64,7 +64,7 @@ func (e FuncEncoding) NewEncoder() *encoding.Encoder { // byte. type RepertoireError byte -// Error implements the error interrface. +// Error implements the error interface. func (r RepertoireError) Error() string { return "encoding: rune not supported by encoding." } diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index d69ccb4f..487335d1 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -13,7 +13,7 @@ import "encoding/binary" // a rune to a uint16. The values take two forms. For v >= 0x8000: // bits // 15: 1 (inverse of NFD_QC bit of qcInfo) -// 13..7: qcInfo (see below). isYesD is always true (no decompostion). +// 13..7: qcInfo (see below). isYesD is always true (no decomposition). // 6..0: ccc (compressed CCC value). 
// For v < 0x8000, the respective rune has a decomposition and v is an index // into a byte array of UTF-8 decomposition sequences and additional info and diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index 0ffb3156..a411d542 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -1,7 +1,6 @@ package jsonpatch import ( - "bytes" "encoding/json" "fmt" "reflect" @@ -24,21 +23,28 @@ func (j *Operation) Json() string { } func (j *Operation) MarshalJSON() ([]byte, error) { - var b bytes.Buffer - b.WriteString("{") - b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation)) - b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path)) - // Consider omitting Value for non-nullable operations. - if j.Value != nil || j.Operation == "replace" || j.Operation == "add" { - v, err := json.Marshal(j.Value) - if err != nil { - return nil, err - } - b.WriteString(`,"value":`) - b.Write(v) - } - b.WriteString("}") - return b.Bytes(), nil + // Ensure for add and replace we emit `value: null` + if j.Value == nil && (j.Operation == "replace" || j.Operation == "add") { + return json.Marshal(struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` + }{ + Operation: j.Operation, + Path: j.Path, + }) + } + // otherwise just marshal normally. We cannot literally do json.Marshal(j) as it would be recursively + // calling this function. + return json.Marshal(struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` + }{ + Operation: j.Operation, + Path: j.Path, + Value: j.Value, + }) } type ByPath []Operation @@ -149,9 +155,6 @@ func makePath(path string, newPart interface{}) string { if path == "" { return "/" + key } - if strings.HasSuffix(path, "/") { - return path + key - } return path + "/" + key } @@ -211,22 +214,18 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, } case []interface{}: bt := bv.([]interface{}) - if isSimpleArray(at) && isSimpleArray(bt) { - patch = append(patch, compareEditDistance(at, bt, p)...) 
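The MarshalJSON rewrite above replaces hand-built JSON strings with marshaling an anonymous struct while preserving the existing behavior: add and replace operations with a nil value still serialize an explicit "value":null, and other operations omit the field. A short usage sketch against the vendored gomodules.xyz/jsonpatch/v2 API, using NewOperation as it is used elsewhere in this file:

package main

import (
	"fmt"

	jsonpatch "gomodules.xyz/jsonpatch/v2"
)

func main() {
	// "replace" with a nil value keeps an explicit null so the patch
	// actually clears the target field.
	rep := jsonpatch.NewOperation("replace", "/spec/suspend", nil)
	b, _ := rep.MarshalJSON()
	fmt.Println(string(b)) // {"op":"replace","path":"/spec/suspend","value":null}

	// Other operations drop a nil value entirely (the omitempty branch).
	rm := jsonpatch.NewOperation("remove", "/spec/suspend", nil)
	b, _ = rm.MarshalJSON()
	fmt.Println(string(b)) // {"op":"remove","path":"/spec/suspend"}
}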
- } else { - n := min(len(at), len(bt)) - for i := len(at) - 1; i >= n; i-- { - patch = append(patch, NewOperation("remove", makePath(p, i), nil)) - } - for i := n; i < len(bt); i++ { - patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) - } - for i := 0; i < n; i++ { - var err error - patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) - if err != nil { - return nil, err - } + n := min(len(at), len(bt)) + for i := len(at) - 1; i >= n; i-- { + patch = append(patch, NewOperation("remove", makePath(p, i), nil)) + } + for i := n; i < len(bt); i++ { + patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) + } + for i := 0; i < n; i++ { + var err error + patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) + if err != nil { + return nil, err } } default: @@ -235,100 +234,9 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, return patch, nil } -func isBasicType(a interface{}) bool { - switch a.(type) { - case string, float64, bool: - default: - return false - } - return true -} - -func isSimpleArray(a []interface{}) bool { - for i := range a { - switch a[i].(type) { - case string, float64, bool: - default: - val := reflect.ValueOf(a[i]) - if val.Kind() == reflect.Map { - for _, k := range val.MapKeys() { - av := val.MapIndex(k) - if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { - if av.IsNil() { - continue - } - av = av.Elem() - } - if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { - return false - } - } - return true - } - return false - } - } - return true -} - -// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm -// Adapted from https://github.com/texttheater/golang-levenshtein -func compareEditDistance(s, t []interface{}, p string) []Operation { - m := len(s) - n := len(t) - - d := make([][]int, m+1) - for i := 0; i <= m; i++ { - d[i] = make([]int, n+1) - d[i][0] = i - } - for j := 0; j <= n; j++ { - d[0][j] = j - } - - for j := 1; j <= n; j++ { - for i := 1; i <= m; i++ { - if reflect.DeepEqual(s[i-1], t[j-1]) { - d[i][j] = d[i-1][j-1] // no op required - } else { - del := d[i-1][j] + 1 - add := d[i][j-1] + 1 - rep := d[i-1][j-1] + 1 - d[i][j] = min(rep, min(add, del)) - } - } - } - - return backtrace(s, t, p, m, n, d) -} - func min(x int, y int) int { if y < x { return y } return x } - -func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation { - if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { - op := NewOperation("remove", makePath(p, i-1), nil) - return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) - } - if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { - op := NewOperation("add", makePath(p, i), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) - } - if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { - if isBasicType(s[0]) { - op := NewOperation("replace", makePath(p, i-1), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) - } - - p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) - return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) 
- } - if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { - return backtrace(s, t, p, i-1, j-1, matrix) - } - return []Operation{} -} diff --git a/vendor/kmodules.xyz/client-go/Makefile b/vendor/kmodules.xyz/client-go/Makefile index 3c2ed61b..441dd38e 100644 --- a/vendor/kmodules.xyz/client-go/Makefile +++ b/vendor/kmodules.xyz/client-go/Makefile @@ -19,7 +19,7 @@ REPO := $(notdir $(shell pwd)) BIN := client-go # https://github.com/appscodelabs/gengo-builder -CODE_GENERATOR_IMAGE ?= appscode/gengo:release-1.25 +CODE_GENERATOR_IMAGE ?= ghcr.io/appscode/gengo:release-1.25 # This version-strategy uses git tags to set the version string git_branch := $(shell git rev-parse --abbrev-ref HEAD) @@ -153,8 +153,26 @@ gen-crd-protos: --apimachinery-packages=-k8s.io/apimachinery/pkg/api/resource,-k8s.io/apimachinery/pkg/apis/meta/v1,-k8s.io/apimachinery/pkg/apis/meta/v1beta1,-k8s.io/apimachinery/pkg/runtime,-k8s.io/apimachinery/pkg/runtime/schema,-k8s.io/apimachinery/pkg/util/intstr \ --packages=-k8s.io/api/core/v1,kmodules.xyz/client-go/api/v1 +.PHONY: gen-enum +gen-enum: + @docker run \ + -i \ + --rm \ + -u $$(id -u):$$(id -g) \ + -v $$(pwd):/src \ + -w /src \ + -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin \ + -v $$(pwd)/.go/bin/$(OS)_$(ARCH):/go/bin/$(OS)_$(ARCH) \ + -v $$(pwd)/.go/cache:/.cache \ + --env HTTP_PROXY=$(HTTP_PROXY) \ + --env HTTPS_PROXY=$(HTTPS_PROXY) \ + $(BUILD_IMAGE) \ + /bin/bash -c " \ + go generate ./api/... \ + " + .PHONY: gen -gen: clientset openapi gen-crd-protos +gen: clientset gen-enum openapi gen-crd-protos fmt: $(BUILD_DIRS) @docker run \ diff --git a/vendor/kmodules.xyz/client-go/api/v1/object.go b/vendor/kmodules.xyz/client-go/api/v1/object.go index d7983414..eed83c3f 100644 --- a/vendor/kmodules.xyz/client-go/api/v1/object.go +++ b/vendor/kmodules.xyz/client-go/api/v1/object.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +//go:generate go-enum --mustparse --names --values package v1 import ( @@ -207,30 +208,13 @@ type ObjectInfo struct { Ref ObjectReference `json:"ref" protobuf:"bytes,2,opt,name=ref"` } -// +kubebuilder:validation:Enum=id;config;backup_via;catalog;connect_via;exposed_by;monitored_by;offshoot;restore_into;scaled_by;view;cert_issuer;policy;recommended_for;ops +// +kubebuilder:validation:Enum=backup_via;catalog;cert_issuer;config;connect_via;exposed_by;id;located_on;monitored_by;offshoot;ops;policy;recommended_for;restore_into;scaled_by;view +// ENUM(backup_via,catalog,cert_issuer,config,connect_via,exposed_by,id,located_on,monitored_by,offshoot,ops,policy,recommended_for,restore_into,scaled_by,view) type EdgeLabel string -const ( - EdgeId EdgeLabel = "id" - EdgeConfig EdgeLabel = "config" - EdgeBackupVia EdgeLabel = "backup_via" - EdgeCatalog EdgeLabel = "catalog" - EdgeConnectVia EdgeLabel = "connect_via" - EdgeExposedBy EdgeLabel = "exposed_by" - EdgeMonitoredBy EdgeLabel = "monitored_by" - EdgeOffshoot EdgeLabel = "offshoot" - EdgeRestoreInto EdgeLabel = "restore_into" - EdgeScaledBy EdgeLabel = "scaled_by" - EdgeView EdgeLabel = "view" - EdgeCertIssuer EdgeLabel = "cert_issuer" - EdgePolicy EdgeLabel = "policy" - EdgeOps EdgeLabel = "ops" - EdgeRecommendedFor EdgeLabel = "recommended_for" -) - func (e EdgeLabel) Direct() bool { - return e == EdgeOffshoot || - e == EdgeView || - e == EdgeOps || - e == EdgeRecommendedFor + return e == EdgeLabelOffshoot || + e == EdgeLabelView || + e == EdgeLabelOps || + e == EdgeLabelRecommendedFor } diff --git a/vendor/kmodules.xyz/client-go/api/v1/object_enum.go b/vendor/kmodules.xyz/client-go/api/v1/object_enum.go new file mode 100644 index 00000000..2309b52c --- /dev/null +++ b/vendor/kmodules.xyz/client-go/api/v1/object_enum.go @@ -0,0 +1,145 @@ +// Code generated by go-enum DO NOT EDIT. +// Version: +// Revision: +// Build Date: +// Built By: + +package v1 + +import ( + "fmt" + strings "strings" +) + +const ( + // EdgeLabelBackupVia is a EdgeLabel of type backup_via. + EdgeLabelBackupVia EdgeLabel = "backup_via" + // EdgeLabelCatalog is a EdgeLabel of type catalog. + EdgeLabelCatalog EdgeLabel = "catalog" + // EdgeLabelCertIssuer is a EdgeLabel of type cert_issuer. + EdgeLabelCertIssuer EdgeLabel = "cert_issuer" + // EdgeLabelConfig is a EdgeLabel of type config. + EdgeLabelConfig EdgeLabel = "config" + // EdgeLabelConnectVia is a EdgeLabel of type connect_via. + EdgeLabelConnectVia EdgeLabel = "connect_via" + // EdgeLabelExposedBy is a EdgeLabel of type exposed_by. + EdgeLabelExposedBy EdgeLabel = "exposed_by" + // EdgeLabelId is a EdgeLabel of type id. + EdgeLabelId EdgeLabel = "id" + // EdgeLabelLocatedOn is a EdgeLabel of type located_on. + EdgeLabelLocatedOn EdgeLabel = "located_on" + // EdgeLabelMonitoredBy is a EdgeLabel of type monitored_by. + EdgeLabelMonitoredBy EdgeLabel = "monitored_by" + // EdgeLabelOffshoot is a EdgeLabel of type offshoot. + EdgeLabelOffshoot EdgeLabel = "offshoot" + // EdgeLabelOps is a EdgeLabel of type ops. + EdgeLabelOps EdgeLabel = "ops" + // EdgeLabelPolicy is a EdgeLabel of type policy. + EdgeLabelPolicy EdgeLabel = "policy" + // EdgeLabelRecommendedFor is a EdgeLabel of type recommended_for. + EdgeLabelRecommendedFor EdgeLabel = "recommended_for" + // EdgeLabelRestoreInto is a EdgeLabel of type restore_into. + EdgeLabelRestoreInto EdgeLabel = "restore_into" + // EdgeLabelScaledBy is a EdgeLabel of type scaled_by. + EdgeLabelScaledBy EdgeLabel = "scaled_by" + // EdgeLabelView is a EdgeLabel of type view. 
+ EdgeLabelView EdgeLabel = "view" +) + +var ErrInvalidEdgeLabel = fmt.Errorf("not a valid EdgeLabel, try [%s]", strings.Join(_EdgeLabelNames, ", ")) + +var _EdgeLabelNames = []string{ + string(EdgeLabelBackupVia), + string(EdgeLabelCatalog), + string(EdgeLabelCertIssuer), + string(EdgeLabelConfig), + string(EdgeLabelConnectVia), + string(EdgeLabelExposedBy), + string(EdgeLabelId), + string(EdgeLabelLocatedOn), + string(EdgeLabelMonitoredBy), + string(EdgeLabelOffshoot), + string(EdgeLabelOps), + string(EdgeLabelPolicy), + string(EdgeLabelRecommendedFor), + string(EdgeLabelRestoreInto), + string(EdgeLabelScaledBy), + string(EdgeLabelView), +} + +// EdgeLabelNames returns a list of possible string values of EdgeLabel. +func EdgeLabelNames() []string { + tmp := make([]string, len(_EdgeLabelNames)) + copy(tmp, _EdgeLabelNames) + return tmp +} + +// EdgeLabelValues returns a list of the values for EdgeLabel +func EdgeLabelValues() []EdgeLabel { + return []EdgeLabel{ + EdgeLabelBackupVia, + EdgeLabelCatalog, + EdgeLabelCertIssuer, + EdgeLabelConfig, + EdgeLabelConnectVia, + EdgeLabelExposedBy, + EdgeLabelId, + EdgeLabelLocatedOn, + EdgeLabelMonitoredBy, + EdgeLabelOffshoot, + EdgeLabelOps, + EdgeLabelPolicy, + EdgeLabelRecommendedFor, + EdgeLabelRestoreInto, + EdgeLabelScaledBy, + EdgeLabelView, + } +} + +// String implements the Stringer interface. +func (x EdgeLabel) String() string { + return string(x) +} + +// IsValid provides a quick way to determine if the typed value is +// part of the allowed enumerated values +func (x EdgeLabel) IsValid() bool { + _, err := ParseEdgeLabel(string(x)) + return err == nil +} + +var _EdgeLabelValue = map[string]EdgeLabel{ + "backup_via": EdgeLabelBackupVia, + "catalog": EdgeLabelCatalog, + "cert_issuer": EdgeLabelCertIssuer, + "config": EdgeLabelConfig, + "connect_via": EdgeLabelConnectVia, + "exposed_by": EdgeLabelExposedBy, + "id": EdgeLabelId, + "located_on": EdgeLabelLocatedOn, + "monitored_by": EdgeLabelMonitoredBy, + "offshoot": EdgeLabelOffshoot, + "ops": EdgeLabelOps, + "policy": EdgeLabelPolicy, + "recommended_for": EdgeLabelRecommendedFor, + "restore_into": EdgeLabelRestoreInto, + "scaled_by": EdgeLabelScaledBy, + "view": EdgeLabelView, +} + +// ParseEdgeLabel attempts to convert a string to a EdgeLabel. +func ParseEdgeLabel(name string) (EdgeLabel, error) { + if x, ok := _EdgeLabelValue[name]; ok { + return x, nil + } + return EdgeLabel(""), fmt.Errorf("%s is %w", name, ErrInvalidEdgeLabel) +} + +// MustParseEdgeLabel converts a string to a EdgeLabel, and panics if is not valid. +func MustParseEdgeLabel(name string) EdgeLabel { + val, err := ParseEdgeLabel(name) + if err != nil { + panic(err) + } + return val +} diff --git a/vendor/kmodules.xyz/client-go/meta/conditions.go b/vendor/kmodules.xyz/client-go/meta/conditions.go new file mode 100644 index 00000000..c78172e3 --- /dev/null +++ b/vendor/kmodules.xyz/client-go/meta/conditions.go @@ -0,0 +1,164 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
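The hand-written EdgeLabel constants in object.go are replaced by this go-enum generated file, which also adds parse and validation helpers. A small sketch of how downstream code might use them, assuming the kmapi import alias that appears later in this patch:

package main

import (
	"fmt"

	kmapi "kmodules.xyz/client-go/api/v1"
)

func main() {
	// Parse user-supplied input into the enum type.
	e, err := kmapi.ParseEdgeLabel("offshoot")
	if err != nil {
		panic(err)
	}
	fmt.Println(e, e.IsValid(), e.Direct()) // offshoot true true

	// Unknown names wrap ErrInvalidEdgeLabel, which lists the allowed values.
	if _, err := kmapi.ParseEdgeLabel("bogus"); err != nil {
		fmt.Println(err)
	}

	// All sixteen generated values, e.g. for CLI flag help text.
	fmt.Println(kmapi.EdgeLabelNames())
}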
+*/ + +package meta + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// KEP: https://github.com/kubernetes/enhancements/blob/ced773ab59f0ff080888a912ab99474245623dad/keps/sig-api-machinery/1623-standardize-conditions/README.md + +func NewCondition(reason string, message string, generation int64, conditionStatus ...bool) metav1.Condition { + cs := metav1.ConditionTrue + if len(conditionStatus) > 0 && !conditionStatus[0] { + cs = metav1.ConditionFalse + } + + return metav1.Condition{ + Type: reason, + Reason: reason, + Message: message, + Status: cs, + LastTransitionTime: metav1.Now(), + ObservedGeneration: generation, + } +} + +// HasCondition returns "true" if the desired condition provided in "condType" is present in the condition list. +// Otherwise, it returns "false". +func HasCondition(conditions []metav1.Condition, condType string) bool { + for i := range conditions { + if conditions[i].Type == condType { + return true + } + } + return false +} + +// GetCondition returns a pointer to the desired condition referred by "condType". Otherwise, it returns nil. +func GetCondition(conditions []metav1.Condition, condType string) (int, *metav1.Condition) { + for i := range conditions { + c := conditions[i] + if c.Type == condType { + return i, &c + } + } + return -1, nil +} + +// SetCondition add/update the desired condition to the condition list. It does nothing if the condition is already in +// its desired state. +func SetCondition(conditions []metav1.Condition, newCondition metav1.Condition) []metav1.Condition { + idx, curCond := GetCondition(conditions, newCondition.Type) + // If the current condition is in its desired state, we have nothing to do. Just return the original condition list. + if curCond != nil && + curCond.Status == newCondition.Status && + curCond.Reason == newCondition.Reason && + curCond.Message == newCondition.Message && + curCond.ObservedGeneration == newCondition.ObservedGeneration { + return conditions + } + // The desired conditions is not in the condition list or is not in its desired state. + // Update it if present in the condition list, or append the new condition if it does not present. + newCondition.LastTransitionTime = metav1.Now() + if idx == -1 { + conditions = append(conditions, newCondition) + } else if newCondition.ObservedGeneration >= curCond.ObservedGeneration { + // only update if the new condition is based on observed generation at least as updated as the current condition + conditions[idx] = newCondition + } + return conditions +} + +// RemoveCondition remove a condition from the condition list referred by "condType" parameter. +func RemoveCondition(conditions []metav1.Condition, condType string) []metav1.Condition { + idx, _ := GetCondition(conditions, condType) + if idx == -1 { + // The desired condition is not present in the condition list. So, nothing to do. + return conditions + } + return append(conditions[:idx], conditions[idx+1:]...) +} + +// IsConditionTrue returns "true" if the desired condition is in true state. +// It returns "false" if the desired condition is not in "true" state or is not in the condition list. +func IsConditionTrue(conditions []metav1.Condition, condType string) bool { + for i := range conditions { + if conditions[i].Type == condType && conditions[i].Status == metav1.ConditionTrue { + return true + } + } + return false +} + +// IsConditionFalse returns "true" if the desired condition is in false state. 
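The condition helpers added in this new file give controllers a small toolkit around metav1.Condition. A sketch of the intended flow, with kmeta as an assumed alias for kmodules.xyz/client-go/meta: SetCondition rewrites an entry only when something actually changed, and it ignores writers whose ObservedGeneration is older than what is already recorded.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kmeta "kmodules.xyz/client-go/meta"
)

func main() {
	var conds []metav1.Condition

	// Record a True "Ready" condition observed at generation 1.
	conds = kmeta.SetCondition(conds, kmeta.NewCondition("Ready", "all replicas are ready", 1))
	fmt.Println(kmeta.IsConditionTrue(conds, "Ready")) // true

	// Flip it to False at generation 2; the trailing bool selects ConditionFalse.
	conds = kmeta.SetCondition(conds, kmeta.NewCondition("Ready", "rolling update in progress", 2, false))
	fmt.Println(kmeta.IsConditionFalse(conds, "Ready")) // true

	// A stale writer (older observed generation) cannot overwrite the newer entry.
	conds = kmeta.SetCondition(conds, kmeta.NewCondition("Ready", "stale update", 1))
	_, cur := kmeta.GetCondition(conds, "Ready")
	fmt.Println(cur.Status, cur.ObservedGeneration) // False 2
}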
+// It returns "false" if the desired condition is not in "false" state or is not in the condition list. +func IsConditionFalse(conditions []metav1.Condition, condType string) bool { + for i := range conditions { + if conditions[i].Type == condType && conditions[i].Status == metav1.ConditionFalse { + return true + } + } + return false +} + +// IsConditionUnknown returns "true" if the desired condition is in unknown state. +// It returns "false" if the desired condition is not in "unknown" state or is not in the condition list. +func IsConditionUnknown(conditions []metav1.Condition, condType string) bool { + for i := range conditions { + if conditions[i].Type == condType && conditions[i].Status == metav1.ConditionUnknown { + return true + } + } + return false +} + +// Status defines the set of statuses a resource can have. +// Based on kstatus: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus +// +kubebuilder:validation:Enum=InProgress;Failed;Current;Terminating;NotFound;Unknown +type Status string + +const ( + // The set of status conditions which can be assigned to resources. + InProgressStatus Status = "InProgress" + FailedStatus Status = "Failed" + CurrentStatus Status = "Current" + TerminatingStatus Status = "Terminating" + NotFoundStatus Status = "NotFound" + UnknownStatus Status = "Unknown" +) + +var Statuses = []Status{InProgressStatus, FailedStatus, CurrentStatus, TerminatingStatus, UnknownStatus} + +// String returns the status as a string. +func (s Status) String() string { + return string(s) +} + +// StatusFromStringOrDie turns a string into a Status. Will panic if the provided string is +// not a valid status. +func StatusFromStringOrDie(text string) Status { + s := Status(text) + for _, r := range Statuses { + if s == r { + return s + } + } + panic(fmt.Errorf("string has invalid status: %s", s)) +} diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go index 53eb1806..61a4c764 100644 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go +++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/appbinding_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1alpha1 import ( + kmapi "kmodules.xyz/client-go/api/v1" + core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -42,31 +44,35 @@ const ( // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" type AppBinding struct { metav1.TypeMeta `json:",inline,omitempty"` - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Spec AppBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AppBindingSpec `json:"spec,omitempty"` } // AppBindingSpec is the spec for app type AppBindingSpec struct { // Type used to facilitate programmatic handling of application. // +optional - Type AppType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=AppType"` + Type AppType `json:"type,omitempty"` + + // Reference to underlying application + // +optional + AppRef *kmapi.TypedObjectReference `json:"appRef,omitempty"` // Version used to facilitate programmatic handling of application. 
// +optional - Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + Version string `json:"version,omitempty"` // ClientConfig defines how to communicate with the app. // Required - ClientConfig ClientConfig `json:"clientConfig" protobuf:"bytes,3,opt,name=clientConfig"` + ClientConfig ClientConfig `json:"clientConfig"` // Secret is the name of the secret to create in the AppBinding's // namespace that will hold the credentials associated with the AppBinding. - Secret *core.LocalObjectReference `json:"secret,omitempty" protobuf:"bytes,4,opt,name=secret"` + Secret *core.LocalObjectReference `json:"secret,omitempty"` // List of transformations that should be applied to the credentials // associated with the ServiceBinding before they are inserted into the Secret. - SecretTransforms []SecretTransform `json:"secretTransforms,omitempty" protobuf:"bytes,5,rep,name=secretTransforms"` + SecretTransforms []SecretTransform `json:"secretTransforms,omitempty"` // Parameters is a set of the parameters to be used to connect to the // app. The inline YAML/JSON payload to be translated into equivalent @@ -80,11 +86,11 @@ type AppBindingSpec struct { // +optional // +kubebuilder:validation:EmbeddedResource // +kubebuilder:pruning:PreserveUnknownFields - Parameters *runtime.RawExtension `json:"parameters,omitempty" protobuf:"bytes,6,opt,name=parameters"` + Parameters *runtime.RawExtension `json:"parameters,omitempty"` // TLSSecret is the name of the secret that will hold // the client certificate and private key associated with the AppBinding. - TLSSecret *core.LocalObjectReference `json:"tlsSecret,omitempty" protobuf:"bytes,7,opt,name=tlsSecret"` + TLSSecret *core.LocalObjectReference `json:"tlsSecret,omitempty"` } type AppType string @@ -115,7 +121,7 @@ type ClientConfig struct { // allowed, either. // // +optional - URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` + URL *string `json:"url,omitempty"` // `service` is a reference to the service for this app. Either // `service` or `url` must be specified. @@ -123,21 +129,21 @@ type ClientConfig struct { // If the webhook is running within the cluster, then you should use `service`. // // +optional - Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` + Service *ServiceReference `json:"service,omitempty"` // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this app. // This is strongly discouraged. You should use the CABundle instead. - InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty" protobuf:"varint,3,opt,name=insecureSkipTLSVerify"` + InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty"` // CABundle is a PEM encoded CA bundle which will be used to validate the serving certificate of this app. // +optional - CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,4,opt,name=caBundle"` + CABundle []byte `json:"caBundle,omitempty"` // ServerName is used to verify the hostname on the returned // certificates unless InsecureSkipVerify is given. It is also included // in the client's handshake to support virtual hosting unless it is // an IP address. 
- ServerName string `json:"serverName,omitempty" protobuf:"bytes,5,opt,name=serverName"` + ServerName string `json:"serverName,omitempty"` } // ServiceReference holds a reference to Service.legacy.k8s.io @@ -145,28 +151,28 @@ type ServiceReference struct { // Specifies which scheme to use, for example: http, https // If specified, then it will applied as prefix in this format: scheme:// // If not specified, then nothing will be prefixed - Scheme string `json:"scheme" protobuf:"bytes,1,opt,name=scheme"` + Scheme string `json:"scheme"` // `namespace` is the namespace of the service. // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + Namespace string `json:"namespace,omitempty"` // `name` is the name of the service. // Required - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + Name string `json:"name"` // The port that will be exposed by this app. - Port int32 `json:"port" protobuf:"varint,4,opt,name=port"` + Port int32 `json:"port"` // `path` is an optional URL path which will be sent in any request to // this service. // +optional - Path string `json:"path,omitempty" protobuf:"bytes,5,opt,name=path"` + Path string `json:"path,omitempty"` // `query` is optional encoded query string, without '?' which will be // sent in any request to this service. // +optional - Query string `json:"query,omitempty" protobuf:"bytes,6,opt,name=query"` + Query string `json:"query,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -174,19 +180,19 @@ type ServiceReference struct { // AppBindingList is a list of Apps type AppBindingList struct { metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + metav1.ListMeta `json:"metadata,omitempty"` // Items is a list of AppBinding CRD objects - Items []AppBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []AppBinding `json:"items,omitempty"` } type AppReference struct { // `namespace` is the namespace of the app. // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + Namespace string `json:"namespace"` // `name` is the name of the app. // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + Name string `json:"name"` // Parameters is a set of the parameters to be used to override default // parameters. The inline YAML/JSON payload to be translated into equivalent @@ -198,7 +204,7 @@ type AppReference struct { // +optional // +kubebuilder:validation:EmbeddedResource // +kubebuilder:pruning:PreserveUnknownFields - Parameters *runtime.RawExtension `json:"parameters,omitempty" protobuf:"bytes,3,opt,name=parameters"` + Parameters *runtime.RawExtension `json:"parameters,omitempty"` } type AppBindingMeta interface { @@ -210,9 +216,9 @@ type AppBindingMeta interface { // referenced object. type ObjectReference struct { // Namespace of the referent. - Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + Namespace string `json:"namespace,omitempty"` // Name of the referent. - Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"` + Name string `json:"name,omitempty"` } // ref: https://github.com/kubernetes-sigs/service-catalog/blob/37b874716ad709a175e426f5f5638322a600849f/pkg/apis/servicecatalog/v1beta1/types.go#L1397 @@ -233,14 +239,14 @@ type ObjectReference struct { // Only one of the SecretTransform's members may be specified. 
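The AppBinding hunks above drop the protobuf struct tags (and the generated.pb.go below is deleted outright) while leaving the json tags untouched, so the JSON wire format of the CRD is unchanged. A quick sketch that round-trips a minimal AppBinding through encoding/json, with appcat as an assumed alias for the v1alpha1 package and hypothetical service values:

package main

import (
	"encoding/json"
	"fmt"

	appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
)

func main() {
	app := appcat.AppBinding{
		Spec: appcat.AppBindingSpec{
			ClientConfig: appcat.ClientConfig{
				Service: &appcat.ServiceReference{
					Scheme: "https",
					Name:   "my-postgres",
					Port:   5432,
				},
			},
		},
	}

	// Field names on the wire still come from the json tags,
	// e.g. spec.clientConfig.service.port.
	out, err := json.MarshalIndent(app, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}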
type SecretTransform struct { // RenameKey represents a transform that renames a credentials Secret entry's key - RenameKey *RenameKeyTransform `json:"renameKey,omitempty" protobuf:"bytes,1,opt,name=renameKey"` + RenameKey *RenameKeyTransform `json:"renameKey,omitempty"` // AddKey represents a transform that adds an additional key to the credentials Secret - AddKey *AddKeyTransform `json:"addKey,omitempty" protobuf:"bytes,2,opt,name=addKey"` + AddKey *AddKeyTransform `json:"addKey,omitempty"` // AddKeysFrom represents a transform that merges all the entries of an existing Secret // into the credentials Secret - AddKeysFrom *AddKeysFromTransform `json:"addKeysFrom,omitempty" protobuf:"bytes,3,opt,name=addKeysFrom"` + AddKeysFrom *AddKeysFromTransform `json:"addKeysFrom,omitempty"` // RemoveKey represents a transform that removes a credentials Secret entry - RemoveKey *RemoveKeyTransform `json:"removeKey,omitempty" protobuf:"bytes,4,opt,name=removeKey"` + RemoveKey *RemoveKeyTransform `json:"removeKey,omitempty"` } // RenameKeyTransform specifies that one of the credentials keys returned @@ -259,9 +265,9 @@ type SecretTransform struct { // "DB_USER": "johndoe" type RenameKeyTransform struct { // The name of the key to rename - From string `json:"from" protobuf:"bytes,1,opt,name=from"` + From string `json:"from"` // The new name for the key - To string `json:"to" protobuf:"bytes,2,opt,name=to"` + To string `json:"to"` } // AddKeyTransform specifies that Service Catalog should add an @@ -279,14 +285,14 @@ type RenameKeyTransform struct { // AddKeysFromTransform should be used instead. type AddKeyTransform struct { // The name of the key to add - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key"` // The binary value (possibly non-string) to add to the Secret under the specified key. If both // value and stringValue are specified, then value is ignored and stringValue is stored. // +optional - Value []byte `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + Value []byte `json:"value,omitempty"` // The string (non-binary) value to add to the Secret under the specified key. // +optional - StringValue *string `json:"stringValue,omitempty" protobuf:"bytes,3,opt,name=stringValue"` + StringValue *string `json:"stringValue,omitempty"` } // AddKeysFromTransform specifies that Service Catalog should merge @@ -299,12 +305,12 @@ type AddKeyTransform struct { // the credentials Secret. type AddKeysFromTransform struct { // The reference to the Secret that should be merged into the credentials Secret. - SecretRef *core.LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,1,opt,name=secretRef"` + SecretRef *core.LocalObjectReference `json:"secretRef,omitempty"` } // RemoveKeyTransform specifies that one of the credentials keys returned // from the broker should not be included in the credentials Secret. type RemoveKeyTransform struct { // The key to remove from the Secret - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key"` } diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.pb.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.pb.go deleted file mode 100644 index 547ad42e..00000000 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.pb.go +++ /dev/null @@ -1,4362 +0,0 @@ -/* -Copyright AppsCode Inc. 
and Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AddKeyTransform) Reset() { *m = AddKeyTransform{} } -func (*AddKeyTransform) ProtoMessage() {} -func (*AddKeyTransform) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{0} -} -func (m *AddKeyTransform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddKeyTransform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AddKeyTransform) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddKeyTransform.Merge(m, src) -} -func (m *AddKeyTransform) XXX_Size() int { - return m.Size() -} -func (m *AddKeyTransform) XXX_DiscardUnknown() { - xxx_messageInfo_AddKeyTransform.DiscardUnknown(m) -} - -var xxx_messageInfo_AddKeyTransform proto.InternalMessageInfo - -func (m *AddKeysFromTransform) Reset() { *m = AddKeysFromTransform{} } -func (*AddKeysFromTransform) ProtoMessage() {} -func (*AddKeysFromTransform) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{1} -} -func (m *AddKeysFromTransform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddKeysFromTransform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AddKeysFromTransform) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddKeysFromTransform.Merge(m, src) -} -func (m *AddKeysFromTransform) XXX_Size() int { - return m.Size() -} -func (m *AddKeysFromTransform) XXX_DiscardUnknown() { - xxx_messageInfo_AddKeysFromTransform.DiscardUnknown(m) -} - -var xxx_messageInfo_AddKeysFromTransform proto.InternalMessageInfo - -func (m *AppBinding) Reset() { *m = AppBinding{} } -func (*AppBinding) ProtoMessage() {} -func (*AppBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{2} -} -func (m *AppBinding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, 
err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppBinding) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppBinding.Merge(m, src) -} -func (m *AppBinding) XXX_Size() int { - return m.Size() -} -func (m *AppBinding) XXX_DiscardUnknown() { - xxx_messageInfo_AppBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_AppBinding proto.InternalMessageInfo - -func (m *AppBindingList) Reset() { *m = AppBindingList{} } -func (*AppBindingList) ProtoMessage() {} -func (*AppBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{3} -} -func (m *AppBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppBindingList.Merge(m, src) -} -func (m *AppBindingList) XXX_Size() int { - return m.Size() -} -func (m *AppBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_AppBindingList.DiscardUnknown(m) -} - -var xxx_messageInfo_AppBindingList proto.InternalMessageInfo - -func (m *AppBindingSpec) Reset() { *m = AppBindingSpec{} } -func (*AppBindingSpec) ProtoMessage() {} -func (*AppBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{4} -} -func (m *AppBindingSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppBindingSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppBindingSpec.Merge(m, src) -} -func (m *AppBindingSpec) XXX_Size() int { - return m.Size() -} -func (m *AppBindingSpec) XXX_DiscardUnknown() { - xxx_messageInfo_AppBindingSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_AppBindingSpec proto.InternalMessageInfo - -func (m *AppReference) Reset() { *m = AppReference{} } -func (*AppReference) ProtoMessage() {} -func (*AppReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{5} -} -func (m *AppReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AppReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AppReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppReference.Merge(m, src) -} -func (m *AppReference) XXX_Size() int { - return m.Size() -} -func (m *AppReference) XXX_DiscardUnknown() { - xxx_messageInfo_AppReference.DiscardUnknown(m) -} - -var xxx_messageInfo_AppReference proto.InternalMessageInfo - -func (m *ClientConfig) Reset() { *m = ClientConfig{} } -func (*ClientConfig) ProtoMessage() {} -func (*ClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{6} -} -func (m *ClientConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClientConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientConfig.Merge(m, src) -} -func (m *ClientConfig) XXX_Size() int { - return 
m.Size() -} -func (m *ClientConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ClientConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientConfig proto.InternalMessageInfo - -func (m *ObjectReference) Reset() { *m = ObjectReference{} } -func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{7} -} -func (m *ObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectReference.Merge(m, src) -} -func (m *ObjectReference) XXX_Size() int { - return m.Size() -} -func (m *ObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ObjectReference proto.InternalMessageInfo - -func (m *Param) Reset() { *m = Param{} } -func (*Param) ProtoMessage() {} -func (*Param) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{8} -} -func (m *Param) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Param) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Param) XXX_Merge(src proto.Message) { - xxx_messageInfo_Param.Merge(m, src) -} -func (m *Param) XXX_Size() int { - return m.Size() -} -func (m *Param) XXX_DiscardUnknown() { - xxx_messageInfo_Param.DiscardUnknown(m) -} - -var xxx_messageInfo_Param proto.InternalMessageInfo - -func (m *RemoveKeyTransform) Reset() { *m = RemoveKeyTransform{} } -func (*RemoveKeyTransform) ProtoMessage() {} -func (*RemoveKeyTransform) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{9} -} -func (m *RemoveKeyTransform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemoveKeyTransform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RemoveKeyTransform) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveKeyTransform.Merge(m, src) -} -func (m *RemoveKeyTransform) XXX_Size() int { - return m.Size() -} -func (m *RemoveKeyTransform) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveKeyTransform.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoveKeyTransform proto.InternalMessageInfo - -func (m *RenameKeyTransform) Reset() { *m = RenameKeyTransform{} } -func (*RenameKeyTransform) ProtoMessage() {} -func (*RenameKeyTransform) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{10} -} -func (m *RenameKeyTransform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RenameKeyTransform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RenameKeyTransform) XXX_Merge(src proto.Message) { - xxx_messageInfo_RenameKeyTransform.Merge(m, src) -} -func (m *RenameKeyTransform) XXX_Size() int { - return m.Size() -} -func (m *RenameKeyTransform) XXX_DiscardUnknown() { - xxx_messageInfo_RenameKeyTransform.DiscardUnknown(m) -} - -var xxx_messageInfo_RenameKeyTransform proto.InternalMessageInfo - -func (m 
*SecretTransform) Reset() { *m = SecretTransform{} } -func (*SecretTransform) ProtoMessage() {} -func (*SecretTransform) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{11} -} -func (m *SecretTransform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SecretTransform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SecretTransform) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretTransform.Merge(m, src) -} -func (m *SecretTransform) XXX_Size() int { - return m.Size() -} -func (m *SecretTransform) XXX_DiscardUnknown() { - xxx_messageInfo_SecretTransform.DiscardUnknown(m) -} - -var xxx_messageInfo_SecretTransform proto.InternalMessageInfo - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{12} -} -func (m *ServiceReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceReference.Merge(m, src) -} -func (m *ServiceReference) XXX_Size() int { - return m.Size() -} -func (m *ServiceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceReference proto.InternalMessageInfo - -func (m *StashAddon) Reset() { *m = StashAddon{} } -func (*StashAddon) ProtoMessage() {} -func (*StashAddon) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{13} -} -func (m *StashAddon) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StashAddon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StashAddon) XXX_Merge(src proto.Message) { - xxx_messageInfo_StashAddon.Merge(m, src) -} -func (m *StashAddon) XXX_Size() int { - return m.Size() -} -func (m *StashAddon) XXX_DiscardUnknown() { - xxx_messageInfo_StashAddon.DiscardUnknown(m) -} - -var xxx_messageInfo_StashAddon proto.InternalMessageInfo - -func (m *StashAddonSpec) Reset() { *m = StashAddonSpec{} } -func (*StashAddonSpec) ProtoMessage() {} -func (*StashAddonSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{14} -} -func (m *StashAddonSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StashAddonSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StashAddonSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_StashAddonSpec.Merge(m, src) -} -func (m *StashAddonSpec) XXX_Size() int { - return m.Size() -} -func (m *StashAddonSpec) XXX_DiscardUnknown() { - xxx_messageInfo_StashAddonSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_StashAddonSpec proto.InternalMessageInfo - -func (m *StashTaskSpec) Reset() { *m = StashTaskSpec{} } -func (*StashTaskSpec) ProtoMessage() {} -func (*StashTaskSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{15} -} 
-func (m *StashTaskSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StashTaskSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StashTaskSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_StashTaskSpec.Merge(m, src) -} -func (m *StashTaskSpec) XXX_Size() int { - return m.Size() -} -func (m *StashTaskSpec) XXX_DiscardUnknown() { - xxx_messageInfo_StashTaskSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_StashTaskSpec proto.InternalMessageInfo - -func (m *TaskRef) Reset() { *m = TaskRef{} } -func (*TaskRef) ProtoMessage() {} -func (*TaskRef) Descriptor() ([]byte, []int) { - return fileDescriptor_4577f11f96b23493, []int{16} -} -func (m *TaskRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TaskRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskRef.Merge(m, src) -} -func (m *TaskRef) XXX_Size() int { - return m.Size() -} -func (m *TaskRef) XXX_DiscardUnknown() { - xxx_messageInfo_TaskRef.DiscardUnknown(m) -} - -var xxx_messageInfo_TaskRef proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AddKeyTransform)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.AddKeyTransform") - proto.RegisterType((*AddKeysFromTransform)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.AddKeysFromTransform") - proto.RegisterType((*AppBinding)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.AppBinding") - proto.RegisterType((*AppBindingList)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.AppBindingList") - proto.RegisterType((*AppBindingSpec)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.AppBindingSpec") - proto.RegisterType((*AppReference)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.AppReference") - proto.RegisterType((*ClientConfig)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.ClientConfig") - proto.RegisterType((*ObjectReference)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.ObjectReference") - proto.RegisterType((*Param)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.Param") - proto.RegisterType((*RemoveKeyTransform)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.RemoveKeyTransform") - proto.RegisterType((*RenameKeyTransform)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.RenameKeyTransform") - proto.RegisterType((*SecretTransform)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.SecretTransform") - proto.RegisterType((*ServiceReference)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.ServiceReference") - proto.RegisterType((*StashAddon)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.StashAddon") - proto.RegisterType((*StashAddonSpec)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.StashAddonSpec") - proto.RegisterType((*StashTaskSpec)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.StashTaskSpec") - proto.RegisterType((*TaskRef)(nil), "kmodules.xyz.custom_resources.apis.appcatalog.v1alpha1.TaskRef") -} - -func init() { - proto.RegisterFile("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.proto", fileDescriptor_4577f11f96b23493) -} - -var 
fileDescriptor_4577f11f96b23493 = []byte{ - // 1314 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xfa, 0x6f, 0xfc, 0xec, 0x26, 0x61, 0x68, 0x85, 0x89, 0x54, 0x3b, 0x5a, 0x24, 0x14, - 0x84, 0xba, 0x26, 0x01, 0x55, 0x5c, 0x10, 0xca, 0x96, 0x16, 0x4a, 0x4d, 0x29, 0xe3, 0xb4, 0x07, - 0x54, 0x04, 0x93, 0xf5, 0xd8, 0xde, 0xda, 0xbb, 0xb3, 0xcc, 0xac, 0xdd, 0x1a, 0x21, 0xe0, 0xc0, - 0x15, 0x09, 0xf1, 0x89, 0x38, 0xf6, 0x82, 0xd4, 0x1b, 0x3d, 0x59, 0xd4, 0x7c, 0x02, 0x2e, 0x1c, - 0x50, 0x0f, 0x68, 0x66, 0x67, 0xff, 0xd8, 0x69, 0x44, 0xe3, 0x94, 0x8b, 0xb5, 0xf3, 0xde, 0x9b, - 0xdf, 0xef, 0xbd, 0x37, 0xef, 0xbd, 0x19, 0xc3, 0xb5, 0xa1, 0xc7, 0xba, 0xe3, 0x11, 0x15, 0xd6, - 0x83, 0xe9, 0x37, 0x2d, 0x67, 0x2c, 0x42, 0xe6, 0x5d, 0xe2, 0x54, 0xb0, 0x31, 0x77, 0xa8, 0x68, - 0x91, 0xc0, 0x95, 0x3f, 0x81, 0x43, 0x42, 0x32, 0x62, 0xfd, 0xd6, 0x64, 0x8f, 0x8c, 0x82, 0x01, - 0xd9, 0x6b, 0xf5, 0xa9, 0x4f, 0x39, 0x09, 0x69, 0xd7, 0x0a, 0x38, 0x0b, 0x19, 0xba, 0x9c, 0xc5, - 0xb1, 0x22, 0x9c, 0x2f, 0x13, 0x1c, 0x4b, 0xe2, 0x58, 0x29, 0x8e, 0x15, 0xe3, 0x6c, 0x5f, 0xea, - 0xbb, 0xe1, 0x60, 0x7c, 0x64, 0x39, 0xcc, 0x6b, 0xf5, 0x59, 0x9f, 0xb5, 0x14, 0xdc, 0xd1, 0xb8, - 0xa7, 0x56, 0x6a, 0xa1, 0xbe, 0x22, 0x9a, 0x6d, 0x73, 0xf8, 0xae, 0xb0, 0x5c, 0x26, 0xfd, 0x6a, - 0x39, 0x8c, 0xd3, 0xd6, 0xe4, 0x98, 0x2b, 0xdb, 0xef, 0xa4, 0x36, 0x1e, 0x71, 0x06, 0xae, 0x4f, - 0xf9, 0xb4, 0x15, 0x0c, 0xfb, 0x51, 0x30, 0x1e, 0x0d, 0xc9, 0xb3, 0x76, 0xb5, 0x4e, 0xda, 0xc5, - 0xc7, 0x7e, 0xe8, 0x7a, 0xf4, 0xd8, 0x86, 0xcb, 0xff, 0xb5, 0x41, 0x38, 0x03, 0xea, 0x91, 0xe5, - 0x7d, 0xe6, 0x8f, 0x06, 0x6c, 0x1e, 0x74, 0xbb, 0x37, 0xe8, 0xf4, 0x90, 0x13, 0x5f, 0xf4, 0x18, - 0xf7, 0xd0, 0x45, 0xc8, 0x0f, 0xe9, 0xb4, 0x6e, 0xec, 0x18, 0xbb, 0x15, 0xbb, 0xfa, 0x70, 0xd6, - 0x5c, 0x9b, 0xcf, 0x9a, 0xf9, 0x1b, 0x74, 0x8a, 0xa5, 0x1c, 0x35, 0xa1, 0x38, 0x21, 0xa3, 0x31, - 0xad, 0xe7, 0x76, 0x8c, 0xdd, 0x9a, 0x5d, 0x99, 0xcf, 0x9a, 0xc5, 0x3b, 0x52, 0x80, 0x23, 0x39, - 0xda, 0x83, 0xaa, 0x08, 0xb9, 0xeb, 0xf7, 0x95, 0xb4, 0x9e, 0x57, 0x38, 0x9b, 0xf3, 0x59, 0xb3, - 0xda, 0x49, 0xc5, 0x38, 0x6b, 0x63, 0x7a, 0x70, 0x3e, 0xf2, 0x42, 0x5c, 0xe3, 0xcc, 0x4b, 0x5d, - 0xb9, 0x0d, 0x15, 0x41, 0x1d, 0x4e, 0x43, 0x4c, 0x7b, 0xca, 0xa1, 0xea, 0xfe, 0xae, 0x15, 0x85, - 0x2a, 0x4f, 0xd1, 0x92, 0x59, 0xb7, 0x26, 0x7b, 0x56, 0x9b, 0x39, 0x64, 0xf4, 0xe9, 0xd1, 0x3d, - 0xea, 0x48, 0x4b, 0xca, 0xa9, 0xef, 0x50, 0xfb, 0xdc, 0x7c, 0xd6, 0xac, 0x74, 0xe2, 0xed, 0x38, - 0x45, 0x32, 0x1f, 0x1b, 0x00, 0x07, 0x41, 0x60, 0xbb, 0x7e, 0xd7, 0xf5, 0xfb, 0xe8, 0x2b, 0x58, - 0x97, 0x07, 0xd1, 0x25, 0x21, 0xd1, 0x24, 0x6f, 0x65, 0x48, 0x92, 0x7c, 0x5a, 0xc1, 0xb0, 0x1f, - 0xd5, 0x8e, 0xb4, 0x96, 0xb4, 0x11, 0xe3, 0x27, 0x34, 0x24, 0x36, 0xd2, 0x79, 0x82, 0x54, 0x86, - 0x13, 0x54, 0x34, 0x80, 0x82, 0x08, 0xa8, 0xa3, 0x52, 0x56, 0xdd, 0xbf, 0x66, 0xad, 0x56, 0x9f, - 0x56, 0xea, 0x73, 0x27, 0xa0, 0x8e, 0x5d, 0xd3, 0x9c, 0x05, 0xb9, 0xc2, 0x8a, 0xc1, 0xfc, 0xdd, - 0x80, 0x8d, 0xd4, 0xac, 0xed, 0x8a, 0x10, 0xdd, 0x3d, 0x16, 0x9e, 0xf5, 0x7c, 0xe1, 0xc9, 0xdd, - 0x2a, 0xb8, 0x2d, 0x4d, 0xb4, 0x1e, 0x4b, 0x32, 0xa1, 0xf5, 0xa1, 0xe8, 0x86, 0xd4, 0x13, 0xf5, - 0xdc, 0x4e, 0x7e, 0xb7, 0xba, 0x6f, 0x9f, 0x3d, 0x36, 0xfb, 0x9c, 0xa6, 0x2b, 0x5e, 0x97, 0xc0, - 0x38, 0xc2, 0x37, 0x9f, 0x16, 0xb2, 0x91, 0xc9, 0x90, 0xd1, 0x9b, 0x50, 0x08, 0xa7, 0x01, 0xd5, - 0xa5, 0xfa, 0x4a, 0x9c, 0x8e, 0xc3, 0x69, 0x40, 0xff, 0x99, 0x35, 0xcb, 0x07, 0x41, 0x20, 0x3f, - 0xb1, 0x32, 0x42, 0x6f, 0x40, 
0x79, 0x42, 0xb9, 0x70, 0x99, 0xaf, 0x8e, 0xa1, 0x62, 0x6f, 0x6a, - 0xfb, 0xf2, 0x9d, 0x48, 0x8c, 0x63, 0x3d, 0xfa, 0x0e, 0x6a, 0xce, 0xc8, 0xa5, 0x7e, 0x78, 0x85, - 0xf9, 0x3d, 0xb7, 0xaf, 0x4a, 0xb8, 0xba, 0xff, 0xc1, 0xaa, 0xa1, 0x5d, 0xc9, 0x60, 0xd9, 0xe7, - 0x35, 0x6b, 0x2d, 0x2b, 0xc5, 0x0b, 0x7c, 0xa8, 0x0d, 0xa5, 0xa8, 0x58, 0xeb, 0x85, 0x53, 0xd6, - 0x3c, 0xcc, 0x67, 0xcd, 0x92, 0xae, 0x79, 0x8d, 0x81, 0x7e, 0x32, 0x60, 0x2b, 0xfa, 0x4c, 0x1a, - 0x4b, 0xd4, 0x8b, 0xea, 0xb4, 0x3e, 0x5c, 0x35, 0xa4, 0xce, 0x22, 0x9e, 0x5d, 0xd7, 0x51, 0x6d, - 0x2d, 0x29, 0x04, 0x3e, 0x46, 0x8d, 0xbe, 0x00, 0x08, 0x08, 0x27, 0x1e, 0x0d, 0x29, 0x17, 0xf5, - 0x92, 0x8a, 0xf0, 0xd2, 0x89, 0x15, 0xa9, 0x07, 0x98, 0x85, 0xc9, 0xfd, 0xab, 0x0f, 0x42, 0xea, - 0xcb, 0x03, 0xb2, 0x37, 0x64, 0xa7, 0xdd, 0x4a, 0x40, 0x70, 0x06, 0x50, 0xce, 0x8c, 0x70, 0x24, - 0x22, 0x3f, 0xea, 0xe5, 0x55, 0x66, 0xc6, 0x61, 0xbb, 0xa3, 0x53, 0x98, 0x22, 0x99, 0xbf, 0x1a, - 0x50, 0x3b, 0x08, 0x82, 0xc4, 0x14, 0xb5, 0xa0, 0xe2, 0x13, 0x8f, 0x8a, 0x80, 0x38, 0x71, 0x05, - 0xbe, 0xa4, 0xb3, 0x50, 0xb9, 0x19, 0x2b, 0x70, 0x6a, 0x83, 0x76, 0xa0, 0x20, 0x17, 0xba, 0xfa, - 0x92, 0xe6, 0x95, 0xb6, 0x58, 0x69, 0x96, 0x32, 0x93, 0x7f, 0xc1, 0x99, 0x31, 0x7f, 0xcb, 0xc1, - 0x42, 0xd5, 0xa1, 0x57, 0x21, 0x3f, 0xe6, 0x23, 0xed, 0x7c, 0x59, 0x4e, 0xf9, 0xdb, 0xb8, 0x8d, - 0xa5, 0x0c, 0x31, 0x28, 0x0b, 0xca, 0x27, 0xae, 0x43, 0xf5, 0xd0, 0xfa, 0x68, 0xf5, 0x52, 0x51, - 0x30, 0x69, 0x8e, 0xab, 0xb2, 0xe7, 0x62, 0x69, 0xcc, 0x82, 0x3a, 0x70, 0xc1, 0xf5, 0x05, 0x75, - 0xc6, 0x9c, 0x76, 0x86, 0x6e, 0x70, 0xd8, 0xee, 0xdc, 0xa1, 0xdc, 0xed, 0x4d, 0x55, 0x1a, 0xd6, - 0xed, 0x8b, 0x3a, 0x5d, 0x17, 0xae, 0x3f, 0xcb, 0x08, 0x3f, 0x7b, 0x2f, 0xda, 0x85, 0x75, 0x87, - 0xd8, 0x63, 0xbf, 0x3b, 0xa2, 0xaa, 0x95, 0x6a, 0x76, 0x4d, 0x8e, 0xb1, 0x2b, 0x07, 0x91, 0x0c, - 0x27, 0x5a, 0xb4, 0x0f, 0x20, 0x3d, 0xa1, 0x5c, 0x1e, 0x47, 0xbd, 0xa8, 0x32, 0x92, 0xcc, 0xf4, - 0x4e, 0xa2, 0xc1, 0x19, 0x2b, 0xb3, 0x0b, 0x9b, 0x4b, 0xf5, 0xf3, 0x3f, 0x14, 0x85, 0x79, 0x13, - 0x8a, 0xea, 0x3c, 0x13, 0x53, 0xe3, 0xc4, 0xfa, 0x79, 0x2d, 0x7b, 0x35, 0x57, 0xd2, 0x39, 0x9a, - 0xbd, 0x9e, 0xcd, 0xb7, 0x01, 0x61, 0xea, 0xb1, 0x09, 0x3d, 0xc5, 0xa5, 0x6f, 0x62, 0xb9, 0x49, - 0x72, 0x2c, 0x6c, 0xda, 0x81, 0x42, 0x8f, 0x33, 0x6f, 0xd9, 0x23, 0x79, 0x87, 0x63, 0xa5, 0x41, - 0xdb, 0x90, 0x0b, 0x99, 0x76, 0x07, 0xb4, 0x3e, 0x77, 0xc8, 0x70, 0x2e, 0x64, 0xe6, 0x5f, 0x79, - 0xd8, 0x5c, 0x1a, 0x17, 0xe8, 0x3e, 0x54, 0x78, 0xcc, 0xa3, 0x2f, 0xab, 0x8f, 0x57, 0x2d, 0xbc, - 0xe3, 0x0e, 0x47, 0xed, 0x9d, 0xc8, 0x71, 0xca, 0x85, 0x86, 0x50, 0x22, 0xea, 0x05, 0xa2, 0xcb, - 0x7d, 0xe5, 0xc9, 0xb8, 0xf4, 0x9a, 0x8a, 0x26, 0x72, 0x24, 0xc4, 0x9a, 0x02, 0x7d, 0x0f, 0x55, - 0x92, 0x3e, 0x77, 0x74, 0xa3, 0xb7, 0xcf, 0xc6, 0xb8, 0xf8, 0x72, 0x8a, 0xde, 0x5b, 0x19, 0x0d, - 0xce, 0x32, 0x46, 0x69, 0xd6, 0x35, 0xa0, 0xef, 0x98, 0x33, 0xa4, 0x79, 0xb9, 0x98, 0xe2, 0x34, - 0x6b, 0x39, 0x4e, 0xb9, 0xcc, 0xbf, 0x0d, 0xd8, 0x5a, 0x1e, 0x08, 0xe8, 0x75, 0x28, 0xa9, 0xe7, - 0x69, 0x5c, 0xda, 0x1b, 0xba, 0x50, 0x4a, 0x1d, 0x25, 0xc5, 0x5a, 0xbb, 0xd8, 0x5c, 0xb9, 0x53, - 0x34, 0x57, 0xfe, 0xc4, 0x8e, 0xd9, 0x81, 0x42, 0xc0, 0x78, 0x74, 0xcf, 0x16, 0x53, 0x8b, 0x5b, - 0x8c, 0x87, 0x58, 0x69, 0x94, 0x05, 0x09, 0x07, 0x7a, 0x24, 0xa4, 0x16, 0x24, 0x1c, 0x60, 0xa5, - 0x91, 0x5d, 0xf7, 0xf5, 0x98, 0xf2, 0xa9, 0xba, 0xca, 0x32, 0x5d, 0xf7, 0x99, 0x14, 0xe2, 0x48, - 0x67, 0x4e, 0x01, 0x3a, 0x21, 0x11, 0x83, 0x83, 0x6e, 0x97, 0xf9, 0x68, 0x08, 0x45, 0x21, 0x57, - 0xba, 0xc4, 0x57, 0x7e, 0x10, 0xa6, 0x90, 0xea, 0x41, 
0x98, 0x50, 0x2b, 0x39, 0x8e, 0x38, 0xcc, - 0x6f, 0x61, 0x63, 0xd1, 0x0e, 0xdd, 0x83, 0x22, 0x91, 0x0b, 0x4d, 0x7f, 0xf5, 0x4c, 0xf4, 0x87, - 0x44, 0x0c, 0x17, 0xd9, 0x15, 0x11, 0x8e, 0x28, 0xcc, 0xa7, 0x06, 0x9c, 0x5b, 0xb0, 0x43, 0x02, - 0xe0, 0x88, 0x38, 0xc3, 0x71, 0x20, 0x25, 0xda, 0x85, 0xf7, 0x57, 0x75, 0x41, 0x62, 0x60, 0xda, - 0x4b, 0x67, 0xb5, 0x9d, 0x40, 0xe3, 0x0c, 0x0d, 0x9a, 0x40, 0x95, 0x53, 0x11, 0x32, 0x4e, 0x15, - 0x6b, 0xee, 0xc5, 0xb0, 0xbe, 0xac, 0x59, 0xab, 0x38, 0xc5, 0xc6, 0x59, 0x22, 0xf3, 0x17, 0x03, - 0xca, 0xda, 0xfa, 0x39, 0x06, 0x38, 0x85, 0x92, 0xba, 0xaf, 0xe3, 0xd7, 0xf4, 0x7b, 0xab, 0x3a, - 0xa8, 0x6e, 0x8c, 0xb4, 0x91, 0xd4, 0x52, 0x60, 0x0d, 0x6e, 0xdf, 0x7d, 0xf8, 0xa4, 0xb1, 0xf6, - 0xe8, 0x49, 0x63, 0xed, 0xf1, 0x93, 0xc6, 0xda, 0x0f, 0xf3, 0x86, 0xf1, 0x70, 0xde, 0x30, 0x1e, - 0xcd, 0x1b, 0xc6, 0xe3, 0x79, 0xc3, 0xf8, 0x63, 0xde, 0x30, 0x7e, 0xfe, 0xb3, 0xb1, 0xf6, 0xf9, - 0xe5, 0xd5, 0xfe, 0x8d, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xe8, 0x7d, 0x76, 0xc6, 0x0f, - 0x00, 0x00, -} - -func (m *AddKeyTransform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddKeyTransform) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddKeyTransform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StringValue != nil { - i -= len(*m.StringValue) - copy(dAtA[i:], *m.StringValue) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) - i-- - dAtA[i] = 0x1a - } - if m.Value != nil { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AddKeysFromTransform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddKeysFromTransform) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddKeysFromTransform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AppBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AppBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AppBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AppBindingList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AppBindingList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AppBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AppBindingSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AppBindingSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AppBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TLSSecret != nil { - { - size, err := m.TLSSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.Parameters != nil { - { - size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.SecretTransforms) > 0 { - for iNdEx := len(m.SecretTransforms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SecretTransforms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Secret != nil { - { - size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AppReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AppReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AppReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Parameters != nil { - { - size, err := 
m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.ServerName) - copy(dAtA[i:], m.ServerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerName))) - i-- - dAtA[i] = 0x2a - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i-- - dAtA[i] = 0x22 - } - i-- - if m.InsecureSkipTLSVerify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Param) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Param) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Param) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RemoveKeyTransform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveKeyTransform) MarshalTo(dAtA []byte) 
(int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RemoveKeyTransform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RenameKeyTransform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RenameKeyTransform) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RenameKeyTransform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.To) - copy(dAtA[i:], m.To) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.To))) - i-- - dAtA[i] = 0x12 - i -= len(m.From) - copy(dAtA[i:], m.From) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SecretTransform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretTransform) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SecretTransform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RemoveKey != nil { - { - size, err := m.RemoveKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.AddKeysFrom != nil { - { - size, err := m.AddKeysFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.AddKey != nil { - { - size, err := m.AddKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.RenameKey != nil { - { - size, err := m.RenameKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x32 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x2a - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x20 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Scheme) - copy(dAtA[i:], m.Scheme) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StashAddon) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StashAddon) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StashAddon) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Stash.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StashAddonSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StashAddonSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StashAddonSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Addon.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StashTaskSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StashTaskSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StashTaskSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.RestoreTask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.BackupTask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *TaskRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Params) > 0 { - for iNdEx := len(m.Params) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Params[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - 
v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AddKeyTransform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Value != nil { - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StringValue != nil { - l = len(*m.StringValue) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AddKeysFromTransform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppBinding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AppBindingList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AppBindingSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.SecretTransforms) > 0 { - for _, e := range m.SecretTransforms { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Parameters != nil { - l = m.Parameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TLSSecret != nil { - l = m.TLSSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AppReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Parameters != nil { - l = m.Parameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClientConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ServerName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ObjectReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Param) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RemoveKeyTransform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RenameKeyTransform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.From) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.To) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SecretTransform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l 
- if m.RenameKey != nil { - l = m.RenameKey.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AddKey != nil { - l = m.AddKey.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AddKeysFrom != nil { - l = m.AddKeysFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RemoveKey != nil { - l = m.RemoveKey.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scheme) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Query) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StashAddon) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Stash.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StashAddonSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Addon.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StashTaskSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BackupTask.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RestoreTask.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TaskRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Params) > 0 { - for _, e := range m.Params { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AddKeyTransform) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AddKeyTransform{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, - `}`, - }, "") - return s -} -func (this *AddKeysFromTransform) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AddKeysFromTransform{`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "v1.LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AppBindingSpec", "AppBindingSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppBindingList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]AppBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AppBinding", "AppBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&AppBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *AppBindingSpec) String() 
string { - if this == nil { - return "nil" - } - repeatedStringForSecretTransforms := "[]SecretTransform{" - for _, f := range this.SecretTransforms { - repeatedStringForSecretTransforms += strings.Replace(strings.Replace(f.String(), "SecretTransform", "SecretTransform", 1), `&`, ``, 1) + "," - } - repeatedStringForSecretTransforms += "}" - s := strings.Join([]string{`&AppBindingSpec{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "ClientConfig", "ClientConfig", 1), `&`, ``, 1) + `,`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v1.LocalObjectReference", 1) + `,`, - `SecretTransforms:` + repeatedStringForSecretTransforms + `,`, - `Parameters:` + strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1) + `,`, - `TLSSecret:` + strings.Replace(fmt.Sprintf("%v", this.TLSSecret), "LocalObjectReference", "v1.LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AppReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AppReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Parameters:` + strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClientConfig{`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `InsecureSkipTLSVerify:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerify) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `ServerName:` + fmt.Sprintf("%v", this.ServerName) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Param) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Param{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *RemoveKeyTransform) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemoveKeyTransform{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `}`, - }, "") - return s -} -func (this *RenameKeyTransform) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RenameKeyTransform{`, - `From:` + fmt.Sprintf("%v", this.From) + `,`, - `To:` + fmt.Sprintf("%v", this.To) + `,`, - `}`, - }, "") - return s -} -func (this *SecretTransform) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretTransform{`, - `RenameKey:` + strings.Replace(this.RenameKey.String(), "RenameKeyTransform", "RenameKeyTransform", 1) + `,`, - `AddKey:` + strings.Replace(this.AddKey.String(), "AddKeyTransform", "AddKeyTransform", 1) + `,`, - `AddKeysFrom:` + strings.Replace(this.AddKeysFrom.String(), "AddKeysFromTransform", "AddKeysFromTransform", 1) + `,`, - `RemoveKey:` + strings.Replace(this.RemoveKey.String(), "RemoveKeyTransform", 
"RemoveKeyTransform", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Query:` + fmt.Sprintf("%v", this.Query) + `,`, - `}`, - }, "") - return s -} -func (this *StashAddon) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StashAddon{`, - `Stash:` + strings.Replace(strings.Replace(this.Stash.String(), "StashAddonSpec", "StashAddonSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StashAddonSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StashAddonSpec{`, - `Addon:` + strings.Replace(strings.Replace(this.Addon.String(), "StashTaskSpec", "StashTaskSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StashTaskSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StashTaskSpec{`, - `BackupTask:` + strings.Replace(strings.Replace(this.BackupTask.String(), "TaskRef", "TaskRef", 1), `&`, ``, 1) + `,`, - `RestoreTask:` + strings.Replace(strings.Replace(this.RestoreTask.String(), "TaskRef", "TaskRef", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TaskRef) String() string { - if this == nil { - return "nil" - } - repeatedStringForParams := "[]Param{" - for _, f := range this.Params { - repeatedStringForParams += strings.Replace(strings.Replace(f.String(), "Param", "Param", 1), `&`, ``, 1) + "," - } - repeatedStringForParams += "}" - s := strings.Join([]string{`&TaskRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Params:` + repeatedStringForParams + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AddKeyTransform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddKeyTransform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddKeyTransform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.StringValue = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddKeysFromTransform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddKeysFromTransform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddKeysFromTransform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &v1.LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*AppBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, AppBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppBindingSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppBindingSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = AppType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Secret == nil { - m.Secret = &v1.LocalObjectReference{} - } - if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretTransforms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretTransforms = append(m.SecretTransforms, SecretTransform{}) - if err := m.SecretTransforms[len(m.SecretTransforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Parameters == nil { - m.Parameters = &runtime.RawExtension{} - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TLSSecret == nil { - m.TLSSecret = &v1.LocalObjectReference{} - } - if err := m.TLSSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AppReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AppReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AppReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Parameters == nil { - m.Parameters = &runtime.RawExtension{} - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientConfig) Unmarshal(dAtA []byte) error { - l := 
len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.URL = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Service == nil { - m.Service = &ServiceReference{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerify", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.InsecureSkipTLSVerify = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
- if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Param) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Param: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Param: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveKeyTransform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveKeyTransform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveKeyTransform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx 
+ intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RenameKeyTransform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RenameKeyTransform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RenameKeyTransform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.From = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.To = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretTransform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretTransform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
SecretTransform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RenameKey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RenameKey == nil { - m.RenameKey = &RenameKeyTransform{} - } - if err := m.RenameKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AddKey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AddKey == nil { - m.AddKey = &AddKeyTransform{} - } - if err := m.AddKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AddKeysFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AddKeysFrom == nil { - m.AddKeysFrom = &AddKeysFromTransform{} - } - if err := m.AddKeysFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RemoveKey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RemoveKey == nil { - m.RemoveKey = &RemoveKeyTransform{} - } - if err := m.RemoveKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - 
var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scheme = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StashAddon) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StashAddon: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StashAddon: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stash", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Stash.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StashAddonSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StashAddonSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StashAddonSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StashTaskSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StashTaskSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StashTaskSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupTask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BackupTask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestoreTask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RestoreTask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var 
wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Params = append(m.Params, Param{}) - if err := m.Params[len(m.Params)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 
{ - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.proto b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.proto index 977433aa..7a34e6e1 100644 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.proto +++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/generated.proto @@ -32,9 +32,13 @@ option go_package = "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"; // AddKeyTransform specifies that Service Catalog should add an // additional entry to the Secret associated with the ServiceBinding. // For example, given the following AddKeyTransform: -// {"key": "CONNECTION_POOL_SIZE", "stringValue": "10"} +// +// {"key": "CONNECTION_POOL_SIZE", "stringValue": "10"} +// // the following entry will appear in the Secret: -// "CONNECTION_POOL_SIZE": "10" +// +// "CONNECTION_POOL_SIZE": "10" +// // Note that this transform should only be used to add non-sensitive // (non-secret) values. To add sensitive information, the // AddKeysFromTransform should be used instead. @@ -55,7 +59,9 @@ message AddKeyTransform { // AddKeysFromTransform specifies that Service Catalog should merge // an existing secret into the Secret associated with the ServiceBinding. // For example, given the following AddKeysFromTransform: -// {"secretRef": {"namespace": "foo", "name": "bar"}} +// +// {"secretRef": {"namespace": "foo", "name": "bar"}} +// // the entries of the Secret "bar" from Namespace "foo" will be merged into // the credentials Secret. message AddKeysFromTransform { @@ -88,21 +94,25 @@ message AppBindingSpec { // +optional optional string type = 1; + // Reference to underlying application + // +optional + optional TypedObjectReference appRef = 2; + // Version used to facilitate programmatic handling of application. // +optional - optional string version = 2; + optional string version = 3; // ClientConfig defines how to communicate with the app. // Required - optional ClientConfig clientConfig = 3; + optional ClientConfig clientConfig = 4; // Secret is the name of the secret to create in the AppBinding's // namespace that will hold the credentials associated with the AppBinding. - optional k8s.io.api.core.v1.LocalObjectReference secret = 4; + optional k8s.io.api.core.v1.LocalObjectReference secret = 5; // List of transformations that should be applied to the credentials // associated with the ServiceBinding before they are inserted into the Secret. - repeated SecretTransform secretTransforms = 5; + repeated SecretTransform secretTransforms = 6; // Parameters is a set of the parameters to be used to connect to the // app. The inline YAML/JSON payload to be translated into equivalent @@ -116,11 +126,11 @@ message AppBindingSpec { // +optional // +kubebuilder:validation:EmbeddedResource // +kubebuilder:pruning:PreserveUnknownFields - optional k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 6; + optional k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 7; // TLSSecret is the name of the secret that will hold // the client certificate and private key associated with the AppBinding. 
- optional k8s.io.api.core.v1.LocalObjectReference tlsSecret = 7; + optional k8s.io.api.core.v1.LocalObjectReference tlsSecret = 8; } message AppReference { @@ -219,11 +229,16 @@ message RemoveKeyTransform { // from the broker should be renamed and stored under a different key // in the Secret. // For example, given the following credentials entry: -// "USERNAME": "johndoe" +// +// "USERNAME": "johndoe" +// // and the following RenameKeyTransform: -// {"from": "USERNAME", "to": "DB_USER"} +// +// {"from": "USERNAME", "to": "DB_USER"} +// // the following entry will appear in the Secret: -// "DB_USER": "johndoe" +// +// "DB_USER": "johndoe" message RenameKeyTransform { // The name of the key to rename optional string from = 1; @@ -316,3 +331,18 @@ message TaskRef { repeated Param params = 2; } +message TypedObjectReference { + optional string apiGroup = 1; + + optional string kind = 2; + + // Namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + optional string namespace = 3; + + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + optional string name = 4; +} + diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go index 5f701288..51fa4caa 100644 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go +++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/openapi_generated.go @@ -24,6 +24,8 @@ limitations under the License. package v1alpha1 import ( + apiv1 "kmodules.xyz/client-go/api/v1" + resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" @@ -326,6 +328,21 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/apimachinery/pkg/runtime.Unknown": schema_k8sio_apimachinery_pkg_runtime_Unknown(ref), "k8s.io/apimachinery/pkg/util/intstr.IntOrString": schema_apimachinery_pkg_util_intstr_IntOrString(ref), "k8s.io/apimachinery/pkg/version.Info": schema_k8sio_apimachinery_pkg_version_Info(ref), + "kmodules.xyz/client-go/api/v1.CertificatePrivateKey": schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref), + "kmodules.xyz/client-go/api/v1.CertificateSpec": schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref), + "kmodules.xyz/client-go/api/v1.ClusterMetadata": schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref), + "kmodules.xyz/client-go/api/v1.Condition": schema_kmodulesxyz_client_go_api_v1_Condition(ref), + "kmodules.xyz/client-go/api/v1.HealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref), + "kmodules.xyz/client-go/api/v1.ObjectID": schema_kmodulesxyz_client_go_api_v1_ObjectID(ref), + "kmodules.xyz/client-go/api/v1.ObjectInfo": schema_kmodulesxyz_client_go_api_v1_ObjectInfo(ref), + "kmodules.xyz/client-go/api/v1.ObjectReference": schema_kmodulesxyz_client_go_api_v1_ObjectReference(ref), + "kmodules.xyz/client-go/api/v1.ReadonlyHealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_ReadonlyHealthCheckSpec(ref), + "kmodules.xyz/client-go/api/v1.ResourceID": schema_kmodulesxyz_client_go_api_v1_ResourceID(ref), + "kmodules.xyz/client-go/api/v1.TLSConfig": schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref), + "kmodules.xyz/client-go/api/v1.TimeOfDay": schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref), + 
"kmodules.xyz/client-go/api/v1.TypedObjectReference": schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref), + "kmodules.xyz/client-go/api/v1.X509Subject": schema_kmodulesxyz_client_go_api_v1_X509Subject(ref), + "kmodules.xyz/client-go/api/v1.stringSetMerger": schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AddKeyTransform": schema_custom_resources_apis_appcatalog_v1alpha1_AddKeyTransform(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AddKeysFromTransform": schema_custom_resources_apis_appcatalog_v1alpha1_AddKeysFromTransform(ref), "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBinding": schema_custom_resources_apis_appcatalog_v1alpha1_AppBinding(ref), @@ -16148,166 +16165,851 @@ func schema_k8sio_apimachinery_pkg_version_Info(ref common.ReferenceCallback) co } } -func schema_custom_resources_apis_appcatalog_v1alpha1_AddKeyTransform(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "AddKeyTransform specifies that Service Catalog should add an additional entry to the Secret associated with the ServiceBinding. For example, given the following AddKeyTransform:\n\n\t{\"key\": \"CONNECTION_POOL_SIZE\", \"stringValue\": \"10\"}\n\nthe following entry will appear in the Secret:\n\n\t\"CONNECTION_POOL_SIZE\": \"10\"\n\nNote that this transform should only be used to add non-sensitive (non-secret) values. To add sensitive information, the AddKeysFromTransform should be used instead.", + Description: "CertificatePrivateKey contains configuration options for private keys used by the Certificate controller. This allows control of how private keys are rotated.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "key": { + "encoding": { SchemaProps: spec.SchemaProps{ - Description: "The name of the key to add", - Default: "", + Description: "The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are \"pkcs1\" and \"pkcs8\" standing for PKCS#1 and PKCS#8, respectively. Defaults to PKCS#1 if not specified. See here for the difference between the formats: https://stackoverflow.com/a/48960291", Type: []string{"string"}, Format: "", }, }, - "value": { + }, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "alias": { SchemaProps: spec.SchemaProps{ - Description: "The binary value (possibly non-string) to add to the Secret under the specified key. 
If both value and stringValue are specified, then value is ignored and stringValue is stored.", + Description: "Alias represents the identifier of the certificate.", + Default: "", Type: []string{"string"}, - Format: "byte", + Format: "", }, }, - "stringValue": { + "issuerRef": { SchemaProps: spec.SchemaProps{ - Description: "The string (non-binary) value to add to the Secret under the specified key.", + Description: "IssuerRef is a reference to a Certificate Issuer.", + Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"), + }, + }, + "secretName": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the k8s secret name that holds the certificates. Default to --cert.", Type: []string{"string"}, Format: "", }, }, + "subject": { + SchemaProps: spec.SchemaProps{ + Description: "Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name).", + Ref: ref("kmodules.xyz/client-go/api/v1.X509Subject"), + }, + }, + "duration": { + SchemaProps: spec.SchemaProps{ + Description: "Certificate default Duration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "renewBefore": { + SchemaProps: spec.SchemaProps{ + Description: "Certificate renew before expiration duration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "dnsNames": { + SchemaProps: spec.SchemaProps{ + Description: "DNSNames is a list of subject alt names to be used on the Certificate.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "ipAddresses": { + SchemaProps: spec.SchemaProps{ + Description: "IPAddresses is a list of IP addresses to be used on the Certificate", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "uris": { + SchemaProps: spec.SchemaProps{ + Description: "URIs is a list of URI subjectAltNames to be set on the Certificate.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "emailAddresses": { + SchemaProps: spec.SchemaProps{ + Description: "EmailAddresses is a list of email subjectAltNames to be set on the Certificate.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "privateKey": { + SchemaProps: spec.SchemaProps{ + Description: "Options to control private keys used for the Certificate.", + Ref: ref("kmodules.xyz/client-go/api/v1.CertificatePrivateKey"), + }, + }, }, - Required: []string{"key"}, + Required: []string{"alias"}, }, }, + Dependencies: []string{ + "k8s.io/api/core/v1.TypedLocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "kmodules.xyz/client-go/api/v1.CertificatePrivateKey", "kmodules.xyz/client-go/api/v1.X509Subject"}, } } -func schema_custom_resources_apis_appcatalog_v1alpha1_AddKeysFromTransform(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "AddKeysFromTransform specifies that Service Catalog should 
merge an existing secret into the Secret associated with the ServiceBinding. For example, given the following AddKeysFromTransform:\n\n\t{\"secretRef\": {\"namespace\": \"foo\", \"name\": \"bar\"}}\n\nthe entries of the Secret \"bar\" from Namespace \"foo\" will be merged into the credentials Secret.", - Type: []string{"object"}, + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "secretRef": { + "uid": { SchemaProps: spec.SchemaProps{ - Description: "The reference to the Secret that should be merged into the credentials Secret.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "provider": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", }, }, }, + Required: []string{"uid"}, }, }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, } } -func schema_custom_resources_apis_appcatalog_v1alpha1_AppBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_kmodulesxyz_client_go_api_v1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "kind": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Description: "Status of the condition, one of True, False, Unknown.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "metadata": { + "observedGeneration": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Description: "If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.", + Type: []string{"integer"}, + Format: "int64", }, }, - "spec": { + "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingSpec"), + Description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition. This field may be empty.", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, + Required: []string{"type", "status", "lastTransitionTime", "reason", "message"}, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingSpec"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } -func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "AppBindingList is a list of Apps", + Description: "HealthCheckSpec defines attributes of the health check", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "kind": { + "periodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", + Description: "How often (in seconds) to perform the health check. Default to 10 seconds. Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", }, }, - "apiVersion": { + "timeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, + Description: "Number of seconds after which the probe times out. Defaults to 10 second. Minimum value is 1. It should be less than the periodSeconds.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failureThreshold": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum consecutive failures for the health check to be considered failed after having succeeded. Defaults to 1. Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "disableWriteCheck": { + SchemaProps: spec.SchemaProps{ + Description: "Whether to disable write check on database. 
Defaults to false.", + Type: []string{"boolean"}, Format: "", }, }, - "metadata": { + }, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_ObjectID(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_ObjectInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "resource": { SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Ref: ref("kmodules.xyz/client-go/api/v1.ResourceID"), }, }, - "items": { + "ref": { SchemaProps: spec.SchemaProps{ - Description: "Items is a list of AppBinding CRD objects", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBinding"), - }, - }, - }, + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/client-go/api/v1.ObjectReference"), }, }, }, + Required: []string{"resource", "ref"}, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBinding"}, + "kmodules.xyz/client-go/api/v1.ObjectReference", "kmodules.xyz/client-go/api/v1.ResourceID"}, } } -func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_kmodulesxyz_client_go_api_v1_ObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "AppBindingSpec is the spec for app", + Description: "ObjectReference contains enough information to let you inspect or modify the referred object.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "type": { + "namespace": { SchemaProps: spec.SchemaProps{ - Description: "Type used to facilitate programmatic handling of application.", + Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_ReadonlyHealthCheckSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReadonlyHealthCheckSpec defines attributes of the health check using only read-only checks", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "periodSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "How often (in seconds) to perform the health check. Default to 10 seconds. Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "timeoutSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Number of seconds after which the probe times out. Defaults to 10 second. Minimum value is 1. It should be less than the periodSeconds.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failureThreshold": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum consecutive failures for the health check to be considered failed after having succeeded. Defaults to 1. Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_ResourceID(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceID identifies a resource", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration too: plural.group and it must be all lowercase.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the serialized kind of the resource. It is normally CamelCase and singular.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "scope": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"group", "version", "name", "kind", "scope"}, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "issuerRef": { + SchemaProps: spec.SchemaProps{ + Description: "IssuerRef is a reference to a Certificate Issuer.", + Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"), + }, + }, + "certificates": { + SchemaProps: spec.SchemaProps{ + Description: "Certificate provides server and/or client certificate options used by application pods. These options are passed to a cert-manager Certificate object. 
xref: https://github.com/jetstack/cert-manager/blob/v0.16.0/pkg/apis/certmanager/v1beta1/types_certificate.go#L82-L162", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/client-go/api/v1.CertificateSpec"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.TypedLocalObjectReference", "kmodules.xyz/client-go/api/v1.CertificateSpec"}, + } +} + +func schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TimeOfDay is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", + Type: apiv1.TimeOfDay{}.OpenAPISchemaType(), + Format: apiv1.TimeOfDay{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TypedObjectReference represents an typed namespaced object.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", Type: []string{"string"}, Format: "", }, }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_X509Subject(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "X509Subject Full X509 name specification", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "organizations": { + SchemaProps: spec.SchemaProps{ + Description: "Organizations to be used on the Certificate.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "countries": { + SchemaProps: spec.SchemaProps{ + Description: "Countries to be used on the CertificateSpec.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "organizationalUnits": { + SchemaProps: spec.SchemaProps{ + Description: "Organizational Units to be used on the CertificateSpec.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "localities": { + SchemaProps: spec.SchemaProps{ + Description: "Cities to be used on the CertificateSpec.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "provinces": { + SchemaProps: spec.SchemaProps{ + Description: "State/Provinces to be used on the CertificateSpec.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "streetAddresses": { + SchemaProps: spec.SchemaProps{ + Description: "Street addresses to be used on the CertificateSpec.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "postalCodes": { + SchemaProps: spec.SchemaProps{ + Description: "Postal codes to be used on the CertificateSpec.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "serialNumber": { + SchemaProps: spec.SchemaProps{ + Description: "Serial number to be used on the CertificateSpec.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + }, + }, + } +} + +func schema_custom_resources_apis_appcatalog_v1alpha1_AddKeyTransform(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AddKeyTransform specifies that Service Catalog should add an additional entry to the Secret associated with the ServiceBinding. 
For example, given the following AddKeyTransform:\n\n\t{\"key\": \"CONNECTION_POOL_SIZE\", \"stringValue\": \"10\"}\n\nthe following entry will appear in the Secret:\n\n\t\"CONNECTION_POOL_SIZE\": \"10\"\n\nNote that this transform should only be used to add non-sensitive (non-secret) values. To add sensitive information, the AddKeysFromTransform should be used instead.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the key to add", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "The binary value (possibly non-string) to add to the Secret under the specified key. If both value and stringValue are specified, then value is ignored and stringValue is stored.", + Type: []string{"string"}, + Format: "byte", + }, + }, + "stringValue": { + SchemaProps: spec.SchemaProps{ + Description: "The string (non-binary) value to add to the Secret under the specified key.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"key"}, + }, + }, + } +} + +func schema_custom_resources_apis_appcatalog_v1alpha1_AddKeysFromTransform(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AddKeysFromTransform specifies that Service Catalog should merge an existing secret into the Secret associated with the ServiceBinding. For example, given the following AddKeysFromTransform:\n\n\t{\"secretRef\": {\"namespace\": \"foo\", \"name\": \"bar\"}}\n\nthe entries of the Secret \"bar\" from Namespace \"foo\" will be merged into the credentials Secret.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "The reference to the Secret that should be merged into the credentials Secret.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_custom_resources_apis_appcatalog_v1alpha1_AppBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBindingSpec"}, + } +} + +func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AppBindingList is a list of Apps", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of AppBinding CRD objects", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBinding"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.AppBinding"}, + } +} + +func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AppBindingSpec is the spec for app", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type used to facilitate programmatic handling of application.", + Type: []string{"string"}, + Format: "", + }, + }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -16359,7 +17061,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/stash_types.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/stash_types.go index 0d96ff82..2ea89fd2 100644 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/stash_types.go +++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/stash_types.go @@ -25,32 +25,32 @@ import ( type StashAddon struct { metav1.TypeMeta `json:",inline,omitempty"` - Stash StashAddonSpec `json:"stash,omitempty" protobuf:"bytes,1,opt,name=stash"` + Stash StashAddonSpec `json:"stash,omitempty"` } // StashAddonSpec is the spec for app type StashAddonSpec struct { - Addon StashTaskSpec `json:"addon,omitempty" protobuf:"bytes,1,opt,name=addon"` + Addon StashTaskSpec `json:"addon,omitempty"` } // StashTaskSpec is the spec for app type StashTaskSpec struct { // Backup task definition - BackupTask TaskRef `json:"backupTask" protobuf:"bytes,1,opt,name=backupTask"` + BackupTask TaskRef `json:"backupTask"` // Restore task definition - RestoreTask TaskRef `json:"restoreTask" protobuf:"bytes,2,opt,name=restoreTask"` + RestoreTask TaskRef `json:"restoreTask"` } type TaskRef struct { - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + Name string `json:"name"` // Params specifies a list of parameter to pass to the Task. Stash will use this parameters to resolve the task. // +optional - Params []Param `json:"params,omitempty" protobuf:"bytes,2,rep,name=params"` + Params []Param `json:"params,omitempty"` } // Param declares a value to use for the Param called Name. type Param struct { - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - Value string `json:"value" protobuf:"bytes,2,opt,name=value"` + Name string `json:"name"` + Value string `json:"value"` } diff --git a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go index d01e3164..22b722ec 100644 --- a/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1/zz_generated.deepcopy.go @@ -22,6 +22,8 @@ limitations under the License. package v1alpha1 import ( + apiv1 "kmodules.xyz/client-go/api/v1" + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -136,6 +138,11 @@ func (in *AppBindingList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AppBindingSpec) DeepCopyInto(out *AppBindingSpec) { *out = *in + if in.AppRef != nil { + in, out := &in.AppRef, &out.AppRef + *out = new(apiv1.TypedObjectReference) + **out = **in + } in.ClientConfig.DeepCopyInto(&out.ClientConfig) if in.Secret != nil { in, out := &in.Secret, &out.Secret diff --git a/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml b/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml index 9b8b534d..898b9263 100644 --- a/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml +++ b/vendor/kmodules.xyz/custom-resources/crds/appcatalog.appscode.com_appbindings.yaml @@ -47,6 +47,22 @@ spec: spec: description: AppBindingSpec is the spec for app properties: + appRef: + description: Reference to underlying application + properties: + apiGroup: + type: string + kind: + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + required: + - name + type: object clientConfig: description: ClientConfig defines how to communicate with the app. Required diff --git a/vendor/kmodules.xyz/offshoot-api/api/v1/generated.pb.go b/vendor/kmodules.xyz/offshoot-api/api/v1/generated.pb.go index f8c0a224..ec863500 100644 --- a/vendor/kmodules.xyz/offshoot-api/api/v1/generated.pb.go +++ b/vendor/kmodules.xyz/offshoot-api/api/v1/generated.pb.go @@ -73,10 +73,38 @@ func (m *ContainerRuntimeSettings) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerRuntimeSettings proto.InternalMessageInfo +func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } +func (*EphemeralVolumeSource) ProtoMessage() {} +func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3a450b99b211cb39, []int{1} +} +func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralVolumeSource.Merge(m, src) +} +func (m *EphemeralVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *EphemeralVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo + func (m *IONiceSettings) Reset() { *m = IONiceSettings{} } func (*IONiceSettings) ProtoMessage() {} func (*IONiceSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{1} + return fileDescriptor_3a450b99b211cb39, []int{2} } func (m *IONiceSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +132,7 @@ var xxx_messageInfo_IONiceSettings proto.InternalMessageInfo func (m *NiceSettings) Reset() { *m = NiceSettings{} } func (*NiceSettings) ProtoMessage() {} func (*NiceSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{2} + return fileDescriptor_3a450b99b211cb39, []int{3} } func (m *NiceSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +160,7 @@ var xxx_messageInfo_NiceSettings proto.InternalMessageInfo func (m *ObjectMeta) Reset() 
{ *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{3} + return fileDescriptor_3a450b99b211cb39, []int{4} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +188,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo func (m *PartialObjectMeta) Reset() { *m = PartialObjectMeta{} } func (*PartialObjectMeta) ProtoMessage() {} func (*PartialObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{4} + return fileDescriptor_3a450b99b211cb39, []int{5} } func (m *PartialObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +216,7 @@ var xxx_messageInfo_PartialObjectMeta proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{5} + return fileDescriptor_3a450b99b211cb39, []int{6} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -213,10 +241,38 @@ func (m *PersistentVolumeClaim) XXX_DiscardUnknown() { var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo +func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } +func (*PersistentVolumeClaimTemplate) ProtoMessage() {} +func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_3a450b99b211cb39, []int{7} +} +func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimTemplate.Merge(m, src) +} +func (m *PersistentVolumeClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo + func (m *PodRuntimeSettings) Reset() { *m = PodRuntimeSettings{} } func (*PodRuntimeSettings) ProtoMessage() {} func (*PodRuntimeSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{6} + return fileDescriptor_3a450b99b211cb39, []int{8} } func (m *PodRuntimeSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +300,7 @@ var xxx_messageInfo_PodRuntimeSettings proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{7} + return fileDescriptor_3a450b99b211cb39, []int{9} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +328,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{8} + return fileDescriptor_3a450b99b211cb39, []int{10} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +356,7 @@ var xxx_messageInfo_PodTemplateSpec 
proto.InternalMessageInfo func (m *RuntimeSettings) Reset() { *m = RuntimeSettings{} } func (*RuntimeSettings) ProtoMessage() {} func (*RuntimeSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{9} + return fileDescriptor_3a450b99b211cb39, []int{11} } func (m *RuntimeSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,7 +384,7 @@ var xxx_messageInfo_RuntimeSettings proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{10} + return fileDescriptor_3a450b99b211cb39, []int{12} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 +412,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{11} + return fileDescriptor_3a450b99b211cb39, []int{13} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,7 +440,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceTemplateSpec) Reset() { *m = ServiceTemplateSpec{} } func (*ServiceTemplateSpec) ProtoMessage() {} func (*ServiceTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3a450b99b211cb39, []int{12} + return fileDescriptor_3a450b99b211cb39, []int{14} } func (m *ServiceTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -409,8 +465,65 @@ func (m *ServiceTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceTemplateSpec proto.InternalMessageInfo +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_3a450b99b211cb39, []int{15} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3a450b99b211cb39, []int{16} +} +func (m *VolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeSource.Merge(m, src) +} +func (m *VolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeSource proto.InternalMessageInfo + func init() { proto.RegisterType((*ContainerRuntimeSettings)(nil), "kmodules.xyz.offshoot_api.api.v1.ContainerRuntimeSettings") + proto.RegisterType((*EphemeralVolumeSource)(nil), 
"kmodules.xyz.offshoot_api.api.v1.EphemeralVolumeSource") proto.RegisterType((*IONiceSettings)(nil), "kmodules.xyz.offshoot_api.api.v1.IONiceSettings") proto.RegisterType((*NiceSettings)(nil), "kmodules.xyz.offshoot_api.api.v1.NiceSettings") proto.RegisterType((*ObjectMeta)(nil), "kmodules.xyz.offshoot_api.api.v1.ObjectMeta") @@ -420,6 +533,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "kmodules.xyz.offshoot_api.api.v1.PartialObjectMeta.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "kmodules.xyz.offshoot_api.api.v1.PartialObjectMeta.LabelsEntry") proto.RegisterType((*PersistentVolumeClaim)(nil), "kmodules.xyz.offshoot_api.api.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "kmodules.xyz.offshoot_api.api.v1.PersistentVolumeClaimTemplate") proto.RegisterType((*PodRuntimeSettings)(nil), "kmodules.xyz.offshoot_api.api.v1.PodRuntimeSettings") proto.RegisterMapType((map[string]string)(nil), "kmodules.xyz.offshoot_api.api.v1.PodRuntimeSettings.NodeSelectorEntry") proto.RegisterMapType((map[string]string)(nil), "kmodules.xyz.offshoot_api.api.v1.PodRuntimeSettings.PodAnnotationsEntry") @@ -431,6 +545,8 @@ func init() { proto.RegisterType((*ServicePort)(nil), "kmodules.xyz.offshoot_api.api.v1.ServicePort") proto.RegisterType((*ServiceSpec)(nil), "kmodules.xyz.offshoot_api.api.v1.ServiceSpec") proto.RegisterType((*ServiceTemplateSpec)(nil), "kmodules.xyz.offshoot_api.api.v1.ServiceTemplateSpec") + proto.RegisterType((*Volume)(nil), "kmodules.xyz.offshoot_api.api.v1.Volume") + proto.RegisterType((*VolumeSource)(nil), "kmodules.xyz.offshoot_api.api.v1.VolumeSource") } func init() { @@ -438,150 +554,201 @@ func init() { } var fileDescriptor_3a450b99b211cb39 = []byte{ - // 2273 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x5f, 0x6f, 0x1b, 0xc7, - 0x11, 0x37, 0x25, 0x52, 0x22, 0x87, 0x12, 0x25, 0xad, 0x2c, 0xe7, 0xa4, 0xc8, 0xa4, 0xcc, 0x16, - 0x85, 0x82, 0xda, 0x54, 0xec, 0xb8, 0x80, 0x93, 0xa0, 0x4e, 0x75, 0xb4, 0x64, 0x2b, 0x95, 0x25, - 0x62, 0xa9, 0xa4, 0x4d, 0xd1, 0x3f, 0x39, 0x1d, 0x57, 0xd4, 0x59, 0xc7, 0x5b, 0xf6, 0x76, 0x29, - 0x9b, 0x29, 0x8a, 0xe6, 0xb1, 0x28, 0x50, 0xa0, 0x9f, 0xa0, 0xdf, 0xa0, 0x0f, 0x05, 0xfa, 0x11, - 0xfa, 0x60, 0xa0, 0x2f, 0x79, 0x29, 0x90, 0x27, 0xb6, 0x66, 0xbf, 0x85, 0x8b, 0x02, 0xc5, 0xee, - 0xed, 0xfd, 0x23, 0x8f, 0xfa, 0x43, 0xdb, 0x6d, 0x1e, 0x04, 0xe8, 0xe6, 0xcf, 0x6f, 0x66, 0x67, - 0xe7, 0x66, 0xe6, 0x86, 0xf0, 0xee, 0x49, 0x8b, 0x36, 0x3a, 0x36, 0x61, 0x95, 0x67, 0xdd, 0x2f, - 0x36, 0xe8, 0xd1, 0x11, 0x3b, 0xa6, 0x94, 0xdf, 0x32, 0xda, 0xd6, 0x86, 0xf8, 0x3b, 0xbd, 0xbd, - 0xd1, 0x24, 0x0e, 0x71, 0x0d, 0x4e, 0x1a, 0x95, 0xb6, 0x4b, 0x39, 0x45, 0x6b, 0x51, 0x8d, 0x8a, - 0xaf, 0xf1, 0x0b, 0xa3, 0x6d, 0x55, 0xc4, 0xdf, 0xe9, 0xed, 0x95, 0x5b, 0x4d, 0x8b, 0x1f, 0x77, - 0x0e, 0x2b, 0x26, 0x6d, 0x6d, 0x34, 0x69, 0x93, 0x6e, 0x48, 0xc5, 0xc3, 0xce, 0x91, 0x7c, 0x92, - 0x0f, 0xf2, 0x3f, 0x0f, 0x70, 0xa5, 0x7c, 0x72, 0x8f, 0x55, 0x2c, 0x2a, 0xed, 0x99, 0xd4, 0x25, - 0x09, 0x46, 0x57, 0xee, 0x86, 0x32, 0x2d, 0xc3, 0x3c, 0xb6, 0x1c, 0xe2, 0x76, 0x37, 0xda, 0x27, - 0x4d, 0x41, 0x60, 0x1b, 0x2d, 0xc2, 0x8d, 0x04, 0xad, 0xf2, 0x7f, 0x32, 0xa0, 0x55, 0xa9, 0xc3, - 0x0d, 0x21, 0x8f, 0x3b, 0x0e, 0xb7, 0x5a, 0xa4, 0x4e, 0x38, 0xb7, 0x9c, 0x26, 0x43, 0x9f, 0x41, - 0xce, 0x25, 0x8c, 0x76, 0x5c, 0x93, 0x30, 0x2d, 0xb5, 0x96, 0x5a, 0xcf, 0xdf, 0x59, 0xaf, 0x78, - 0x66, 0xe4, 0x41, 0x84, 0x2b, 0x95, 0xd3, 0xdb, 0x15, 0xac, 0x84, 0x30, 
0xf9, 0x65, 0xc7, 0x72, - 0x49, 0x8b, 0x38, 0x9c, 0xe9, 0x0b, 0xcf, 0x7b, 0xa5, 0x2b, 0xfd, 0x5e, 0x29, 0xe7, 0x73, 0x19, - 0x0e, 0xd1, 0x10, 0x86, 0x59, 0xdb, 0x3a, 0x25, 0x0e, 0x61, 0xac, 0xe6, 0xd2, 0x43, 0xa2, 0x4d, - 0x48, 0xf8, 0xe5, 0x24, 0x78, 0x29, 0xa0, 0x2f, 0xf4, 0x7b, 0xa5, 0xd9, 0xdd, 0xa8, 0x0e, 0x8e, - 0x43, 0xa0, 0x4f, 0xa0, 0xe0, 0x12, 0xa3, 0x61, 0x85, 0xa0, 0x93, 0xe7, 0x81, 0xa2, 0x7e, 0xaf, - 0x54, 0xc0, 0x31, 0x25, 0x3c, 0x00, 0x82, 0x3e, 0x86, 0x9c, 0x6d, 0x1d, 0x11, 0xb3, 0x6b, 0xda, - 0x44, 0x4b, 0x4b, 0xc4, 0xeb, 0x49, 0x88, 0xbb, 0xbe, 0x90, 0x3e, 0x2b, 0x8e, 0x1d, 0x3c, 0xe2, - 0x50, 0x1d, 0x1d, 0xc2, 0x1c, 0x23, 0x66, 0xc7, 0xb5, 0x78, 0x57, 0x44, 0x9d, 0x3c, 0xe3, 0x5a, - 0x46, 0x22, 0x7e, 0x2b, 0x09, 0xb1, 0x1e, 0x17, 0xd5, 0x17, 0xfb, 0xbd, 0xd2, 0xdc, 0x00, 0x11, - 0x0f, 0x02, 0xa2, 0x5d, 0x48, 0x3b, 0x96, 0x49, 0xb4, 0x29, 0x09, 0x5c, 0xa9, 0x9c, 0x97, 0x8c, - 0x95, 0x3d, 0xcb, 0x0c, 0xee, 0x5c, 0xcf, 0xf6, 0x7b, 0xa5, 0xb4, 0xa0, 0x60, 0x89, 0x82, 0x0e, - 0x60, 0xca, 0xa2, 0x12, 0x6f, 0x5a, 0xe2, 0xbd, 0x7b, 0x3e, 0xde, 0xce, 0x7e, 0x0c, 0x11, 0xfa, - 0xbd, 0xd2, 0x94, 0x47, 0xc3, 0x0a, 0x0b, 0xed, 0xc2, 0x34, 0x71, 0x4e, 0xb7, 0x5d, 0xda, 0xd2, - 0xb2, 0x6b, 0x93, 0xeb, 0xf9, 0x3b, 0x37, 0x92, 0xce, 0xbf, 0xe5, 0x89, 0xd4, 0x65, 0xce, 0xe8, - 0x73, 0x2a, 0xa1, 0xa6, 0x15, 0x19, 0xfb, 0x10, 0xe8, 0x7d, 0x98, 0x24, 0xce, 0xa9, 0x96, 0x93, - 0x48, 0x2b, 0x23, 0x90, 0x3e, 0x35, 0x5c, 0x3d, 0xaf, 0x20, 0x26, 0xb7, 0x9c, 0x53, 0x2c, 0x74, - 0xca, 0x3f, 0x87, 0x42, 0xdc, 0x5d, 0x54, 0x82, 0x8c, 0x69, 0x1b, 0xcc, 0x4b, 0xf8, 0x8c, 0x9e, - 0xeb, 0xf7, 0x4a, 0x99, 0xaa, 0x20, 0x60, 0x8f, 0x8e, 0xbe, 0x0b, 0x39, 0xf9, 0xcf, 0x03, 0x83, - 0x1b, 0x32, 0x6d, 0x33, 0xde, 0x85, 0x57, 0x7d, 0x22, 0x0e, 0xf9, 0xe5, 0xfb, 0x30, 0x13, 0x43, - 0xaf, 0x00, 0x18, 0x8d, 0x27, 0x1d, 0xc6, 0xc5, 0x4b, 0xa2, 0x4c, 0x14, 0xfa, 0xbd, 0x12, 0x6c, - 0x06, 0x54, 0x1c, 0x91, 0x28, 0xff, 0x63, 0x02, 0x60, 0xff, 0xf0, 0x09, 0x31, 0xf9, 0x63, 0xc2, - 0x0d, 0xf4, 0x39, 0x4c, 0xd9, 0xc6, 0x21, 0xb1, 0x85, 0x77, 0xe2, 0xb0, 0xf7, 0xce, 0xbf, 0x8d, - 0x50, 0xbb, 0xb2, 0x2b, 0x55, 0xb7, 0x1c, 0xee, 0x76, 0xf5, 0x82, 0x0a, 0xc5, 0x94, 0x47, 0xc4, - 0x0a, 0x17, 0x71, 0xc8, 0x1b, 0x8e, 0x43, 0xb9, 0xc1, 0x2d, 0xea, 0x30, 0x6d, 0x42, 0x9a, 0xf9, - 0xfe, 0xa5, 0xcc, 0x6c, 0x86, 0xfa, 0x9e, 0xad, 0x45, 0x65, 0x2b, 0x1f, 0xe1, 0xe0, 0xa8, 0x99, - 0x95, 0xf7, 0x21, 0x1f, 0x71, 0x0e, 0xcd, 0xc3, 0xe4, 0x09, 0xe9, 0xca, 0xf0, 0xe4, 0xb0, 0xf8, - 0x17, 0x5d, 0x85, 0xcc, 0xa9, 0x61, 0x77, 0xbc, 0x3a, 0x91, 0xc3, 0xde, 0xc3, 0x07, 0x13, 0xf7, - 0x52, 0x2b, 0xf7, 0x61, 0x7e, 0xd0, 0xe0, 0x65, 0xf4, 0xcb, 0x2f, 0xd3, 0xb0, 0x50, 0x33, 0x5c, - 0x6e, 0x19, 0x76, 0x24, 0xd0, 0x6b, 0x90, 0x76, 0x8c, 0x16, 0xf1, 0x20, 0xf4, 0x19, 0x75, 0x80, - 0xf4, 0x9e, 0xd1, 0x12, 0x2f, 0x86, 0xd1, 0x22, 0xe8, 0x1e, 0xcc, 0xf8, 0xc5, 0x54, 0x50, 0x3d, - 0x60, 0xfd, 0xaa, 0x92, 0x9c, 0x79, 0x18, 0xe1, 0xe1, 0x98, 0x24, 0xda, 0x80, 0x9c, 0x40, 0x60, - 0x6d, 0xc3, 0xf4, 0x4a, 0x54, 0x2e, 0x2c, 0x96, 0x7b, 0x3e, 0x03, 0x87, 0x32, 0xa8, 0x19, 0xdc, - 0x7a, 0x5a, 0x5e, 0xc7, 0x47, 0xe7, 0x5f, 0xc7, 0xd0, 0x89, 0x2e, 0x74, 0xf9, 0xbf, 0x8a, 0x5f, - 0x7e, 0x46, 0x5a, 0x7b, 0x30, 0x8e, 0xb5, 0xcb, 0xe7, 0x00, 0x7a, 0x0a, 0x73, 0xf4, 0xa9, 0xe8, - 0x42, 0xe4, 0x88, 0xb8, 0xc4, 0x11, 0x3d, 0x67, 0x4a, 0x3a, 0x70, 0x37, 0xf2, 0x46, 0x07, 0xad, - 0xad, 0xd2, 0x3e, 0x69, 0x0a, 0x02, 0xab, 0x88, 0xd6, 0x26, 0x33, 0x30, 0xa6, 0xac, 0xbf, 0xa5, - 0x0c, 0xce, 0xc5, 0xe9, 0x0c, 0x0f, 0x5a, 0xf9, 0x7f, 0x26, 0xdf, 0x9f, 0x26, 0x60, 0xa9, 0x46, - 
0x5c, 0x66, 0x31, 0x4e, 0x1c, 0xfe, 0x29, 0xb5, 0x3b, 0x2d, 0x52, 0xb5, 0x0d, 0xab, 0x85, 0x9e, - 0x40, 0x56, 0x1c, 0xac, 0x21, 0x8a, 0x8c, 0xd7, 0x7a, 0xdf, 0x1b, 0xe3, 0x1e, 0xf4, 0x65, 0x15, - 0x85, 0xe1, 0x14, 0xc7, 0x01, 0x3e, 0xda, 0x87, 0x34, 0x6b, 0x13, 0x53, 0xf5, 0xe0, 0x5b, 0x89, - 0xed, 0x32, 0xc9, 0xc9, 0x7a, 0x9b, 0x98, 0xe1, 0xbb, 0x21, 0x9e, 0xb0, 0x04, 0x42, 0x3f, 0x82, - 0x29, 0xc6, 0x0d, 0xde, 0x61, 0xaa, 0x03, 0x6f, 0x5c, 0x1c, 0x52, 0xaa, 0x85, 0x09, 0xea, 0x3d, - 0x63, 0x05, 0x57, 0xfe, 0x73, 0x01, 0x50, 0x8d, 0x36, 0x06, 0x07, 0x95, 0xdf, 0xa6, 0xa0, 0xd0, - 0xa6, 0x8d, 0xc8, 0x55, 0xa8, 0xfa, 0xf8, 0xe8, 0x02, 0x31, 0x1b, 0x82, 0x13, 0xa4, 0xa1, 0xfc, - 0xbd, 0xa6, 0x3c, 0x2a, 0xc4, 0x99, 0x78, 0xc0, 0x2e, 0xfa, 0x32, 0x05, 0x33, 0x0e, 0x6d, 0x90, - 0x3a, 0xb1, 0x89, 0xc9, 0xa9, 0xab, 0x2a, 0xe8, 0xf6, 0x58, 0x8e, 0xec, 0x45, 0x80, 0x3c, 0x37, - 0x82, 0xfa, 0x12, 0x65, 0xe1, 0x98, 0x45, 0xf4, 0x31, 0x20, 0x46, 0xdc, 0x53, 0xcb, 0x24, 0x9b, - 0xa6, 0x49, 0x3b, 0x0e, 0x97, 0xf5, 0xc9, 0x2b, 0x34, 0x2b, 0x4a, 0x1f, 0xd5, 0x87, 0x24, 0x70, - 0x82, 0x16, 0xfa, 0x4b, 0x0a, 0x96, 0xe3, 0xe4, 0x68, 0x90, 0xbd, 0x72, 0x54, 0x1f, 0xeb, 0x6c, - 0xf5, 0x51, 0xa8, 0xde, 0x41, 0x6f, 0x28, 0x47, 0x97, 0x47, 0xca, 0xe1, 0xd1, 0x8e, 0xa1, 0x06, - 0xac, 0x1a, 0x1d, 0x4e, 0x5b, 0x82, 0x1e, 0x07, 0x38, 0xa0, 0x27, 0xc4, 0x91, 0x43, 0x57, 0x56, - 0x5f, 0xeb, 0xf7, 0x4a, 0xab, 0x9b, 0x67, 0xc8, 0xe1, 0x33, 0x51, 0xd0, 0x4d, 0xc8, 0x8a, 0xc0, - 0xcb, 0xf0, 0x4e, 0xc9, 0xf0, 0xce, 0x2b, 0xaf, 0xb3, 0x7b, 0x8a, 0x8e, 0x03, 0x09, 0x44, 0x86, - 0x67, 0x3f, 0x6f, 0xa4, 0xfa, 0x4e, 0xe2, 0xdb, 0x41, 0x1b, 0xe3, 0x8d, 0x7f, 0x0e, 0xcc, 0x5b, - 0x2d, 0xa3, 0x49, 0x6a, 0x1d, 0xdb, 0xae, 0x13, 0xd3, 0x25, 0x9c, 0xa9, 0x19, 0x2b, 0x71, 0x76, - 0xdf, 0xa5, 0xa6, 0x5f, 0x15, 0xc2, 0xda, 0xa9, 0xa9, 0x63, 0xcc, 0xef, 0x0c, 0x20, 0xe1, 0x21, - 0x6c, 0xb4, 0x0d, 0x59, 0xe3, 0xe8, 0xc8, 0x72, 0x2c, 0xde, 0xd5, 0x72, 0xf2, 0x3c, 0xab, 0x49, - 0x76, 0x36, 0x95, 0x8c, 0x3e, 0x23, 0xc2, 0xe3, 0x3f, 0xe1, 0x40, 0x17, 0x7d, 0x08, 0xb3, 0xcc, - 0x3c, 0x26, 0x22, 0x8f, 0x5c, 0x19, 0x51, 0x90, 0x11, 0x5d, 0x52, 0xae, 0xcc, 0xd6, 0xa3, 0x4c, - 0x1c, 0x97, 0x45, 0x9f, 0x40, 0x9e, 0x53, 0x5b, 0xb4, 0x58, 0x99, 0x97, 0x79, 0x79, 0xde, 0x62, - 0x92, 0x1f, 0x07, 0x81, 0x58, 0xd8, 0x92, 0x42, 0x1a, 0xc3, 0x51, 0x1c, 0xf4, 0x10, 0x16, 0xda, - 0xae, 0x45, 0x65, 0x78, 0xc5, 0x48, 0x27, 0xfd, 0x9a, 0x91, 0x7e, 0x85, 0x85, 0x75, 0x50, 0x00, - 0x0f, 0xeb, 0xa0, 0x75, 0xc8, 0xfa, 0x44, 0x6d, 0x56, 0x0e, 0x7d, 0x32, 0x0c, 0xbe, 0x2e, 0x0e, - 0xb8, 0xa8, 0x11, 0xf9, 0x88, 0x79, 0x68, 0x70, 0xc2, 0xb4, 0x82, 0x3c, 0xcc, 0xb7, 0x47, 0x24, - 0x09, 0x8e, 0x0a, 0x87, 0x55, 0x2a, 0x46, 0x66, 0x78, 0x00, 0x13, 0xfd, 0x00, 0xe6, 0x5d, 0xef, - 0x7d, 0x0c, 0xcf, 0x35, 0xe7, 0x0d, 0x30, 0xe2, 0xda, 0xf1, 0x00, 0x0f, 0x0f, 0x49, 0xa3, 0x6d, - 0x40, 0xc4, 0x31, 0x0e, 0x6d, 0xa2, 0x5e, 0x8c, 0x5d, 0xcb, 0x39, 0x61, 0xda, 0xbc, 0x7c, 0xaf, - 0xae, 0x89, 0x02, 0xb3, 0x35, 0xc4, 0xc5, 0x09, 0x1a, 0xe8, 0xf7, 0x29, 0x58, 0xe6, 0xb4, 0x4d, - 0x6d, 0xda, 0xec, 0xd6, 0xdb, 0xc2, 0xcd, 0x2a, 0x75, 0x18, 0x77, 0x0d, 0xcb, 0xe1, 0x4c, 0x5b, - 0x90, 0x67, 0xbf, 0x99, 0x7c, 0x91, 0xc9, 0x4a, 0x61, 0xe5, 0x18, 0x25, 0xc1, 0xf0, 0x68, 0x8b, - 0x2b, 0x9b, 0xb0, 0x98, 0x50, 0xfe, 0x2f, 0x35, 0x14, 0x7c, 0x04, 0x0b, 0x43, 0x85, 0xfb, 0x52, - 0x00, 0xbb, 0x50, 0x3c, 0xbb, 0x3a, 0x5e, 0x6a, 0xc6, 0xf8, 0xfb, 0x02, 0x4c, 0x8b, 0x6a, 0x22, - 0x1a, 0x73, 0x72, 0x6b, 0x48, 0x8d, 0xd5, 0x1a, 0x3a, 0x89, 0x8d, 0xee, 0xc3, 0x0b, 0x35, 0x03, - 0xe1, 0xcc, 0x98, 0xdd, 
0x2d, 0x5a, 0x6f, 0x26, 0x5f, 0x67, 0xbd, 0x49, 0x8f, 0x5f, 0x6f, 0x32, - 0xaf, 0xa9, 0xde, 0x24, 0xd5, 0xee, 0xa9, 0x37, 0x58, 0xbb, 0x13, 0xeb, 0xdb, 0xf4, 0x2b, 0xd6, - 0xb7, 0xec, 0x99, 0xf5, 0xed, 0x7b, 0x90, 0x3f, 0xa6, 0x8c, 0xef, 0x11, 0xfe, 0x94, 0xba, 0x27, - 0xb2, 0x63, 0x64, 0xc3, 0xc8, 0x3c, 0x0a, 0x59, 0x38, 0x2a, 0x87, 0xde, 0x81, 0x69, 0xf1, 0x58, - 0xdb, 0x79, 0x20, 0xfb, 0x42, 0x36, 0xdc, 0x06, 0x3c, 0xf2, 0xc8, 0xd8, 0xe7, 0xfb, 0xa2, 0x3b, - 0xb5, 0xaa, 0x96, 0x1f, 0x16, 0xdd, 0xa9, 0x55, 0xb1, 0xcf, 0x47, 0xfb, 0xb0, 0xc4, 0x8e, 0x0d, - 0x97, 0xd4, 0x5c, 0x6a, 0x12, 0xef, 0x28, 0xde, 0x57, 0xd9, 0x8c, 0x54, 0x5c, 0xee, 0xf7, 0x4a, - 0x4b, 0xf5, 0x24, 0x01, 0x9c, 0xac, 0x97, 0xd4, 0xe3, 0x67, 0xdf, 0x40, 0x8f, 0x6f, 0xc2, 0x75, - 0x4e, 0xdc, 0x96, 0xe5, 0xc8, 0xbc, 0x79, 0xe8, 0x1a, 0x26, 0xa9, 0x11, 0xd7, 0x92, 0x70, 0xd4, - 0x69, 0x88, 0x9e, 0x91, 0x5a, 0x9f, 0xd4, 0x6f, 0xf4, 0x7b, 0xa5, 0xeb, 0x07, 0x67, 0x09, 0xe2, - 0xb3, 0x71, 0xd0, 0x3e, 0xe4, 0x1a, 0x0e, 0xab, 0x51, 0xdb, 0x32, 0xbb, 0xaa, 0x41, 0xdc, 0xf6, - 0x3f, 0x55, 0x1f, 0xec, 0xd5, 0x3d, 0xc6, 0xcb, 0x5e, 0x69, 0x75, 0x78, 0x41, 0x59, 0x09, 0xf8, - 0x38, 0xc4, 0x40, 0x8f, 0x25, 0x60, 0x95, 0x3a, 0x47, 0x56, 0x53, 0x76, 0x8b, 0xfc, 0x9d, 0xb5, - 0x11, 0xa1, 0x79, 0xb0, 0x57, 0xf7, 0xe4, 0xbc, 0xf5, 0x4a, 0xf0, 0x88, 0x43, 0x84, 0x6f, 0x5a, - 0xf7, 0x40, 0x5b, 0x30, 0x7d, 0x2a, 0x3f, 0x66, 0x98, 0x86, 0x46, 0x6f, 0xa3, 0xbc, 0xef, 0x9d, - 0x30, 0x2f, 0xbd, 0x67, 0x86, 0x7d, 0x5d, 0xf4, 0x33, 0x28, 0x88, 0x22, 0x15, 0x2c, 0x66, 0x99, - 0xb6, 0x28, 0xd1, 0x12, 0xf7, 0x8e, 0x81, 0x54, 0xd8, 0xfd, 0x77, 0x62, 0xca, 0x78, 0x00, 0x0c, - 0xad, 0x42, 0xda, 0x70, 0x9b, 0x4c, 0xbb, 0xba, 0x36, 0xb9, 0x9e, 0xf3, 0x36, 0x7e, 0x9b, 0x6e, - 0x93, 0x61, 0x49, 0xf5, 0xb7, 0x69, 0x4b, 0x97, 0xdf, 0xa6, 0xc5, 0x17, 0xc6, 0xd7, 0xde, 0xec, - 0xc2, 0xf8, 0xad, 0x37, 0xb1, 0x30, 0xd6, 0x5e, 0xfb, 0xc2, 0x78, 0xf9, 0xd5, 0x16, 0xc6, 0xbf, - 0x06, 0xcd, 0xf4, 0x2f, 0x6e, 0xa0, 0x2c, 0x68, 0x2b, 0x17, 0xdf, 0x1c, 0xaf, 0xf6, 0x7b, 0xa5, - 0x70, 0xcf, 0x3f, 0x58, 0x5f, 0x46, 0x9a, 0x40, 0x9f, 0xc1, 0x8c, 0x97, 0x93, 0x8f, 0x45, 0xdb, - 0x67, 0xda, 0xdb, 0x32, 0x29, 0x4a, 0xa3, 0x93, 0x5a, 0xca, 0x85, 0x7d, 0x3c, 0x42, 0x64, 0x38, - 0x06, 0xf5, 0xca, 0x53, 0x52, 0xf9, 0x8f, 0x13, 0x30, 0x57, 0xa3, 0x8d, 0x03, 0xd2, 0x6a, 0xdb, - 0x06, 0x27, 0x72, 0xbe, 0xf9, 0xe9, 0xd0, 0xd6, 0xe4, 0xe6, 0x65, 0x56, 0x97, 0x3a, 0x52, 0x8e, - 0x43, 0xe2, 0x9e, 0xe4, 0x73, 0x00, 0x11, 0x29, 0x97, 0xda, 0x36, 0x71, 0xd5, 0xb6, 0x64, 0x4c, - 0xfc, 0x6a, 0x80, 0x83, 0x23, 0x98, 0xe8, 0x87, 0x6a, 0x13, 0xe3, 0x0d, 0x36, 0xef, 0x5c, 0x78, - 0x96, 0x4a, 0xda, 0xc2, 0x94, 0xff, 0x96, 0x82, 0xb9, 0xc1, 0x4d, 0xc9, 0x3e, 0x4c, 0xb6, 0x69, - 0x43, 0xc5, 0xe6, 0xee, 0x38, 0x1f, 0xee, 0xfa, 0xb4, 0x78, 0xe5, 0x05, 0x5d, 0x20, 0xa1, 0x26, - 0xe4, 0x82, 0xec, 0x51, 0x21, 0xf9, 0xe0, 0x7c, 0xd8, 0x51, 0x3f, 0x39, 0xa9, 0x4d, 0x7a, 0xc0, - 0x0d, 0xb1, 0xcb, 0xbf, 0x81, 0xbc, 0x1a, 0x4c, 0x6b, 0xd4, 0xe5, 0x17, 0x58, 0xd0, 0xae, 0x41, - 0xba, 0x4d, 0x5d, 0xae, 0x56, 0xf4, 0x81, 0x84, 0xd0, 0xc6, 0x92, 0xe3, 0x7f, 0xbf, 0x0b, 0x8a, - 0x8c, 0x78, 0x26, 0xfe, 0xfd, 0x2e, 0x25, 0x03, 0x89, 0xf2, 0xbf, 0x33, 0x81, 0x07, 0x32, 0xd7, - 0x30, 0x64, 0x04, 0x8a, 0xbf, 0x6a, 0xba, 0x75, 0xfe, 0xa9, 0x23, 0xfe, 0xeb, 0xb3, 0xca, 0x52, - 0x46, 0x3c, 0x31, 0xec, 0x41, 0xa1, 0x0d, 0xc8, 0x99, 0x76, 0x87, 0x71, 0xe2, 0xee, 0xd4, 0xd4, - 0x46, 0x39, 0x28, 0x8b, 0x55, 0x9f, 0x81, 0x43, 0x19, 0xa4, 0x43, 0x9a, 0x77, 0xdb, 0xfe, 0x76, - 0xa7, 0xe2, 0x1f, 0xf2, 0xa0, 0xdb, 0x26, 0x2f, 
0x7b, 0xa5, 0x62, 0x42, 0x5b, 0x56, 0x4e, 0x08, - 0x09, 0x2c, 0x75, 0xd1, 0x6d, 0xc8, 0x93, 0x67, 0x9c, 0xb8, 0x8e, 0x61, 0xef, 0xd4, 0xbc, 0xa5, - 0x4e, 0x4e, 0x9f, 0x13, 0xe3, 0xd8, 0x56, 0x48, 0xc6, 0x51, 0x19, 0x74, 0x1f, 0x0a, 0x36, 0x35, - 0x1a, 0xba, 0x61, 0x1b, 0x8e, 0x29, 0x9d, 0xcd, 0x48, 0x07, 0x82, 0x0e, 0xb4, 0x1b, 0xe3, 0xe2, - 0x01, 0x69, 0xf4, 0x63, 0xd0, 0xa2, 0x14, 0xef, 0x17, 0x1e, 0x6c, 0x38, 0x4d, 0xb5, 0xf4, 0xcd, - 0x79, 0x15, 0x6b, 0x77, 0x84, 0x0c, 0x1e, 0xa9, 0x8d, 0x7e, 0x97, 0x82, 0x25, 0xdf, 0xd3, 0x03, - 0x57, 0x4c, 0xfb, 0xa6, 0x1a, 0x5f, 0xbc, 0xb9, 0xf6, 0x40, 0x79, 0xb8, 0xb4, 0x95, 0x24, 0xf4, - 0xb2, 0x57, 0x7a, 0x6f, 0x74, 0xcc, 0x12, 0x55, 0x64, 0x20, 0x93, 0x4d, 0xa2, 0xc7, 0xb0, 0x78, - 0x4c, 0x0c, 0x9b, 0x1f, 0x57, 0x8f, 0x89, 0x79, 0xe2, 0xe7, 0x94, 0x9a, 0x90, 0xdf, 0x56, 0x9e, - 0x2c, 0x3e, 0x1a, 0x16, 0xc1, 0x49, 0x7a, 0xe8, 0x0b, 0x58, 0x62, 0x84, 0x31, 0x8b, 0x3a, 0xfe, - 0xf7, 0x8c, 0x1a, 0xa4, 0x72, 0x7e, 0xb9, 0x48, 0xea, 0x04, 0x09, 0x0a, 0x6a, 0xb2, 0x4d, 0x62, - 0xe1, 0x64, 0x13, 0xe5, 0xbf, 0xa6, 0x60, 0xd1, 0x4f, 0x9d, 0xff, 0x5d, 0xc5, 0x1d, 0xda, 0x4c, - 0x5f, 0xf0, 0x15, 0x1b, 0x55, 0x13, 0xf5, 0xed, 0xe7, 0x2f, 0x8a, 0x57, 0xbe, 0x7a, 0x51, 0xbc, - 0xf2, 0xf5, 0x8b, 0xe2, 0x95, 0x2f, 0xfb, 0xc5, 0xd4, 0xf3, 0x7e, 0x31, 0xf5, 0x55, 0xbf, 0x98, - 0xfa, 0xba, 0x5f, 0x4c, 0xfd, 0xb3, 0x5f, 0x4c, 0xfd, 0xe1, 0x5f, 0xc5, 0x2b, 0x3f, 0x59, 0x3b, - 0xef, 0x27, 0xff, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x02, 0xfb, 0xed, 0x48, 0x15, 0x20, 0x00, - 0x00, + // 3094 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0x37, 0x25, 0x51, 0x14, 0x87, 0xfa, 0xb0, 0x46, 0x96, 0xb3, 0x52, 0x6c, 0x51, 0x66, 0xdb, + 0xd4, 0x41, 0x62, 0x2a, 0x56, 0x1c, 0xd4, 0x71, 0x50, 0xa7, 0x22, 0x25, 0xd9, 0x4a, 0x64, 0x89, + 0x1d, 0x2a, 0x4a, 0x1b, 0xb4, 0x69, 0x56, 0xcb, 0x21, 0xb9, 0xd1, 0x72, 0x87, 0xd9, 0x19, 0x4a, + 0x56, 0x0a, 0xb4, 0x41, 0x0f, 0x45, 0x11, 0xa0, 0x40, 0x0f, 0x3d, 0xf7, 0xd4, 0x6b, 0x0f, 0x05, + 0xfa, 0x27, 0xf4, 0x10, 0xa0, 0x97, 0x1c, 0x73, 0x28, 0xd8, 0x86, 0x3d, 0xf5, 0xbb, 0x97, 0x5e, + 0x5c, 0x14, 0x28, 0xe6, 0x63, 0xbf, 0xb8, 0x43, 0x7d, 0xd9, 0x75, 0xd1, 0x83, 0x00, 0xed, 0xfb, + 0xf8, 0xbd, 0x99, 0xb7, 0x6f, 0xdf, 0x7b, 0xf3, 0x86, 0xe0, 0xa5, 0xfd, 0x16, 0xa9, 0x75, 0x1c, + 0x4c, 0x8b, 0x0f, 0x8f, 0x3e, 0x5c, 0x22, 0xf5, 0x3a, 0x6d, 0x12, 0xc2, 0x6e, 0x98, 0x6d, 0x7b, + 0x89, 0xff, 0x1d, 0xdc, 0x5c, 0x6a, 0x60, 0x17, 0x7b, 0x26, 0xc3, 0xb5, 0x62, 0xdb, 0x23, 0x8c, + 0xc0, 0xc5, 0xa8, 0x46, 0xd1, 0xd7, 0xf8, 0x8e, 0xd9, 0xb6, 0x8b, 0xfc, 0xef, 0xe0, 0xe6, 0xfc, + 0x8d, 0x86, 0xcd, 0x9a, 0x9d, 0xbd, 0xa2, 0x45, 0x5a, 0x4b, 0x0d, 0xd2, 0x20, 0x4b, 0x42, 0x71, + 0xaf, 0x53, 0x17, 0x4f, 0xe2, 0x41, 0xfc, 0x27, 0x01, 0xe7, 0x0b, 0xfb, 0xb7, 0x69, 0xd1, 0x26, + 0xc2, 0x9e, 0x45, 0x3c, 0xac, 0x31, 0x3a, 0x7f, 0x2b, 0x94, 0x69, 0x99, 0x56, 0xd3, 0x76, 0xb1, + 0x77, 0xb4, 0xd4, 0xde, 0x6f, 0x70, 0x02, 0x5d, 0x6a, 0x61, 0x66, 0x6a, 0xb4, 0x0a, 0xff, 0x4e, + 0x03, 0xa3, 0x4c, 0x5c, 0x66, 0x72, 0x79, 0xd4, 0x71, 0x99, 0xdd, 0xc2, 0x55, 0xcc, 0x98, 0xed, + 0x36, 0x28, 0xfc, 0x26, 0xc8, 0x7a, 0x98, 0x92, 0x8e, 0x67, 0x61, 0x6a, 0xa4, 0x16, 0x53, 0xd7, + 0x73, 0xcb, 0xd7, 0x8b, 0xd2, 0x8c, 0xd8, 0x08, 0x5f, 0x4a, 0xf1, 0xe0, 0x66, 0x11, 0x29, 0x21, + 0x84, 0x3f, 0xe8, 0xd8, 0x1e, 0x6e, 0x61, 0x97, 0xd1, 0xd2, 0xf4, 0x27, 0xdd, 0xfc, 0x85, 0x5e, + 0x37, 0x9f, 0xf5, 0xb9, 0x14, 0x85, 0x68, 0x10, 0x81, 0x09, 0xc7, 0x3e, 0xc0, 0x2e, 0xa6, 0xb4, + 0xe2, 0x91, 0x3d, 
0x6c, 0x0c, 0x09, 0xf8, 0x39, 0x1d, 0xbc, 0x10, 0x28, 0x4d, 0xf7, 0xba, 0xf9, + 0x89, 0xcd, 0xa8, 0x0e, 0x8a, 0x43, 0xc0, 0xb7, 0xc0, 0xa4, 0x87, 0xcd, 0x9a, 0x1d, 0x82, 0x0e, + 0x9f, 0x04, 0x0a, 0x7b, 0xdd, 0xfc, 0x24, 0x8a, 0x29, 0xa1, 0x3e, 0x10, 0xf8, 0x06, 0xc8, 0x3a, + 0x76, 0x1d, 0x5b, 0x47, 0x96, 0x83, 0x8d, 0x11, 0x81, 0x78, 0x55, 0x87, 0xb8, 0xe9, 0x0b, 0x95, + 0x26, 0xf8, 0xb6, 0x83, 0x47, 0x14, 0xaa, 0xc3, 0x3d, 0x30, 0x45, 0xb1, 0xd5, 0xf1, 0x6c, 0x76, + 0xc4, 0xbd, 0x8e, 0x1f, 0x32, 0x23, 0x2d, 0x10, 0xbf, 0xa0, 0x43, 0xac, 0xc6, 0x45, 0x4b, 0x33, + 0xbd, 0x6e, 0x7e, 0xaa, 0x8f, 0x88, 0xfa, 0x01, 0xe1, 0x26, 0x18, 0x71, 0x6d, 0x0b, 0x1b, 0xa3, + 0x02, 0xb8, 0x58, 0x3c, 0x29, 0x18, 0x8b, 0x5b, 0xb6, 0x15, 0xbc, 0xf3, 0xd2, 0x58, 0xaf, 0x9b, + 0x1f, 0xe1, 0x14, 0x24, 0x50, 0xe0, 0x0e, 0x18, 0xb5, 0x89, 0xc0, 0xcb, 0x08, 0xbc, 0x97, 0x4e, + 0xc6, 0xdb, 0xd8, 0x8e, 0x21, 0x82, 0x5e, 0x37, 0x3f, 0x2a, 0x69, 0x48, 0x61, 0xc1, 0x4d, 0x90, + 0xc1, 0xee, 0xc1, 0xba, 0x47, 0x5a, 0xc6, 0xd8, 0xe2, 0xf0, 0xf5, 0xdc, 0xf2, 0x35, 0xdd, 0xfe, + 0xd7, 0xa4, 0x48, 0x55, 0xc4, 0x4c, 0x69, 0x4a, 0x05, 0x54, 0x46, 0x91, 0x91, 0x0f, 0x01, 0x5f, + 0x05, 0xc3, 0xd8, 0x3d, 0x30, 0xb2, 0x02, 0x69, 0x7e, 0x00, 0xd2, 0xae, 0xe9, 0x95, 0x72, 0x0a, + 0x62, 0x78, 0xcd, 0x3d, 0x40, 0x5c, 0xa7, 0xf0, 0xf3, 0x14, 0x98, 0x5d, 0x6b, 0x37, 0x71, 0x0b, + 0x7b, 0xa6, 0xb3, 0x4b, 0x9c, 0x4e, 0x0b, 0x4b, 0x73, 0xf0, 0xe3, 0x14, 0x98, 0x39, 0x10, 0x84, + 0xb2, 0x63, 0xda, 0xad, 0x1d, 0xdc, 0x6a, 0x3b, 0x26, 0xc3, 0xea, 0x3b, 0x78, 0xfd, 0x64, 0x37, + 0x54, 0xb0, 0x47, 0x6d, 0xca, 0xb0, 0xcb, 0x76, 0x93, 0x30, 0xa5, 0x67, 0x7a, 0xdd, 0xfc, 0x8c, + 0x86, 0x81, 0x74, 0x46, 0x0b, 0xef, 0x82, 0xc9, 0xb8, 0x57, 0x61, 0x1e, 0xa4, 0x2d, 0xc7, 0xa4, + 0xf2, 0xbb, 0x4c, 0x97, 0xb2, 0xbd, 0x6e, 0x3e, 0x5d, 0xe6, 0x04, 0x24, 0xe9, 0xf0, 0x05, 0x90, + 0x15, 0xff, 0xac, 0x9a, 0xcc, 0x14, 0x5f, 0x57, 0x5a, 0xc6, 0x65, 0xd9, 0x27, 0xa2, 0x90, 0x5f, + 0xb8, 0x0b, 0xc6, 0x63, 0xe8, 0x45, 0x00, 0xcc, 0xda, 0xfb, 0x1d, 0xca, 0xf8, 0xb7, 0xac, 0x4c, + 0x4c, 0xf6, 0xba, 0x79, 0xb0, 0x12, 0x50, 0x51, 0x44, 0xa2, 0xf0, 0xbb, 0x21, 0x00, 0xb6, 0xf7, + 0xde, 0xc7, 0x16, 0x7b, 0x80, 0x99, 0x09, 0xdf, 0x03, 0xa3, 0x8e, 0xb9, 0x87, 0x1d, 0xbe, 0x3a, + 0xfe, 0x4e, 0x6e, 0x9f, 0xec, 0xad, 0x50, 0xbb, 0xb8, 0x29, 0x54, 0xd7, 0x5c, 0xe6, 0x1d, 0x95, + 0x26, 0xd5, 0x1b, 0x1b, 0x95, 0x44, 0xa4, 0x70, 0x21, 0x03, 0x39, 0xd3, 0x75, 0x09, 0x33, 0x99, + 0x4d, 0x5c, 0x6a, 0x0c, 0x09, 0x33, 0x5f, 0x3d, 0x93, 0x99, 0x95, 0x50, 0x5f, 0xda, 0x9a, 0x51, + 0xb6, 0x72, 0x11, 0x0e, 0x8a, 0x9a, 0x99, 0x7f, 0x15, 0xe4, 0x22, 0x8b, 0x83, 0x17, 0xc1, 0xf0, + 0x3e, 0x3e, 0x12, 0xee, 0xc9, 0x22, 0xfe, 0x2f, 0xbc, 0x04, 0xd2, 0x07, 0xa6, 0xd3, 0x91, 0xe9, + 0x2c, 0x8b, 0xe4, 0xc3, 0x9d, 0xa1, 0xdb, 0xa9, 0xf9, 0xbb, 0xe0, 0x62, 0xbf, 0xc1, 0xb3, 0xe8, + 0x17, 0x1e, 0x8d, 0x80, 0xe9, 0x8a, 0xe9, 0x31, 0xdb, 0x74, 0x22, 0x8e, 0x5e, 0x04, 0x23, 0xae, + 0xd9, 0x92, 0x41, 0x99, 0x2d, 0x8d, 0xab, 0x0d, 0x8c, 0x6c, 0x99, 0x2d, 0xfe, 0xfd, 0x9a, 0x2d, + 0x0c, 0x6f, 0x83, 0x71, 0x3f, 0xe7, 0x73, 0xaa, 0x04, 0x2e, 0x5d, 0x52, 0x92, 0xe3, 0xf7, 0x22, + 0x3c, 0x14, 0x93, 0x84, 0x4b, 0x20, 0xcb, 0x11, 0x68, 0xdb, 0xb4, 0x64, 0x26, 0xcd, 0x86, 0x39, + 0x7d, 0xcb, 0x67, 0xa0, 0x50, 0x06, 0x36, 0x82, 0xb7, 0x3e, 0x22, 0x5e, 0xc7, 0x69, 0xbe, 0x91, + 0xfe, 0x1d, 0x9d, 0xea, 0xe5, 0x7f, 0x37, 0xfe, 0xf2, 0xd3, 0xc2, 0xda, 0xea, 0x79, 0xac, 0x9d, + 0x3d, 0x06, 0xe0, 0x21, 0x98, 0x22, 0x87, 0xbc, 0x58, 0xe2, 0x3a, 0xf6, 0xb0, 0xcb, 0x4b, 0xe3, + 0xa8, 0x58, 0xc0, 0xad, 0x48, 0xe2, 0x09, 
0x2a, 0x70, 0xb1, 0xbd, 0xdf, 0xe0, 0x04, 0x5a, 0xe4, + 0x15, 0x58, 0x44, 0x60, 0x4c, 0xb9, 0xf4, 0x8c, 0x32, 0x38, 0x15, 0xa7, 0x53, 0xd4, 0x6f, 0xe5, + 0x7f, 0x19, 0x7c, 0xbf, 0x18, 0x02, 0xb3, 0xda, 0x74, 0x06, 0xdf, 0x07, 0x63, 0x7c, 0x63, 0x35, + 0x9e, 0x64, 0x64, 0x66, 0x7c, 0xf9, 0x1c, 0xef, 0xa1, 0x34, 0xa7, 0xbc, 0x90, 0x0c, 0x71, 0x14, + 0xe0, 0xc3, 0x6d, 0x30, 0x42, 0xdb, 0xd8, 0x52, 0xad, 0xc2, 0x0d, 0x6d, 0x55, 0xd7, 0x2d, 0xb2, + 0xda, 0xc6, 0x56, 0xf8, 0x6d, 0xf0, 0x27, 0x24, 0x80, 0xe0, 0xdb, 0x60, 0x94, 0x32, 0x93, 0x75, + 0xa8, 0x6a, 0x14, 0x96, 0x4e, 0x0f, 0x29, 0xd4, 0xc2, 0x00, 0x95, 0xcf, 0x48, 0xc1, 0x15, 0x7e, + 0x9b, 0x02, 0x57, 0x8f, 0x4d, 0xff, 0xff, 0xd7, 0x7e, 0x2b, 0xfc, 0x72, 0x12, 0xc0, 0x0a, 0xa9, + 0xf5, 0xb7, 0x8b, 0x3f, 0x4a, 0x81, 0xc9, 0x36, 0xa9, 0x45, 0x22, 0x4d, 0xa5, 0xff, 0xfb, 0xa7, + 0xd8, 0x5a, 0x02, 0x8e, 0x93, 0x12, 0x9f, 0xe7, 0x65, 0xb5, 0x9a, 0xc9, 0x38, 0x13, 0xf5, 0xd9, + 0x85, 0x1f, 0xa5, 0xc0, 0xb8, 0x4b, 0x6a, 0xb8, 0x8a, 0x1d, 0x6c, 0x31, 0xe2, 0xa9, 0x02, 0xb1, + 0x7e, 0xae, 0x85, 0x6c, 0x45, 0x80, 0xe4, 0x32, 0x82, 0xf4, 0x19, 0x65, 0xa1, 0x98, 0x45, 0xf8, + 0x06, 0x80, 0x14, 0x7b, 0x07, 0xb6, 0x85, 0x57, 0x2c, 0x8b, 0x74, 0x5c, 0x26, 0xd2, 0xaf, 0xcc, + 0xa3, 0xf3, 0x4a, 0x1f, 0x56, 0x13, 0x12, 0x48, 0xa3, 0x05, 0x7f, 0x95, 0x02, 0x73, 0x71, 0x72, + 0xd4, 0xc9, 0x32, 0xdb, 0x56, 0xcf, 0xb5, 0xb7, 0xea, 0x20, 0x54, 0xb9, 0xd1, 0x6b, 0x6a, 0xa1, + 0x73, 0x03, 0xe5, 0xd0, 0xe0, 0x85, 0xc1, 0x1a, 0xb8, 0x62, 0x76, 0x18, 0x69, 0x71, 0x7a, 0x1c, + 0x60, 0x87, 0xec, 0x63, 0x57, 0xb4, 0xbe, 0x63, 0xa5, 0xc5, 0x5e, 0x37, 0x7f, 0x65, 0xe5, 0x18, + 0x39, 0x74, 0x2c, 0x0a, 0x7c, 0x11, 0x8c, 0x71, 0xc7, 0x0b, 0xf7, 0x8e, 0x0a, 0xf7, 0x5e, 0x54, + 0xab, 0x1e, 0xdb, 0x52, 0x74, 0x14, 0x48, 0x40, 0x9c, 0xec, 0xc0, 0x65, 0x63, 0xfb, 0x9c, 0xf6, + 0xbb, 0x20, 0xb5, 0xf3, 0x35, 0xe1, 0x2e, 0xb8, 0x68, 0xb7, 0xcc, 0x06, 0xae, 0x74, 0x1c, 0xa7, + 0x8a, 0x2d, 0x0f, 0x33, 0xaa, 0x3a, 0x5d, 0xed, 0x09, 0x6a, 0x93, 0x58, 0xfe, 0xc7, 0x1b, 0x96, + 0x06, 0x43, 0x6d, 0xe3, 0xe2, 0x46, 0x1f, 0x12, 0x4a, 0x60, 0xc3, 0x75, 0x30, 0x66, 0xd6, 0xeb, + 0xb6, 0x6b, 0xb3, 0x23, 0x23, 0x2b, 0xf6, 0x73, 0x45, 0x67, 0x67, 0x45, 0xc9, 0x94, 0xc6, 0xb9, + 0x7b, 0xfc, 0x27, 0x14, 0xe8, 0xc2, 0xd7, 0xc0, 0x04, 0xb5, 0x9a, 0x98, 0xc7, 0x91, 0x27, 0x3c, + 0x0a, 0x84, 0x47, 0x67, 0xd5, 0x52, 0x26, 0xaa, 0x51, 0x26, 0x8a, 0xcb, 0xc2, 0xb7, 0x40, 0x8e, + 0x11, 0x87, 0x77, 0x10, 0x22, 0x2e, 0x73, 0x62, 0xbf, 0x0b, 0xba, 0x75, 0xec, 0x04, 0x62, 0x61, + 0xc5, 0x0d, 0x69, 0x14, 0x45, 0x71, 0xe0, 0x3d, 0x30, 0xdd, 0xf6, 0x6c, 0x22, 0xdc, 0xcb, 0x3b, + 0x56, 0xb1, 0xae, 0x71, 0xb1, 0xae, 0x30, 0xff, 0xf5, 0x0b, 0xa0, 0xa4, 0x0e, 0xbc, 0x0e, 0xc6, + 0x7c, 0xa2, 0x31, 0x21, 0x7a, 0x5a, 0xe1, 0x06, 0x5f, 0x17, 0x05, 0x5c, 0x58, 0x8b, 0x1c, 0x25, + 0xef, 0x99, 0x0c, 0x53, 0x63, 0x52, 0x6c, 0xe6, 0x8b, 0x03, 0x82, 0x04, 0x45, 0x85, 0xc3, 0x2c, + 0x15, 0x23, 0x53, 0xd4, 0x87, 0x09, 0xbf, 0x06, 0x2e, 0x7a, 0xf2, 0x7b, 0x0c, 0xf7, 0x35, 0x25, + 0xfb, 0x33, 0xfe, 0xda, 0x51, 0x1f, 0x0f, 0x25, 0xa4, 0xe1, 0x3a, 0x80, 0xd8, 0x35, 0xf7, 0x1c, + 0xac, 0x3e, 0x8c, 0x4d, 0xdb, 0xdd, 0xa7, 0xc6, 0x45, 0xf1, 0x5d, 0x5d, 0xe6, 0x09, 0x66, 0x2d, + 0xc1, 0x45, 0x1a, 0x0d, 0xf8, 0xe3, 0x14, 0x98, 0x63, 0xa4, 0x4d, 0x1c, 0xd2, 0x38, 0xaa, 0xb6, + 0xf9, 0x32, 0xcb, 0xc4, 0xa5, 0xcc, 0x33, 0x6d, 0x97, 0x51, 0x63, 0x5a, 0xec, 0xfd, 0x45, 0xfd, + 0x8b, 0xd4, 0x2b, 0x85, 0x99, 0x63, 0x90, 0x04, 0x45, 0x83, 0x2d, 0xce, 0xaf, 0x80, 0x19, 0x4d, + 0xfa, 0x3f, 0x53, 0xcf, 0xf3, 0x3a, 0x98, 0x4e, 0x24, 0xee, 0x33, 
0x01, 0x6c, 0x82, 0x85, 0xe3, + 0xb3, 0xe3, 0x99, 0x5a, 0xa8, 0x7f, 0x42, 0x90, 0xe1, 0xd9, 0x84, 0xf7, 0x1d, 0x55, 0x90, 0x91, + 0x87, 0x3c, 0xbf, 0x40, 0x5e, 0x3f, 0x39, 0x77, 0xcb, 0xba, 0x1c, 0x1e, 0x82, 0xe5, 0x33, 0x45, + 0x3e, 0x12, 0xfc, 0x36, 0x98, 0xe4, 0x9f, 0x70, 0x30, 0xcc, 0xa1, 0xc6, 0x25, 0x81, 0xad, 0x9d, + 0x55, 0x04, 0x52, 0x61, 0xac, 0x6e, 0xc4, 0x94, 0x51, 0x1f, 0x18, 0x6c, 0x80, 0xab, 0x0c, 0x7b, + 0x2d, 0xdb, 0x15, 0x0e, 0xb8, 0xe7, 0x99, 0x16, 0xae, 0x60, 0xcf, 0x16, 0xf9, 0x91, 0xb8, 0x35, + 0x2a, 0x26, 0x23, 0xc3, 0xa5, 0x6b, 0xbd, 0x6e, 0xfe, 0xea, 0xce, 0x71, 0x82, 0xe8, 0x78, 0x1c, + 0xb8, 0x0d, 0xb2, 0x35, 0x97, 0x56, 0x88, 0x63, 0x5b, 0x47, 0x2a, 0x9f, 0xdf, 0xf4, 0x8f, 0x1d, + 0xab, 0x5b, 0x55, 0xc9, 0x78, 0xd4, 0xcd, 0x5f, 0x49, 0xce, 0xc4, 0x8a, 0x01, 0x1f, 0x85, 0x18, + 0xb0, 0xd3, 0xd7, 0x0a, 0x64, 0x84, 0x5b, 0x5e, 0x3b, 0x55, 0xb9, 0xe4, 0xaf, 0xeb, 0x89, 0xd6, + 0xff, 0xb1, 0x73, 0xd5, 0xff, 0x57, 0x40, 0xae, 0x49, 0x28, 0xdb, 0xc2, 0xec, 0x90, 0x78, 0xfb, + 0x46, 0x4e, 0x7c, 0xdf, 0x41, 0xe2, 0xbc, 0x1f, 0xb2, 0x50, 0x54, 0x0e, 0x3e, 0x0f, 0x32, 0xfc, + 0xb1, 0xb2, 0xb1, 0x2a, 0xd2, 0xe5, 0x58, 0x18, 0x3d, 0xf7, 0x25, 0x19, 0xf9, 0x7c, 0x5f, 0x74, + 0xa3, 0x52, 0x16, 0x99, 0xb1, 0x4f, 0x74, 0xa3, 0x52, 0x46, 0x3e, 0x1f, 0x6e, 0x83, 0x59, 0xda, + 0x34, 0x3d, 0x5c, 0xf1, 0x88, 0x85, 0x65, 0x1e, 0x92, 0x67, 0xc4, 0x67, 0x85, 0xe2, 0x5c, 0xaf, + 0x9b, 0x9f, 0xad, 0xea, 0x04, 0x90, 0x5e, 0x4f, 0x57, 0x92, 0x27, 0x9f, 0x52, 0x49, 0x9e, 0x7a, + 0x4a, 0x25, 0x19, 0x3e, 0xc9, 0x92, 0x3c, 0x73, 0xfe, 0x92, 0x7c, 0xf9, 0xbf, 0x59, 0x92, 0x8d, + 0xc7, 0x2c, 0xc9, 0x73, 0xc7, 0x96, 0xe4, 0x07, 0x22, 0x2f, 0x94, 0x89, 0x5b, 0xb7, 0x1b, 0xc6, + 0xbc, 0xf0, 0xe7, 0xe2, 0x80, 0xf8, 0x58, 0xdd, 0xaa, 0x4a, 0x39, 0x39, 0xf1, 0x0a, 0x1e, 0x51, + 0x88, 0xa0, 0xad, 0xbd, 0x57, 0x9f, 0x40, 0xed, 0x5d, 0x78, 0xc2, 0xb5, 0xf7, 0xda, 0xd3, 0xae, + 0xbd, 0xf0, 0x2a, 0x18, 0x31, 0xbd, 0x06, 0x35, 0xfe, 0xc8, 0x13, 0x65, 0x56, 0x0e, 0x84, 0x57, + 0xbc, 0x06, 0x45, 0x82, 0x0c, 0xef, 0xc8, 0x61, 0xeb, 0x9f, 0x32, 0x67, 0x9f, 0xb6, 0xc2, 0x77, + 0xa2, 0x17, 0x0a, 0x7f, 0xce, 0x3c, 0xd1, 0x1b, 0x85, 0x6a, 0xff, 0x8d, 0xc2, 0x5f, 0x32, 0x8f, + 0x7f, 0xa5, 0xb0, 0x9b, 0xb8, 0x52, 0xf8, 0x6b, 0xe6, 0x49, 0xdc, 0x29, 0xbc, 0x19, 0xbd, 0x53, + 0xf8, 0x5b, 0xe6, 0xf1, 0x2e, 0x15, 0xbe, 0x07, 0x0c, 0xcb, 0x2f, 0xd4, 0x7d, 0x59, 0xd0, 0xf8, + 0x7b, 0xe6, 0xf4, 0xd7, 0x0b, 0x57, 0x7a, 0xdd, 0x7c, 0x78, 0x19, 0xd4, 0x9f, 0x4f, 0x07, 0xda, + 0x80, 0xef, 0x80, 0x71, 0xd9, 0x84, 0x3c, 0xe0, 0x05, 0x8b, 0x1a, 0xff, 0x90, 0xa1, 0x91, 0xd7, + 0xd9, 0xdc, 0x0d, 0x05, 0xc3, 0x2a, 0x1a, 0x21, 0x52, 0x14, 0xc3, 0x7a, 0xec, 0x2e, 0xae, 0xf0, + 0xb3, 0x21, 0x30, 0x55, 0x21, 0x35, 0x7f, 0xf0, 0x22, 0xfa, 0xaf, 0x6f, 0x25, 0x86, 0x2f, 0x2f, + 0x9e, 0x65, 0x72, 0x5c, 0x82, 0x6a, 0xe1, 0x40, 0x3b, 0x6e, 0x79, 0x0f, 0x00, 0xee, 0x2a, 0x8f, + 0x38, 0x0e, 0xf6, 0xd4, 0xd0, 0xe5, 0x9c, 0xf8, 0xe5, 0x00, 0x07, 0x45, 0x30, 0xe1, 0x9b, 0x6a, + 0xa0, 0x23, 0xa7, 0x56, 0xcf, 0x9f, 0xba, 0x93, 0xd1, 0x0e, 0x73, 0x7e, 0x93, 0x02, 0x53, 0xfd, + 0x93, 0x9c, 0x6d, 0x30, 0xdc, 0x26, 0x35, 0xe5, 0x9b, 0x5b, 0xe7, 0x19, 0x2c, 0x94, 0x32, 0xfc, + 0xc3, 0xe7, 0x74, 0x8e, 0x04, 0x1b, 0x20, 0x1b, 0x84, 0x8f, 0x72, 0xc9, 0x9d, 0x93, 0x61, 0x07, + 0x5d, 0x4c, 0xaa, 0x8b, 0x8c, 0x80, 0x1b, 0x62, 0x17, 0xbe, 0x0f, 0x72, 0x2a, 0xb9, 0x56, 0x88, + 0xc7, 0x4e, 0x31, 0x1f, 0x5f, 0x04, 0x23, 0x6d, 0xe2, 0x31, 0x75, 0x43, 0x12, 0x48, 0x70, 0x6d, + 0x24, 0x38, 0xfe, 0x7c, 0x81, 0x53, 0x84, 0xc7, 0xd3, 0xf1, 0xf9, 0x82, 0x90, 0x0c, 0x24, 
0x0a, + 0xff, 0x4a, 0x07, 0x2b, 0x10, 0xb1, 0x86, 0x40, 0x9a, 0xa3, 0xf8, 0x9d, 0xfe, 0x8d, 0x93, 0x77, + 0x1d, 0x59, 0x7f, 0x69, 0x42, 0x59, 0x4a, 0xf3, 0x27, 0x8a, 0x24, 0x14, 0x5c, 0x02, 0x59, 0xcb, + 0xe9, 0x50, 0x86, 0xbd, 0x8d, 0x8a, 0x1a, 0xe8, 0x07, 0xb9, 0xb1, 0xec, 0x33, 0x50, 0x28, 0x03, + 0x4b, 0x60, 0x84, 0x1d, 0xb5, 0xfd, 0xe9, 0x53, 0xd1, 0xdf, 0xe4, 0xce, 0x51, 0x1b, 0x3f, 0xea, + 0xe6, 0x17, 0x34, 0x9d, 0xb4, 0x5a, 0x04, 0x97, 0x40, 0x42, 0x17, 0xde, 0x04, 0x39, 0xfc, 0x90, + 0x61, 0xcf, 0x35, 0x9d, 0x8d, 0x8a, 0x1c, 0x3a, 0x65, 0x4b, 0x53, 0xbc, 0x4b, 0x58, 0x0b, 0xc9, + 0x28, 0x2a, 0x03, 0xef, 0x82, 0x49, 0x87, 0x98, 0xb5, 0x92, 0xe9, 0x98, 0xae, 0x25, 0x16, 0x9b, + 0x16, 0x0b, 0x08, 0xce, 0x1c, 0x9b, 0x31, 0x2e, 0xea, 0x93, 0x86, 0xdf, 0x00, 0x46, 0x94, 0x22, + 0x2f, 0xe6, 0x90, 0xe9, 0x36, 0xd4, 0xcc, 0x3d, 0x2b, 0x53, 0xd6, 0xe6, 0x00, 0x19, 0x34, 0x50, + 0x1b, 0x7e, 0x9c, 0x02, 0xb3, 0xfe, 0x4a, 0x77, 0x3c, 0xde, 0x6a, 0x59, 0xea, 0xc4, 0x91, 0x11, + 0x2b, 0xdc, 0x51, 0x2b, 0x9c, 0x5d, 0xd3, 0x09, 0x3d, 0xea, 0xe6, 0x5f, 0x1e, 0xec, 0x33, 0xad, + 0x8a, 0x70, 0xa4, 0xde, 0x24, 0x7c, 0x00, 0x66, 0x9a, 0xd8, 0x74, 0x58, 0xb3, 0xdc, 0xc4, 0xd6, + 0xbe, 0x1f, 0x53, 0xe2, 0xa8, 0x90, 0x2e, 0x3d, 0xab, 0x56, 0x32, 0x73, 0x3f, 0x29, 0x82, 0x74, + 0x7a, 0xf0, 0x43, 0x30, 0x4b, 0x31, 0xa5, 0x36, 0x71, 0xfd, 0x66, 0x52, 0x35, 0x4d, 0x59, 0x3f, + 0x5d, 0xe8, 0x4a, 0x81, 0x46, 0x41, 0xb5, 0xf2, 0x3a, 0x16, 0xd2, 0x9b, 0x28, 0xfc, 0x3a, 0x05, + 0x66, 0xfc, 0xd0, 0x79, 0x7a, 0x19, 0x37, 0x31, 0xe0, 0x3e, 0xe5, 0x27, 0x36, 0x30, 0x27, 0xfe, + 0x34, 0x05, 0x46, 0x65, 0x51, 0x3a, 0x45, 0x06, 0x69, 0xfa, 0xe5, 0x4f, 0x46, 0x98, 0x5a, 0x45, + 0xf1, 0xb4, 0x47, 0x7a, 0x75, 0xbb, 0xdd, 0x57, 0x0c, 0x55, 0xb4, 0xc6, 0x90, 0x0b, 0x9f, 0xcd, + 0x80, 0x18, 0x1b, 0x22, 0x30, 0x26, 0x0e, 0x70, 0x26, 0x6b, 0x1e, 0xf7, 0xfb, 0x8c, 0xfb, 0x4a, + 0x26, 0x66, 0x50, 0xf4, 0xd9, 0x3e, 0x07, 0x05, 0x38, 0x1c, 0x13, 0xb7, 0xda, 0xec, 0x68, 0xd5, + 0xf6, 0x33, 0xb5, 0x16, 0x73, 0x4d, 0xc9, 0x24, 0x31, 0x7d, 0x0e, 0x0a, 0x70, 0xe0, 0x01, 0x98, + 0x6e, 0x88, 0x73, 0xbe, 0xba, 0x64, 0x58, 0xb5, 0xe9, 0xbe, 0xaa, 0x5e, 0x37, 0x75, 0xe0, 0xf7, + 0xca, 0x6b, 0x71, 0xe1, 0x98, 0x95, 0x59, 0x7e, 0xba, 0x48, 0x88, 0xa0, 0xa4, 0x09, 0xf8, 0x83, + 0x14, 0xb8, 0x64, 0x1e, 0xd2, 0x35, 0xc7, 0xa4, 0xcc, 0xb6, 0x4a, 0x0e, 0xb1, 0xf6, 0xab, 0x8c, + 0x78, 0xfe, 0xcf, 0x38, 0x6e, 0x69, 0xcf, 0x63, 0x6f, 0x57, 0x13, 0xf2, 0x31, 0xf3, 0x46, 0xaf, + 0x9b, 0xbf, 0xa4, 0x93, 0x42, 0x5a, 0x5b, 0xf0, 0x0d, 0x30, 0x4a, 0xc5, 0x91, 0x50, 0xfd, 0x22, + 0xe3, 0xb9, 0x01, 0xbd, 0x98, 0x87, 0x59, 0xcc, 0x8e, 0xf8, 0xdd, 0x84, 0xa4, 0x23, 0x85, 0x00, + 0xef, 0x82, 0x61, 0xb7, 0x4e, 0x8d, 0x63, 0x9a, 0xba, 0xad, 0xf5, 0x6a, 0x0c, 0x45, 0xd4, 0xe1, + 0xad, 0xf5, 0x2a, 0xe2, 0x8a, 0x70, 0x1d, 0xa4, 0x6d, 0x6a, 0x51, 0x5b, 0x24, 0x97, 0xdc, 0xf2, + 0x97, 0x74, 0x08, 0x1b, 0xd5, 0x72, 0x75, 0x23, 0x86, 0x21, 0x7e, 0x5c, 0x20, 0xc8, 0x48, 0xaa, + 0xc3, 0x5d, 0x90, 0x6d, 0xc8, 0xea, 0x52, 0xa7, 0xc7, 0xe5, 0x95, 0x7b, 0xbe, 0x50, 0x0c, 0x4f, + 0x94, 0xef, 0x80, 0x85, 0x42, 0x28, 0xf8, 0xc3, 0x14, 0x98, 0x6d, 0xeb, 0xee, 0xa2, 0xc4, 0x1c, + 0x3a, 0xb7, 0xfc, 0xca, 0xa9, 0x2f, 0xaf, 0x62, 0x06, 0x45, 0x22, 0xd3, 0x8a, 0x21, 0xbd, 0x39, + 0xee, 0x68, 0x6f, 0xaf, 0x26, 0x26, 0x2d, 0x03, 0x1c, 0x8d, 0x4a, 0xab, 0x49, 0x47, 0xa3, 0xd2, + 0x2a, 0xe2, 0x8a, 0x70, 0x07, 0x80, 0xba, 0x83, 0x1f, 0x4a, 0x09, 0x31, 0x7d, 0x19, 0x30, 0x3c, + 0x5e, 0x0f, 0xa4, 0x14, 0x8e, 0xf8, 0x99, 0x45, 0x48, 0x45, 0x11, 0x1c, 0x1e, 0x4a, 0x96, 0xed, + 0xd6, 0xb0, 0x27, 
0x86, 0x34, 0x03, 0x42, 0xa9, 0x2c, 0x24, 0x92, 0xa1, 0x24, 0xe9, 0x48, 0x21, + 0x08, 0x2c, 0xdc, 0x6e, 0xd6, 0xe9, 0x71, 0xc3, 0x96, 0x32, 0x6e, 0x37, 0xfb, 0x02, 0x4a, 0x62, + 0x09, 0x3a, 0x52, 0x08, 0x70, 0x0b, 0x64, 0xea, 0x3c, 0xe0, 0xb1, 0x27, 0xe6, 0xd7, 0xb9, 0xe5, + 0x2f, 0xeb, 0xb7, 0x2a, 0x44, 0x62, 0x68, 0xb9, 0x5e, 0x37, 0x9f, 0x51, 0x0c, 0xe4, 0x83, 0xc0, + 0x77, 0x41, 0xae, 0x46, 0x0e, 0xdd, 0x43, 0xd3, 0xab, 0xad, 0x54, 0x36, 0xc4, 0x3c, 0x3b, 0xb7, + 0xfc, 0x82, 0x0e, 0x73, 0x35, 0x14, 0x8b, 0xe1, 0x8a, 0xc6, 0x24, 0xc2, 0x44, 0x51, 0x40, 0x78, + 0x07, 0x0c, 0xd5, 0x2d, 0x63, 0x5a, 0xc0, 0x16, 0xb4, 0x4b, 0x2d, 0xc7, 0xd0, 0x46, 0x7b, 0xdd, + 0xfc, 0xd0, 0x7a, 0x19, 0x0d, 0xd5, 0x2d, 0x1e, 0xfa, 0xe6, 0x87, 0x1d, 0x0f, 0xaf, 0xdb, 0x0e, + 0x56, 0x73, 0x1d, 0x6d, 0xe8, 0xaf, 0xf8, 0x42, 0xc9, 0xd0, 0x0f, 0x58, 0x28, 0x84, 0xe2, 0xb8, + 0x96, 0x28, 0xa2, 0x0f, 0xcc, 0xb6, 0x18, 0xf1, 0x0c, 0xc0, 0x2d, 0xfb, 0x42, 0x49, 0xdc, 0x80, + 0x85, 0x42, 0x28, 0xb8, 0x0f, 0x26, 0x0e, 0x68, 0xbb, 0x89, 0xfd, 0x2c, 0x66, 0x5c, 0x12, 0xd8, + 0xcb, 0xda, 0xd3, 0x99, 0x12, 0xb4, 0x3d, 0xd6, 0x31, 0x9d, 0x44, 0xe2, 0x15, 0xe7, 0xe5, 0xdd, + 0x28, 0x18, 0x8a, 0x63, 0xf3, 0x40, 0xf8, 0xa0, 0x43, 0xf6, 0x8e, 0x18, 0x36, 0x66, 0x07, 0x07, + 0xc2, 0xd7, 0xa5, 0x48, 0x32, 0x10, 0x14, 0x03, 0xf9, 0x20, 0x81, 0xb3, 0x45, 0xc1, 0xb8, 0x7c, + 0x82, 0xb3, 0x13, 0xeb, 0x0d, 0x9d, 0x2d, 0x0a, 0x44, 0x08, 0x25, 0x0a, 0x43, 0xbb, 0x49, 0x18, + 0x71, 0xfb, 0x8a, 0xd2, 0x33, 0x83, 0x0b, 0x43, 0x45, 0x23, 0x9f, 0x2c, 0x0c, 0x3a, 0x29, 0xa4, + 0xb5, 0xc5, 0x37, 0xd7, 0xf6, 0x08, 0xef, 0x67, 0x70, 0x4d, 0x4d, 0xb4, 0x9e, 0x1f, 0x30, 0x56, + 0x90, 0x42, 0xc9, 0xcd, 0x05, 0x2c, 0x14, 0x42, 0xc1, 0x1a, 0x98, 0xe4, 0xe7, 0x84, 0x43, 0xe2, + 0xf9, 0xf9, 0xc7, 0x18, 0x5c, 0xc7, 0x2b, 0x31, 0x49, 0x85, 0x0d, 0xe5, 0x15, 0x7b, 0x94, 0x83, + 0xfa, 0x30, 0xf9, 0xab, 0xa6, 0x96, 0xe9, 0xe0, 0x8d, 0x6d, 0x31, 0xb8, 0x1b, 0xf0, 0xaa, 0xab, + 0x52, 0x24, 0xf9, 0xaa, 0x15, 0x03, 0xf9, 0x20, 0xdc, 0x1b, 0x94, 0x11, 0xcf, 0x6c, 0x60, 0x42, + 0xc5, 0x28, 0x79, 0x50, 0xab, 0x2a, 0x85, 0xb6, 0xab, 0x49, 0x6f, 0x04, 0x2c, 0x14, 0x42, 0xf1, + 0x4c, 0xce, 0x0b, 0xde, 0x95, 0xc1, 0x99, 0xbc, 0xbf, 0xdc, 0x89, 0x4c, 0xce, 0x8b, 0x1d, 0x57, + 0x84, 0x35, 0x90, 0xc5, 0xfe, 0x0f, 0x04, 0xc5, 0x84, 0x30, 0xb7, 0xfc, 0x95, 0x93, 0x7b, 0x3b, + 0xed, 0x6f, 0x0a, 0xe5, 0x2a, 0x03, 0x16, 0x0a, 0x81, 0x4b, 0xeb, 0x9f, 0x7c, 0xbe, 0x70, 0xe1, + 0xd3, 0xcf, 0x17, 0x2e, 0x7c, 0xf6, 0xf9, 0xc2, 0x85, 0x8f, 0x7a, 0x0b, 0xa9, 0x4f, 0x7a, 0x0b, + 0xa9, 0x4f, 0x7b, 0x0b, 0xa9, 0xcf, 0x7a, 0x0b, 0xa9, 0xdf, 0xf7, 0x16, 0x52, 0x3f, 0xf9, 0xc3, + 0xc2, 0x85, 0x77, 0x16, 0x4f, 0xfa, 0x29, 0xf2, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x90, 0x7b, + 0x0d, 0x63, 0xad, 0x2c, 0x00, 0x00, } func (m *ContainerRuntimeSettings) Marshal() (dAtA []byte, err error) { @@ -717,6 +884,41 @@ func (m *ContainerRuntimeSettings) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } +func (m *EphemeralVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EphemeralVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VolumeClaimTemplate != nil { + { + size, err := 
m.VolumeClaimTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *IONiceSettings) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1002,6 +1204,49 @@ func (m *PersistentVolumeClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PersistentVolumeClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.PartialObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodRuntimeSettings) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1264,9 +1509,9 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3f i-- - dAtA[i] = 0xda + dAtA[i] = 0x82 } } if m.ContainerSecurityContext != nil { @@ -1279,9 +1524,9 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3e i-- - dAtA[i] = 0xd2 + dAtA[i] = 0xfa } if m.Lifecycle != nil { { @@ -1293,9 +1538,9 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3e i-- - dAtA[i] = 0xca + dAtA[i] = 0xf2 } if m.ReadinessProbe != nil { { @@ -1307,9 +1552,9 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3e i-- - dAtA[i] = 0xc2 + dAtA[i] = 0xea } if m.LivenessProbe != nil { { @@ -1321,9 +1566,9 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3e i-- - dAtA[i] = 0xba + dAtA[i] = 0xe2 } { size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) @@ -1334,9 +1579,9 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3e i-- - dAtA[i] = 0xb2 + dAtA[i] = 0xda if len(m.Env) > 0 { for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { { @@ -1348,9 +1593,9 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3e i-- - dAtA[i] = 0xaa + dAtA[i] = 0xd2 } } if len(m.Args) > 0 { @@ -1359,15 +1604,15 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.Args[iNdEx]) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) i-- - dAtA[i] = 0x1 + dAtA[i] = 0x3e i-- - dAtA[i] = 0xa2 + dAtA[i] = 
0xca } } - if len(m.InitContainers) > 0 { - for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + if len(m.TopologySpreadConstraints) > 0 { + for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1375,15 +1620,76 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x2 i-- - dAtA[i] = 0x9a + dAtA[i] = 0x8a } } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + if m.EnableServiceLinks != nil { + i-- + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.RuntimeClassName != nil { + i -= len(*m.RuntimeClassName) + copy(dAtA[i:], *m.RuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if m.ShareProcessNamespace != nil { + i-- + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.DNSConfig != nil { + { + size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1393,13 +1699,13 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0x92 + dAtA[i] = 0xb2 } } - if len(m.TopologySpreadConstraints) > 0 { - for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1409,32 +1715,43 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0x8a + dAtA[i] = 0xa2 } } - if m.DNSConfig != nil { - { - size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0x82 + dAtA[i] = 0x92 } - i -= len(m.DNSPolicy) - copy(dAtA[i:], m.DNSPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) - i-- - dAtA[i] = 0x7a - if m.TerminationGracePeriodSeconds != nil { - i = 
encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) - i-- - dAtA[i] = 0x70 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } } if m.SecurityContext != nil { { @@ -1446,17 +1763,7 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x6a - } - if m.ShareProcessNamespace != nil { - i-- - if *m.ShareProcessNamespace { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x60 + dAtA[i] = 0x72 } i-- if m.HostIPC { @@ -1465,7 +1772,7 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x58 + dAtA[i] = 0x68 i-- if m.HostPID { dAtA[i] = 1 @@ -1473,7 +1780,7 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x50 + dAtA[i] = 0x60 i-- if m.HostNetwork { dAtA[i] = 1 @@ -1481,62 +1788,12 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x48 - if m.Priority != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) - i-- - dAtA[i] = 0x40 - } - i -= len(m.PriorityClassName) - copy(dAtA[i:], m.PriorityClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) - i-- - dAtA[i] = 0x3a - if len(m.ImagePullSecrets) > 0 { - for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.Tolerations) > 0 { - for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - i -= len(m.SchedulerName) - copy(dAtA[i:], m.SchedulerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + dAtA[i] = 0x58 + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) i-- - dAtA[i] = 0x22 - if m.Affinity != nil { - { - size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + dAtA[i] = 0x42 if len(m.NodeSelector) > 0 { keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) for k := range m.NodeSelector { @@ -1558,14 +1815,33 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x3a } } - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i -= len(m.DNSPolicy) + copy(dAtA[i:], m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x32 + if m.TerminationGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + i-- + dAtA[i] = 0x20 + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + 
size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } @@ -1836,840 +2112,2420 @@ func (m *ServiceTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *ContainerRuntimeSettings) Size() (n int) { - if m == nil { - return 0 - } + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Nice != nil { - l = m.Nice.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.IONice != nil { - l = m.IONice.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *IONiceSettings) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Class != nil { - n += 1 + sovGenerated(uint64(*m.Class)) - } - if m.ClassData != nil { - n += 1 + sovGenerated(uint64(*m.ClassData)) +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NiceSettings) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Adjustment != nil { - n += 1 + sovGenerated(uint64(*m.Adjustment)) - } - return n +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ObjectMeta) Size() (n int) { - if m == nil { - return 0 - } +func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.Ephemeral != 
nil { + { + size, err := m.Ephemeral.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } - return n -} - -func (m *PartialObjectMeta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GenerateName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.Projected != nil { + { + size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } - if len(m.OwnerReferences) > 0 { - for _, e := range m.OwnerReferences { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } - return n -} - -func (m *PersistentVolumeClaim) Size() (n int) { - if m == nil { - return 0 + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - var l int - _ = l - l = m.PartialObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodRuntimeSettings) Size() (n int) { - if m == nil { - return 0 + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - var l int - _ = l - if len(m.PodAnnotations) > 0 { - for k, v := range m.PodAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.AzureDisk != nil { + { + size, err := 
m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ServiceAccountAnnotations) > 0 { - for k, v := range m.ServiceAccountAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - if m.AutomountServiceAccountToken != nil { - n += 2 + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - if m.Affinity != nil { - l = m.Affinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - l = len(m.SchedulerName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - l = len(m.PriorityClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 1 + sovGenerated(uint64(*m.Priority)) + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } - if len(m.ReadinessGates) > 0 { - for _, e := range m.ReadinessGates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Cinder != nil { + { 
+ size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x6a } - if m.RuntimeClassName != nil { - l = len(*m.RuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EnableServiceLinks != nil { - n += 3 - } - if len(m.TopologySpreadConstraints) > 0 { - for _, e := range m.TopologySpreadConstraints { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.FlexVolume != nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - return n -} - -func (m *PodSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - if m.Affinity != nil { - l = m.Affinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.SchedulerName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PersistentVolumeClaim != nil { + { + size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x4a } - l = len(m.PriorityClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 1 + sovGenerated(uint64(*m.Priority)) - } - n += 2 - n += 2 - n += 2 - if m.ShareProcessNamespace != nil { - n += 2 - } - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - l = len(m.DNSPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - if len(m.TopologySpreadConstraints) > 0 { - for _, e := range m.TopologySpreadConstraints { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 
0x32 } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) + if m.EmptyDir != nil { + { + size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerRuntimeSettings) Size() (n int) { + if m == nil { + return 0 } + var l int + _ = l l = m.Resources.Size() - n += 2 + l + sovGenerated(uint64(l)) + n += 1 + l + sovGenerated(uint64(l)) if m.LivenessProbe != nil { l = m.LivenessProbe.Size() - n += 2 + l + sovGenerated(uint64(l)) + n += 1 + l + sovGenerated(uint64(l)) } if m.ReadinessProbe != nil { l = m.ReadinessProbe.Size() - n += 2 + l + sovGenerated(uint64(l)) + n += 1 + l + sovGenerated(uint64(l)) } if m.Lifecycle != nil { l = m.Lifecycle.Size() - n += 2 + l + sovGenerated(uint64(l)) + n += 1 + l + sovGenerated(uint64(l)) } - if m.ContainerSecurityContext != nil { - l = m.ContainerSecurityContext.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { + if m.Nice != nil { + l = m.Nice.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IONice != nil { + l = m.IONice.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } -func (m *PodTemplateSpec) Size() (n int) { +func (m *EphemeralVolumeSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Controller.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.VolumeClaimTemplate != nil { + l = m.VolumeClaimTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *RuntimeSettings) Size() (n int) { +func (m *IONiceSettings) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Pod 
!= nil { - l = m.Pod.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Class != nil { + n += 1 + sovGenerated(uint64(*m.Class)) } - if m.Container != nil { - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ClassData != nil { + n += 1 + sovGenerated(uint64(*m.ClassData)) } return n } -func (m *ServicePort) Size() (n int) { +func (m *NiceSettings) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - n += 1 + sovGenerated(uint64(m.NodePort)) + if m.Adjustment != nil { + n += 1 + sovGenerated(uint64(*m.Adjustment)) + } return n } -func (m *ServiceSpec) Size() (n int) { +func (m *ObjectMeta) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - l = len(m.ClusterIP) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PartialObjectMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) + l = len(m.GenerateName) n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - l = len(m.LoadBalancerIP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - l = len(m.ExternalTrafficPolicy) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) - if m.SessionAffinityConfig != nil { - l = m.SessionAffinityConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *ServiceTemplateSpec) Size() (n int) { +func (m *PersistentVolumeClaim) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() + l = m.PartialObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ContainerRuntimeSettings) String() string { - if this == nil 
{ - return "nil" +func (m *PersistentVolumeClaimTemplate) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForEnvFrom := "[]EnvFromSource{" - for _, f := range this.EnvFrom { - repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = m.PartialObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodRuntimeSettings) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForEnvFrom += "}" - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + var l int + _ = l + if len(m.PodAnnotations) > 0 { + for k, v := range m.PodAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - repeatedStringForEnv += "}" - s := strings.Join([]string{`&ContainerRuntimeSettings{`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "v1.Probe", 1) + `,`, - `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "v1.Probe", 1) + `,`, - `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "v1.Lifecycle", 1) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, - `Nice:` + strings.Replace(this.Nice.String(), "NiceSettings", "NiceSettings", 1) + `,`, - `IONice:` + strings.Replace(this.IONice.String(), "IONiceSettings", "IONiceSettings", 1) + `,`, - `EnvFrom:` + repeatedStringForEnvFrom + `,`, - `Env:` + repeatedStringForEnv + `,`, - `}`, - }, "") - return s -} -func (this *IONiceSettings) String() string { - if this == nil { - return "nil" + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&IONiceSettings{`, - `Class:` + valueToStringGenerated(this.Class) + `,`, - `ClassData:` + valueToStringGenerated(this.ClassData) + `,`, - `}`, - }, "") - return s -} -func (this *NiceSettings) String() string { - if this == nil { - return "nil" + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServiceAccountAnnotations) > 0 { + for k, v := range m.ServiceAccountAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&NiceSettings{`, - `Adjustment:` + valueToStringGenerated(this.Adjustment) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMeta) String() string { - if this == nil { - return "nil" + if m.AutomountServiceAccountToken != nil { + n += 2 } - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := 
"map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForLabels += "}" - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + l = len(m.SchedulerName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&ObjectMeta{`, - `Labels:` + mapStringForLabels + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `}`, - }, "") - return s -} -func (this *PartialObjectMeta) String() string { - if this == nil { - return "nil" + l = len(m.PriorityClassName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 1 + sovGenerated(uint64(*m.Priority)) } - repeatedStringForOwnerReferences := "[]OwnerReference{" - for _, f := range this.OwnerReferences { - repeatedStringForOwnerReferences += fmt.Sprintf("%v", f) + "," + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForOwnerReferences += "}" - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + if m.EnableServiceLinks != nil { + n += 3 } - mapStringForLabels += "}" - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) + if len(m.TopologySpreadConstraints) > 0 { + for _, e := range m.TopologySpreadConstraints { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + return n +} + +func (m *PodSpec) Size() (n int) { + if m == nil { + return 0 } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&PartialObjectMeta{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Labels:` + mapStringForLabels + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaim) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeClaim{`, - `PartialObjectMeta:` + strings.Replace(strings.Replace(this.PartialObjectMeta.String(), "PartialObjectMeta", 
"PartialObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "PersistentVolumeClaimSpec", "v1.PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "PersistentVolumeClaimStatus", "v1.PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodRuntimeSettings) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) } - repeatedStringForImagePullSecrets += "}" - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - repeatedStringForTolerations += "}" - repeatedStringForReadinessGates := "[]PodReadinessGate{" - for _, f := range this.ReadinessGates { - repeatedStringForReadinessGates += fmt.Sprintf("%v", f) + "," + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForReadinessGates += "}" - repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" - for _, f := range this.TopologySpreadConstraints { - repeatedStringForTopologySpreadConstraints += fmt.Sprintf("%v", f) + "," + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForTopologySpreadConstraints += "}" - keysForPodAnnotations := make([]string, 0, len(this.PodAnnotations)) - for k := range this.PodAnnotations { - keysForPodAnnotations = append(keysForPodAnnotations, k) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForPodAnnotations) - mapStringForPodAnnotations := "map[string]string{" - for _, k := range keysForPodAnnotations { - mapStringForPodAnnotations += fmt.Sprintf("%v: %v,", k, this.PodAnnotations[k]) + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - mapStringForPodAnnotations += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + l = len(m.PriorityClassName) + n 
+= 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) } - mapStringForNodeSelector += "}" - keysForServiceAccountAnnotations := make([]string, 0, len(this.ServiceAccountAnnotations)) - for k := range this.ServiceAccountAnnotations { - keysForServiceAccountAnnotations = append(keysForServiceAccountAnnotations, k) + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForServiceAccountAnnotations) - mapStringForServiceAccountAnnotations := "map[string]string{" - for _, k := range keysForServiceAccountAnnotations { - mapStringForServiceAccountAnnotations += fmt.Sprintf("%v: %v,", k, this.ServiceAccountAnnotations[k]) + if m.ShareProcessNamespace != nil { + n += 3 } - mapStringForServiceAccountAnnotations += "}" - s := strings.Join([]string{`&PodRuntimeSettings{`, - `PodAnnotations:` + mapStringForPodAnnotations + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `ServiceAccountAnnotations:` + mapStringForServiceAccountAnnotations + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `ReadinessGates:` + repeatedStringForReadinessGates + `,`, - `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, - `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, - `}`, - }, "") - return s -} -func (this *PodSpec) String() string { - if this == nil { - return "nil" + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 2 + l + sovGenerated(uint64(l)) } - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + if m.EnableServiceLinks != nil { + n += 3 } - repeatedStringForTolerations += "}" - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + if len(m.TopologySpreadConstraints) > 0 { + for _, e := range m.TopologySpreadConstraints { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - repeatedStringForImagePullSecrets += "}" - repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" - for _, f := range this.TopologySpreadConstraints { - repeatedStringForTopologySpreadConstraints += fmt.Sprintf("%v", f) + "," + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } } - repeatedStringForTopologySpreadConstraints += "}" - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + if len(m.Env) > 0 { + for _, e := range m.Env 
{ + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - repeatedStringForVolumes += "}" - repeatedStringForInitContainers := "[]Container{" - for _, f := range this.InitContainers { - repeatedStringForInitContainers += fmt.Sprintf("%v", f) + "," + l = m.Resources.Size() + n += 2 + l + sovGenerated(uint64(l)) + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) } - repeatedStringForInitContainers += "}" - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) } - repeatedStringForEnv += "}" - repeatedStringForVolumeMounts := "[]VolumeMount{" - for _, f := range this.VolumeMounts { - repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 2 + l + sovGenerated(uint64(l)) } - repeatedStringForVolumeMounts += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + if m.ContainerSecurityContext != nil { + l = m.ContainerSecurityContext.Size() + n += 2 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&PodSpec{`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, - `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, - `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, - `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`, - `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `InitContainers:` + repeatedStringForInitContainers + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "v1.Probe", 1) + `,`, - `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", 
this.ReadinessProbe), "Probe", "v1.Probe", 1) + `,`, - `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "v1.Lifecycle", 1) + `,`, - `ContainerSecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.ContainerSecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, - `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, - `}`, - }, "") - return s + return n } -func (this *PodTemplateSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, - `Controller:` + strings.Replace(strings.Replace(this.Controller.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + +func (m *PodTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Controller.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RuntimeSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pod != nil { + l = m.Pod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServicePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore 
!= nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Ephemeral != nil { + l = m.Ephemeral.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerRuntimeSettings) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&ContainerRuntimeSettings{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "v1.Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "v1.Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "v1.Lifecycle", 1) + `,`, + `SecurityContext:` + 
strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, + `Nice:` + strings.Replace(this.Nice.String(), "NiceSettings", "NiceSettings", 1) + `,`, + `IONice:` + strings.Replace(this.IONice.String(), "IONiceSettings", "IONiceSettings", 1) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `Env:` + repeatedStringForEnv + `,`, `}`, }, "") return s } -func (this *RuntimeSettings) String() string { +func (this *EphemeralVolumeSource) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RuntimeSettings{`, - `Pod:` + strings.Replace(this.Pod.String(), "PodRuntimeSettings", "PodRuntimeSettings", 1) + `,`, - `Container:` + strings.Replace(this.Container.String(), "ContainerRuntimeSettings", "ContainerRuntimeSettings", 1) + `,`, + s := strings.Join([]string{`&EphemeralVolumeSource{`, + `VolumeClaimTemplate:` + strings.Replace(this.VolumeClaimTemplate.String(), "PersistentVolumeClaimTemplate", "PersistentVolumeClaimTemplate", 1) + `,`, `}`, }, "") return s } -func (this *ServicePort) String() string { +func (this *IONiceSettings) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ServicePort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + s := strings.Join([]string{`&IONiceSettings{`, + `Class:` + valueToStringGenerated(this.Class) + `,`, + `ClassData:` + valueToStringGenerated(this.ClassData) + `,`, `}`, }, "") return s } -func (this *ServiceSpec) String() string { +func (this *NiceSettings) String() string { if this == nil { return "nil" } - repeatedStringForPorts := "[]ServicePort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - s := strings.Join([]string{`&ServiceSpec{`, - `Ports:` + repeatedStringForPorts + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, - `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, - `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, - `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, - `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "v1.SessionAffinityConfig", 1) + `,`, + s := strings.Join([]string{`&NiceSettings{`, + `Adjustment:` + valueToStringGenerated(this.Adjustment) + `,`, `}`, }, "") return s } -func (this *ServiceTemplateSpec) String() string { +func (this *ObjectMeta) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ServiceTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + 
mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMeta) String() string { + if this == nil { + return "nil" + } + repeatedStringForOwnerReferences := "[]OwnerReference{" + for _, f := range this.OwnerReferences { + repeatedStringForOwnerReferences += fmt.Sprintf("%v", f) + "," + } + repeatedStringForOwnerReferences += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&PartialObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `PartialObjectMeta:` + strings.Replace(strings.Replace(this.PartialObjectMeta.String(), "PartialObjectMeta", "PartialObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "PersistentVolumeClaimSpec", "v1.PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "PersistentVolumeClaimStatus", "v1.PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimTemplate{`, + `PartialObjectMeta:` + strings.Replace(strings.Replace(this.PartialObjectMeta.String(), "PartialObjectMeta", "PartialObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "PersistentVolumeClaimSpec", "v1.PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodRuntimeSettings) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + } + repeatedStringForImagePullSecrets += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForReadinessGates := "[]PodReadinessGate{" + for _, f := range this.ReadinessGates { + repeatedStringForReadinessGates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForReadinessGates += "}" + repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" + for _, f := range this.TopologySpreadConstraints { + repeatedStringForTopologySpreadConstraints += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTopologySpreadConstraints += "}" + keysForPodAnnotations := make([]string, 0, len(this.PodAnnotations)) + for k := range this.PodAnnotations { + keysForPodAnnotations = append(keysForPodAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodAnnotations) + mapStringForPodAnnotations := "map[string]string{" + for _, k := range keysForPodAnnotations { + mapStringForPodAnnotations += fmt.Sprintf("%v: %v,", k, this.PodAnnotations[k]) + } + mapStringForPodAnnotations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + keysForServiceAccountAnnotations := make([]string, 0, len(this.ServiceAccountAnnotations)) + for k := range this.ServiceAccountAnnotations { + keysForServiceAccountAnnotations = append(keysForServiceAccountAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForServiceAccountAnnotations) + mapStringForServiceAccountAnnotations := "map[string]string{" + for _, k := range keysForServiceAccountAnnotations { + mapStringForServiceAccountAnnotations += fmt.Sprintf("%v: %v,", k, this.ServiceAccountAnnotations[k]) + } + mapStringForServiceAccountAnnotations += "}" + s := strings.Join([]string{`&PodRuntimeSettings{`, + `PodAnnotations:` + mapStringForPodAnnotations + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `ServiceAccountAnnotations:` + mapStringForServiceAccountAnnotations + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `ReadinessGates:` + repeatedStringForReadinessGates + `,`, + `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` 
+ valueToStringGenerated(this.EnableServiceLinks) + `,`, + `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + } + repeatedStringForImagePullSecrets += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := range this.InitContainers { + repeatedStringForInitContainers += fmt.Sprintf("%v", f) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" + for _, f := range this.TopologySpreadConstraints { + repeatedStringForTopologySpreadConstraints += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTopologySpreadConstraints += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + repeatedStringForVolumes + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `RuntimeClassName:` + 
valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, + `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "v1.Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "v1.Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "v1.Lifecycle", 1) + `,`, + `ContainerSecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.ContainerSecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `Controller:` + strings.Replace(strings.Replace(this.Controller.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeSettings) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeSettings{`, + `Pod:` + strings.Replace(this.Pod.String(), "PodRuntimeSettings", "PodRuntimeSettings", 1) + `,`, + `Container:` + strings.Replace(this.Container.String(), "ContainerRuntimeSettings", "ContainerRuntimeSettings", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ServicePort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + repeatedStringForPorts + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "v1.SessionAffinityConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "v1.HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "v1.EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "v1.GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "v1.AWSElasticBlockStoreVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "v1.SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "v1.NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "v1.ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "v1.GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "v1.PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "v1.RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "v1.FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "v1.CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "v1.CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "v1.FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "v1.DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "v1.FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "v1.AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "v1.ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "v1.VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "v1.QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "v1.AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", 
"v1.PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "v1.PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "v1.ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "v1.ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "v1.StorageOSVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "v1.CSIVolumeSource", 1) + `,`, + `Ephemeral:` + strings.Replace(this.Ephemeral.String(), "EphemeralVolumeSource", "EphemeralVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerRuntimeSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerRuntimeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &v1.Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &v1.Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &v1.Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &v1.SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nice", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Nice == nil { + m.Nice = &NiceSettings{} + } + if err := m.Nice.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IONice", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IONice == nil { + m.IONice = &IONiceSettings{} + } + if err := m.IONice.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, v1.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EphemeralVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EphemeralVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeClaimTemplate == nil { + m.VolumeClaimTemplate = &PersistentVolumeClaimTemplate{} + } + if err := m.VolumeClaimTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IONiceSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 
0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IONiceSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IONiceSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Class = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassData", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClassData = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NiceSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NiceSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NiceSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Adjustment", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Adjustment = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2692,17 +4548,17 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerRuntimeSettings: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMeta: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerRuntimeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2712,28 +4568,91 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2750,26 +4669,117 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LivenessProbe == nil { - m.LivenessProbe = &v1.Probe{} - } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2796,52 +4806,107 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &v1.Probe{} - } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + if m.Annotations == nil { + m.Annotations = make(map[string]string) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + 
return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lifecycle == nil { - m.Lifecycle = &v1.Lifecycle{} - } - if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex - case 5: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2868,52 +4933,64 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &v1.SecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.OwnerReferences = append(m.OwnerReferences, v11.OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nice", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Nice == nil { - m.Nice = &NiceSettings{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.Nice.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 7: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IONice", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PartialObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2940,16 +5017,13 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IONice == nil { - m.IONice = &IONiceSettings{} - } - if err := m.IONice.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PartialObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2976,14 +5050,13 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, v1.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3010,8 +5083,7 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3036,7 +5108,7 @@ func (m *ContainerRuntimeSettings) Unmarshal(dAtA []byte) error { } return nil } -func (m *IONiceSettings) Unmarshal(dAtA []byte) error { +func (m *PersistentVolumeClaimTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3059,17 +5131,17 @@ func (m *IONiceSettings) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IONiceSettings: wiretype end group for non-group") + return fmt.Errorf("proto: PersistentVolumeClaimTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IONiceSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PersistentVolumeClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialObjectMeta", wireType) } - var v int32 + var msglen int 
for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3079,17 +5151,30 @@ func (m *IONiceSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Class = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartialObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClassData", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3099,12 +5184,25 @@ func (m *IONiceSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ClassData = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3126,7 +5224,7 @@ func (m *IONiceSettings) Unmarshal(dAtA []byte) error { } return nil } -func (m *NiceSettings) Unmarshal(dAtA []byte) error { +func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3149,17 +5247,17 @@ func (m *NiceSettings) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NiceSettings: wiretype end group for non-group") + return fmt.Errorf("proto: PodRuntimeSettings: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NiceSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodRuntimeSettings: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Adjustment", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAnnotations", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3169,65 +5267,122 @@ func (m *NiceSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Adjustment = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthGenerated } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.PodAnnotations == nil { + m.PodAnnotations = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.PodAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3254,8 +5409,8 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) } var mapkey string var mapvalue string @@ -3350,11 +5505,43 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Labels[mapkey] = mapvalue + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountAnnotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3381,8 +5568,8 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Annotations == nil { - m.Annotations = make(map[string]string) + if m.ServiceAccountAnnotations == nil { + m.ServiceAccountAnnotations = make(map[string]string) } var mapkey string var mapvalue string @@ -3477,61 +5664,236 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Annotations[mapkey] = mapvalue + m.ServiceAccountAnnotations[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &v1.PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if iNdEx >= l { + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.Affinity == nil { + m.Affinity = &v1.Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v1.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMeta: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3559,13 +5921,33 @@ func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3575,27 +5957,29 @@ func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.GenerateName = string(dAtA[iNdEx:postIndex]) + m.ReadinessGates = append(m.ReadinessGates, v1.PodReadinessGate{}) + if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3623,11 +6007,33 @@ func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.RuntimeClassName = &s iNdEx = postIndex - case 4: + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableServiceLinks = &b + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3654,107 +6060,150 @@ func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, v1.TopologySpreadConstraint{}) + if err := 
m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break } } - m.Labels[mapkey] = mapvalue + m.TerminationGracePeriodSeconds = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSPolicy = k8s_io_api_core_v1.DNSPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3781,8 +6230,8 @@ func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Annotations == nil { - m.Annotations = make(map[string]string) + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) } var mapkey string var mapvalue string @@ -3877,13 +6326,13 @@ func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Annotations[mapkey] = mapvalue + m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex - case 6: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3893,79 +6342,87 @@ func (m *PartialObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.OwnerReferences = append(m.OwnerReferences, v11.OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 11: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.HostNetwork = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.HostPID = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.HostIPC = bool(v != 0) + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartialObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3992,13 +6449,16 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PartialObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecurityContext == nil { + m.SecurityContext = &v1.PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4025,13 +6485,14 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4058,63 +6519,48 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Affinity == nil { + m.Affinity = &v1.Affinity{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { - return io.ErrUnexpectedEOF + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodRuntimeSettings: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodRuntimeSettings: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.SchedulerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAnnotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4141,107 +6587,14 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodAnnotations == nil { - m.PodAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - 
if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.InitContainers = append(m.InitContainers, v1.Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.PodAnnotations[mapkey] = mapvalue iNdEx = postIndex - case 2: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4268,107 +6621,14 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return 
ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Tolerations = append(m.Tolerations, v1.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4396,11 +6656,31 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 26: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountAnnotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4427,107 +6707,16 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ServiceAccountAnnotations == nil { - m.ServiceAccountAnnotations = make(map[string]string) + if m.DNSConfig == nil { + m.DNSConfig = &v1.PodDNSConfig{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ServiceAccountAnnotations[mapkey] = mapvalue iNdEx = postIndex - case 5: + case 27: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShareProcessNamespace", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -4545,10 +6734,10 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.AutomountServiceAccountToken = &b - case 6: + m.ShareProcessNamespace = &b + case 29: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4576,11 +6765,33 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NodeName = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.RuntimeClassName = &s iNdEx = postIndex - case 7: + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableServiceLinks = &b + case 33: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4607,16 +6818,46 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &v1.PodSecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, v1.TopologySpreadConstraint{}) + if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 1001: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 1002: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4643,14 +6884,14 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + case 1003: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4677,18 +6918,15 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Affinity == nil { - m.Affinity = &v1.Affinity{} - } - if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + case 1004: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4698,27 +6936,31 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SchedulerName = string(dAtA[iNdEx:postIndex]) + if m.LivenessProbe == nil { + m.LivenessProbe = &v1.Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + case 1005: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4745,16 +6987,18 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tolerations = append(m.Tolerations, v1.Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ReadinessProbe == nil { + m.ReadinessProbe = &v1.Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 1006: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for 
field Lifecycle", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4764,29 +7008,33 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + if m.Lifecycle == nil { + m.Lifecycle = &v1.Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + case 1007: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerSecurityContext", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4796,15 +7044,31 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Priority = &v - case 14: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerSecurityContext == nil { + m.ContainerSecurityContext = &v1.SecurityContext{} + } + if err := m.ContainerSecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1008: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4831,16 +7095,66 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ReadinessGates = append(m.ReadinessGates, v1.PodReadinessGate{}) - if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, v1.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 15: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4850,30 +7164,30 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.RuntimeClassName = &s + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4883,16 +7197,28 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.EnableServiceLinks = &b - case 17: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Controller.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4919,8 +7245,7 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, v1.TopologySpreadConstraint{}) - if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4945,7 +7270,7 @@ func (m *PodRuntimeSettings) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSpec) Unmarshal(dAtA []byte) error { +func (m *RuntimeSettings) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4968,17 +7293,17 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") + return fmt.Errorf("proto: RuntimeSettings: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RuntimeSettings: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field ServiceAccountName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pod", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4988,27 +7313,31 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + if m.Pod == nil { + m.Pod = &PodRuntimeSettings{} + } + if err := m.Pod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5035,109 +7364,68 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) + if m.Container == nil { + m.Container = &ContainerRuntimeSettings{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := 
m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - m.NodeSelector[mapkey] = mapvalue - iNdEx = postIndex - case 3: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServicePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5147,33 +7435,48 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Affinity == nil { - m.Affinity = &v1.Affinity{} + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) } - var stringLen uint64 + m.NodePort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5183,27 +7486,64 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.NodePort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.SchedulerName = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5230,16 +7570,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tolerations = append(m.Tolerations, v1.Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, ServicePort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5249,29 +7589,27 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ClusterIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5299,13 +7637,13 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + m.Type = k8s_io_api_core_v1.ServiceType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5315,17 +7653,29 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= 
int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Priority = &v - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5335,17 +7685,29 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.HostNetwork = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5355,17 +7717,29 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.HostPID = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5375,17 +7749,29 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.HostIPC = bool(v != 0) - case 12: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalTrafficPolicy = k8s_io_api_core_v1.ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareProcessNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) } - var v int + m.HealthCheckNodePort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -5395,16 +7781,14 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.HealthCheckNodePort |= int32(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ShareProcessNamespace = &b - case 13: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5431,38 +7815,68 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &v1.PodSecurityContext{} + if m.SessionAffinityConfig == nil { + m.SessionAffinityConfig = &v1.SessionAffinityConfig{} } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.TerminationGracePeriodSeconds = &v - case 15: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5472,27 +7886,28 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DNSPolicy = k8s_io_api_core_v1.DNSPolicy(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 16: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field DNSConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5519,18 +7934,65 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DNSConfig == nil { - m.DNSConfig = &v1.PodDNSConfig{} - } - if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 17: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5540,29 +8002,27 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, v1.TopologySpreadConstraint{}) - if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5589,14 +8049,63 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, v1.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + 
if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5623,16 +8132,18 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.InitContainers = append(m.InitContainers, v1.Container{}) - if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.HostPath == nil { + m.HostPath = &v1.HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 20: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5642,27 +8153,31 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + if m.EmptyDir == nil { + m.EmptyDir = &v1.EmptyDirVolumeSource{} + } + if err := m.EmptyDir.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 21: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5689,14 +8204,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &v1.GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 22: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5723,13 +8240,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5756,16 +8276,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LivenessProbe == nil { - m.LivenessProbe = &v1.Probe{} + if m.Secret == nil { + m.Secret = &v1.SecretVolumeSource{} } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5792,16 +8312,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &v1.Probe{} + if m.NFS == nil { + m.NFS = &v1.NFSVolumeSource{} } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 25: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5828,16 +8348,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lifecycle == nil { - m.Lifecycle = &v1.Lifecycle{} + if m.ISCSI == nil { + m.ISCSI = &v1.ISCSIVolumeSource{} } - if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 26: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerSecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5864,16 +8384,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ContainerSecurityContext == nil { - m.ContainerSecurityContext = &v1.SecurityContext{} + if m.Glusterfs == nil { + m.Glusterfs = &v1.GlusterfsVolumeSource{} } - if err := m.ContainerSecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 27: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5900,64 +8420,16 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, v1.VolumeMount{}) - if err := 
m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.PersistentVolumeClaim == nil { + m.PersistentVolumeClaim = &v1.PersistentVolumeClaimVolumeSource{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.PersistentVolumeClaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5984,13 +8456,16 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RBD == nil { + m.RBD = &v1.RBDVolumeSource{} + } + if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6017,13 +8492,16 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Controller.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.FlexVolume == nil { + m.FlexVolume = &v1.FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6050,63 +8528,16 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Cinder == nil { + m.Cinder = &v1.CinderVolumeSource{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*RuntimeSettings) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeSettings: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeSettings: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pod", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6133,16 +8564,16 @@ func (m *RuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Pod == nil { - m.Pod = &PodRuntimeSettings{} + if m.CephFS == nil { + m.CephFS = &v1.CephFSVolumeSource{} } - if err := m.Pod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6169,68 +8600,18 @@ func (m *RuntimeSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Container == nil { - m.Container = &ContainerRuntimeSettings{} + if m.Flocker == nil { + m.Flocker = &v1.FlockerVolumeSource{} } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServicePort) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6240,29 +8621,33 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) 
<< shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.DownwardAPI == nil { + m.DownwardAPI = &v1.DownwardAPIVolumeSource{} + } + if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) } - m.Port = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6272,16 +8657,33 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } - m.NodePort = 0 + if m.FC == nil { + m.FC = &v1.FCVolumeSource{} + } + if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6291,64 +8693,31 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodePort |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthGenerated } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.AzureFile == nil { + m.AzureFile = &v1.AzureFileVolumeSource{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6375,16 +8744,18 @@ func (m 
*ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, ServicePort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConfigMap == nil { + m.ConfigMap = &v1.ConfigMapVolumeSource{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6394,29 +8765,33 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIP = string(dAtA[iNdEx:postIndex]) + if m.VsphereVolume == nil { + m.VsphereVolume = &v1.VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6426,29 +8801,33 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = k8s_io_api_core_v1.ServiceType(dAtA[iNdEx:postIndex]) + if m.Quobyte == nil { + m.Quobyte = &v1.QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6458,29 +8837,33 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) + if m.AzureDisk == nil { + m.AzureDisk = &v1.AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: 
+ case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6490,29 +8873,33 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &v1.PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6522,29 +8909,33 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + if m.PortworxVolume == nil { + m.PortworxVolume = &v1.PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: + case 25: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6554,29 +8945,33 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = k8s_io_api_core_v1.ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + if m.ScaleIO == nil { + m.ScaleIO = &v1.ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType) } - m.HealthCheckNodePort = 0 
+ var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6586,14 +8981,31 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.HealthCheckNodePort |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 9: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Projected == nil { + m.Projected = &v1.ProjectedVolumeSource{} + } + if err := m.Projected.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6620,66 +9032,16 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SessionAffinityConfig == nil { - m.SessionAffinityConfig = &v1.SessionAffinityConfig{} + if m.StorageOS == nil { + m.StorageOS = &v1.StorageOSVolumeSource{} } - if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceTemplateSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 28: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6706,13 +9068,16 @@ func (m *ServiceTemplateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.CSI == nil { + m.CSI = &v1.CSIVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 29: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ephemeral", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6739,7 +9104,10 @@ func (m *ServiceTemplateSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Ephemeral 
== nil { + m.Ephemeral = &EphemeralVolumeSource{} + } + if err := m.Ephemeral.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/kmodules.xyz/offshoot-api/api/v1/generated.proto b/vendor/kmodules.xyz/offshoot-api/api/v1/generated.proto index 437a4154..a13b7837 100644 --- a/vendor/kmodules.xyz/offshoot-api/api/v1/generated.proto +++ b/vendor/kmodules.xyz/offshoot-api/api/v1/generated.proto @@ -88,6 +88,32 @@ message ContainerRuntimeSettings { repeated k8s.io.api.core.v1.EnvVar env = 9; } +// Represents an ephemeral volume that is handled by a normal storage driver. +message EphemeralVolumeSource { + // Will be used to create a stand-alone PVC to provision the volume. + // The pod in which this EphemeralVolumeSource is embedded will be the + // owner of the PVC, i.e. the PVC will be deleted together with the + // pod. The name of the PVC will be `-` where + // `` is the name from the `PodSpec.Volumes` array + // entry. Pod validation will reject the pod if the concatenated name + // is not valid for a PVC (for example, too long). + // + // An existing PVC with that name that is not owned by the pod + // will *not* be used for the pod to avoid using an unrelated + // volume by mistake. Starting the pod is then blocked until + // the unrelated PVC is removed. If such a pre-created PVC is + // meant to be used by the pod, the PVC has to updated with an + // owner reference to the pod once the pod exists. Normally + // this should not be necessary, but it may be useful when + // manually reconstructing a broken cluster. + // + // This field is read-only and no changes will be made by Kubernetes + // to the PVC after it has been created. + // + // Required, must not be nil. + optional PersistentVolumeClaimTemplate volumeClaimTemplate = 1; +} + // https://linux.die.net/man/1/ionice message IONiceSettings { optional int32 class = 1; @@ -202,6 +228,23 @@ message PersistentVolumeClaim { optional k8s.io.api.core.v1.PersistentVolumeClaimStatus status = 3; } +// PersistentVolumeClaimTemplate is used to produce +// PersistentVolumeClaim objects as part of an EphemeralVolumeSource. +message PersistentVolumeClaimTemplate { + // May contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. + // + // +optional + optional PartialObjectMeta metadata = 1; + + // The specification for the PersistentVolumeClaim. The entire content is + // copied unchanged into the PVC that gets created from this + // template. The same fields as in a PersistentVolumeClaim + // are also valid here. + optional k8s.io.api.core.v1.PersistentVolumeClaimSpec spec = 2; +} + message PodRuntimeSettings { // PodAnnotations are the annotations that will be attached with the respective Pod // +optional @@ -310,70 +353,80 @@ message PodRuntimeSettings { } message PodSpec { - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - // +optional - optional string serviceAccountName = 1; - - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. 
- // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // +optional - map nodeSelector = 2; - - // If specified, the pod's scheduling constraints + // List of volumes that can be mounted by containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes // +optional - optional k8s.io.api.core.v1.Affinity affinity = 3; + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + repeated Volume volumes = 1; - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - optional string schedulerName = 4; + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge + repeated k8s.io.api.core.v1.Container initContainers = 20; - // If specified, the pod's tolerations. + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down). + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. // +optional - repeated k8s.io.api.core.v1.Toleration tolerations = 5; + optional int64 terminationGracePeriodSeconds = 4; - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. // +optional - repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 6; + optional string dnsPolicy = 6; - // If specified, indicates the pod's priority. "system-node-critical" and - // "system-cluster-critical" are two special keywords which indicate the - // highest priorities with the former being the highest priority. Any other - // name must be defined by creating a PriorityClass object with that name. 
- // If not specified, the pod priority will be default or zero if there is no - // default. + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional - optional string priorityClassName = 7; + // +mapType=atomic + map nodeSelector = 7; - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional - optional int32 priority = 8; + optional string serviceAccountName = 8; // Host networking requested for this pod. Use the host's network namespace. // If this option is set, the ports that will be used must be specified. // Default to false. // +k8s:conversion-gen=false // +optional - optional bool hostNetwork = 9; + optional bool hostNetwork = 11; // Use the host's pid namespace. // Optional: Default to false. // +k8s:conversion-gen=false // +optional - optional bool hostPID = 10; + optional bool hostPID = 12; // Use the host's ipc namespace. // Optional: Default to false. // +k8s:conversion-gen=false // +optional - optional bool hostIPC = 11; + optional bool hostIPC = 13; // Share a single process namespace between all of the containers in a pod. // When this is set containers will be able to view and signal processes from other containers @@ -382,38 +435,70 @@ message PodSpec { // Optional: Default to false. // +k8s:conversion-gen=false // +optional - optional bool shareProcessNamespace = 12; + optional bool shareProcessNamespace = 27; // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional - optional k8s.io.api.core.v1.PodSecurityContext securityContext = 13; + optional k8s.io.api.core.v1.PodSecurityContext securityContext = 14; - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates stop immediately via - // the kill signal (no opportunity to shut down). - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod // +optional - optional int64 terminationGracePeriodSeconds = 14; + // +patchMergeKey=name + // +patchStrategy=merge + repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 15; - // Set DNS policy for the pod. 
- // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. + // If specified, the pod's scheduling constraints // +optional - optional string dnsPolicy = 15; + optional k8s.io.api.core.v1.Affinity affinity = 18; + + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + optional string schedulerName = 19; + + // If specified, the pod's tolerations. + // +optional + repeated k8s.io.api.core.v1.Toleration tolerations = 22; + + // If specified, indicates the pod's priority. "system-node-critical" and + // "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + optional string priorityClassName = 24; + + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + optional int32 priority = 25; // Specifies the DNS parameters of a pod. // Parameters specified here will be merged to the generated DNS // configuration based on DNSPolicy. // +optional - optional k8s.io.api.core.v1.PodDNSConfig dnsConfig = 16; + optional k8s.io.api.core.v1.PodDNSConfig dnsConfig = 26; + + // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + // empty definition that uses the default runtime handler. + // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + // +optional + optional string runtimeClassName = 29; + + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. + // +optional + optional bool enableServiceLinks = 30; // TopologySpreadConstraints describes how a group of pods ought to spread across topology // domains. Scheduler will schedule pods in a way which abides by the constraints. @@ -424,31 +509,7 @@ message PodSpec { // +listType=map // +listMapKey=topologyKey // +listMapKey=whenUnsatisfiable - repeated k8s.io.api.core.v1.TopologySpreadConstraint topologySpreadConstraints = 17; - - // List of volumes that can be mounted by containers belonging to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes - // +optional - // +patchMergeKey=name - // +patchStrategy=merge,retainKeys - repeated k8s.io.api.core.v1.Volume volumes = 18; - - // List of initialization containers belonging to the pod. - // Init containers are executed in order prior to containers being started. 
If any - // init container fails, the pod is considered to have failed and is handled according - // to its restartPolicy. The name for an init container or normal container must be - // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. - // The resourceRequirements of an init container are taken into account during scheduling - // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers - // in a similar fashion. - // Init containers cannot currently be added or removed. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // +patchMergeKey=name - // +patchStrategy=merge - repeated k8s.io.api.core.v1.Container initContainers = 19; + repeated k8s.io.api.core.v1.TopologySpreadConstraint topologySpreadConstraints = 33; // Arguments to the entrypoint. // The docker image's CMD is used if this is not provided. @@ -459,16 +520,16 @@ message PodSpec { // Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional - repeated string args = 20; + repeated string args = 1001; // List of environment variables to set in the container. // Cannot be updated. // +optional - repeated k8s.io.api.core.v1.EnvVar env = 21; + repeated k8s.io.api.core.v1.EnvVar env = 1002; // Compute Resources required by the sidecar container. // +optional - optional k8s.io.api.core.v1.ResourceRequirements resources = 22; + optional k8s.io.api.core.v1.ResourceRequirements resources = 1003; // Periodic probe of container liveness. // Container will be restarted if the probe fails. @@ -477,7 +538,7 @@ message PodSpec { // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional - optional k8s.io.api.core.v1.Probe livenessProbe = 23; + optional k8s.io.api.core.v1.Probe livenessProbe = 1004; // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. @@ -486,25 +547,25 @@ message PodSpec { // To ignore defaulting, set the value to empty ReadynessProbe "{}". // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional - optional k8s.io.api.core.v1.Probe readinessProbe = 24; + optional k8s.io.api.core.v1.Probe readinessProbe = 1005; // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional - optional k8s.io.api.core.v1.Lifecycle lifecycle = 25; + optional k8s.io.api.core.v1.Lifecycle lifecycle = 1006; // Security options the pod should run with. // More info: https://kubernetes.io/docs/concepts/policy/security-context/ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional - optional k8s.io.api.core.v1.SecurityContext containerSecurityContext = 26; + optional k8s.io.api.core.v1.SecurityContext containerSecurityContext = 1007; // Pod volumes to mount into the container's filesystem. // Cannot be updated. 
// +optional // +patchMergeKey=mountPath // +patchStrategy=merge - repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 27; + repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 1008; } // PodTemplateSpec describes the data a pod should have when created from a template @@ -647,3 +708,176 @@ message ServiceTemplateSpec { optional ServiceSpec spec = 2; } +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +message Volume { + // name of the volume. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + optional string name = 1; + + // volumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + optional VolumeSource volumeSource = 2; +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +message VolumeSource { + // hostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + optional k8s.io.api.core.v1.HostPathVolumeSource hostPath = 1; + + // emptyDir represents a temporary directory that shares a pod's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + optional k8s.io.api.core.v1.EmptyDirVolumeSource emptyDir = 2; + + // gcePersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + optional k8s.io.api.core.v1.GCEPersistentDiskVolumeSource gcePersistentDisk = 3; + + // awsElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + optional k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; + + // secret represents a secret that should populate this volume. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + optional k8s.io.api.core.v1.SecretVolumeSource secret = 6; + + // nfs represents an NFS mount on the host that shares a pod's lifetime + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + optional k8s.io.api.core.v1.NFSVolumeSource nfs = 7; + + // iscsi represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://examples.k8s.io/volumes/iscsi/README.md + // +optional + optional k8s.io.api.core.v1.ISCSIVolumeSource iscsi = 8; + + // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // +optional + optional k8s.io.api.core.v1.GlusterfsVolumeSource glusterfs = 9; + + // persistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + optional k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; + + // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/rbd/README.md + // +optional + optional k8s.io.api.core.v1.RBDVolumeSource rbd = 11; + + // flexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + optional k8s.io.api.core.v1.FlexVolumeSource flexVolume = 12; + + // cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + optional k8s.io.api.core.v1.CinderVolumeSource cinder = 13; + + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional k8s.io.api.core.v1.CephFSVolumeSource cephfs = 14; + + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + optional k8s.io.api.core.v1.FlockerVolumeSource flocker = 15; + + // downwardAPI represents downward API about the pod that should populate this volume + // +optional + optional k8s.io.api.core.v1.DownwardAPIVolumeSource downwardAPI = 16; + + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional k8s.io.api.core.v1.FCVolumeSource fc = 17; + + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional k8s.io.api.core.v1.AzureFileVolumeSource azureFile = 18; + + // configMap represents a configMap that should populate this volume + // +optional + optional k8s.io.api.core.v1.ConfigMapVolumeSource configMap = 19; + + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource vsphereVolume = 20; + + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional k8s.io.api.core.v1.QuobyteVolumeSource quobyte = 21; + + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + optional k8s.io.api.core.v1.AzureDiskVolumeSource azureDisk = 22; + + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; + + // projected items for all in one resources secrets, configmaps, and downward API + optional k8s.io.api.core.v1.ProjectedVolumeSource projected = 26; + + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional k8s.io.api.core.v1.PortworxVolumeSource portworxVolume = 24; + + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional k8s.io.api.core.v1.ScaleIOVolumeSource scaleIO = 25; + + // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. 
+ // +optional + optional k8s.io.api.core.v1.StorageOSVolumeSource storageos = 27; + + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // +optional + optional k8s.io.api.core.v1.CSIVolumeSource csi = 28; + + // ephemeral represents a volume that is handled by a cluster storage driver. + // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + // and deleted when the pod is removed. + // + // Use this if: + // a) the volume is only needed while the pod runs, + // b) features of normal volumes like restoring from snapshot or capacity + // tracking are needed, + // c) the storage driver is specified through a storage class, and + // d) the storage driver supports dynamic volume provisioning through + // a PersistentVolumeClaim (see EphemeralVolumeSource for more + // information on the connection between this volume type + // and PersistentVolumeClaim). + // + // Use PersistentVolumeClaim or one of the vendor-specific + // APIs for volumes that persist for longer than the lifecycle + // of an individual pod. + // + // Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + // be used that way - see the documentation of the driver for + // more information. + // + // A pod can use both types of ephemeral volumes and + // persistent volumes at the same time. + // + // +optional + optional EphemeralVolumeSource ephemeral = 29; +} + diff --git a/vendor/kmodules.xyz/offshoot-api/api/v1/openapi_generated.go b/vendor/kmodules.xyz/offshoot-api/api/v1/openapi_generated.go index ae9976dd..c861302c 100644 --- a/vendor/kmodules.xyz/offshoot-api/api/v1/openapi_generated.go +++ b/vendor/kmodules.xyz/offshoot-api/api/v1/openapi_generated.go @@ -30,19 +30,23 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), - "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), - "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), - "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), - "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), - "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), - "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), - "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), - "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), - "kmodules.xyz/offshoot-api/api/v1.RuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_RuntimeSettings(ref), - "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), - "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), - "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": 
schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), + "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), + "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), + "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.RuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_RuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), + "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), } } @@ -137,6 +141,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. } } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists.
Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -364,6 +389,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -567,84 +622,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "schedulerName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", - }, - }, - "tolerations": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + Type: []string{"string"}, + Format: "", + }, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -679,86 +738,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", - Type: []string{"string"}, - Format: "", - }, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), }, + }, + "schedulerName": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", + }, + }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. 
When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -848,7 +928,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -1082,3 +1162,378 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer "kmodules.xyz/offshoot-api/api/v1.ObjectMeta", "kmodules.xyz/offshoot-api/api/v1.ServiceSpec"}, } } + +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} diff --git a/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go b/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go index ab83f5a6..9e81d735 100644 --- a/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go +++ b/vendor/kmodules.xyz/offshoot-api/api/v1/pvc.go @@ -106,7 +106,7 @@ type PersistentVolumeClaim struct { Status core.PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -func (in *PersistentVolumeClaim) ToCorePVC() *core.PersistentVolumeClaim { +func (in *PersistentVolumeClaim) ToAPIObject() *core.PersistentVolumeClaim { if in == nil { return nil } @@ -123,3 +123,274 @@ func (in *PersistentVolumeClaim) ToCorePVC() *core.PersistentVolumeClaim { Status: in.Status, } } + +// PersistentVolumeClaimTemplate is used to produce +// PersistentVolumeClaim objects as part of an EphemeralVolumeSource. +type PersistentVolumeClaimTemplate struct { + // May contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. 
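+ //
+ // (Editorial sketch, not part of the upstream API: the label key/value and the
+ // access mode below are assumptions, used only to show how this template and
+ // its ToAPIObject helper defined further down might be consumed.)
+ //
+ //	tpl := PersistentVolumeClaimTemplate{
+ //		PartialObjectMeta: PartialObjectMeta{
+ //			Labels: map[string]string{"app": "demo"}, // copied into the generated PVC
+ //		},
+ //		Spec: core.PersistentVolumeClaimSpec{
+ //			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
+ //		},
+ //	}
+ //	pvcTemplate := tpl.ToAPIObject() // *core.PersistentVolumeClaimTemplate, usable in an EphemeralVolumeSource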
+ // + // +optional + PartialObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The specification for the PersistentVolumeClaim. The entire content is + // copied unchanged into the PVC that gets created from this + // template. The same fields as in a PersistentVolumeClaim + // are also valid here. + Spec core.PersistentVolumeClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +func (in *PersistentVolumeClaimTemplate) ToAPIObject() *core.PersistentVolumeClaimTemplate { + if in == nil { + return nil + } + return &core.PersistentVolumeClaimTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: in.Name, + GenerateName: in.GenerateName, + Namespace: in.Namespace, + Labels: in.Labels, + Annotations: in.Annotations, + OwnerReferences: in.OwnerReferences, + }, + Spec: in.Spec, + } +} + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +type Volume struct { + // name of the volume. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // volumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` +} + +func (in *Volume) ToAPIObject() *core.Volume { + if in == nil { + return nil + } + return &core.Volume{ + Name: in.Name, + VolumeSource: *in.VolumeSource.ToAPIObject(), + } +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // hostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *core.HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` + // emptyDir represents a temporary directory that shares a pod's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + EmptyDir *core.EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` + // gcePersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *core.GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` + // awsElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *core.AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` + // secret represents a secret that should populate this volume. 
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + Secret *core.SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` + // nfs represents an NFS mount on the host that shares a pod's lifetime + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + NFS *core.NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` + // iscsi represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://examples.k8s.io/volumes/iscsi/README.md + // +optional + ISCSI *core.ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` + // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // +optional + Glusterfs *core.GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` + // persistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *core.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` + // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/rbd/README.md + // +optional + RBD *core.RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` + // flexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *core.FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + Cinder *core.CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *core.CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *core.FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` + // downwardAPI represents downward API about the pod that should populate this volume + // +optional + DownwardAPI *core.DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *core.FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. 
+ // +optional + AzureFile *core.AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` + // configMap represents a configMap that should populate this volume + // +optional + ConfigMap *core.ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *core.VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *core.QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *core.AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *core.PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` + // projected items for all in one resources secrets, configmaps, and downward API + Projected *core.ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *core.PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *core.ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` + // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + StorageOS *core.StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // +optional + CSI *core.CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` + // ephemeral represents a volume that is handled by a cluster storage driver. + // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + // and deleted when the pod is removed. + // + // Use this if: + // a) the volume is only needed while the pod runs, + // b) features of normal volumes like restoring from snapshot or capacity + // tracking are needed, + // c) the storage driver is specified through a storage class, and + // d) the storage driver supports dynamic volume provisioning through + // a PersistentVolumeClaim (see EphemeralVolumeSource for more + // information on the connection between this volume type + // and PersistentVolumeClaim). + // + // Use PersistentVolumeClaim or one of the vendor-specific + // APIs for volumes that persist for longer than the lifecycle + // of an individual pod. + // + // Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + // be used that way - see the documentation of the driver for + // more information. + // + // A pod can use both types of ephemeral volumes and + // persistent volumes at the same time. 
+ // + // +optional + Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"` +} + +func (in *VolumeSource) ToAPIObject() *core.VolumeSource { + if in == nil { + return nil + } + return &core.VolumeSource{ + HostPath: in.HostPath, + EmptyDir: in.EmptyDir, + GCEPersistentDisk: in.GCEPersistentDisk, + AWSElasticBlockStore: in.AWSElasticBlockStore, + Secret: in.Secret, + NFS: in.NFS, + ISCSI: in.ISCSI, + Glusterfs: in.Glusterfs, + PersistentVolumeClaim: in.PersistentVolumeClaim, + RBD: in.RBD, + FlexVolume: in.FlexVolume, + Cinder: in.Cinder, + CephFS: in.CephFS, + Flocker: in.Flocker, + DownwardAPI: in.DownwardAPI, + FC: in.FC, + AzureFile: in.AzureFile, + ConfigMap: in.ConfigMap, + VsphereVolume: in.VsphereVolume, + Quobyte: in.Quobyte, + AzureDisk: in.AzureDisk, + PhotonPersistentDisk: in.PhotonPersistentDisk, + Projected: in.Projected, + PortworxVolume: in.PortworxVolume, + ScaleIO: in.ScaleIO, + StorageOS: in.StorageOS, + CSI: in.CSI, + Ephemeral: in.Ephemeral.ToAPIObject(), + } +} + +// Represents an ephemeral volume that is handled by a normal storage driver. +type EphemeralVolumeSource struct { + // Will be used to create a stand-alone PVC to provision the volume. + // The pod in which this EphemeralVolumeSource is embedded will be the + // owner of the PVC, i.e. the PVC will be deleted together with the + // pod. The name of the PVC will be `-` where + // `` is the name from the `PodSpec.Volumes` array + // entry. Pod validation will reject the pod if the concatenated name + // is not valid for a PVC (for example, too long). + // + // An existing PVC with that name that is not owned by the pod + // will *not* be used for the pod to avoid using an unrelated + // volume by mistake. Starting the pod is then blocked until + // the unrelated PVC is removed. If such a pre-created PVC is + // meant to be used by the pod, the PVC has to updated with an + // owner reference to the pod once the pod exists. Normally + // this should not be necessary, but it may be useful when + // manually reconstructing a broken cluster. + // + // This field is read-only and no changes will be made by Kubernetes + // to the PVC after it has been created. + // + // Required, must not be nil. + VolumeClaimTemplate *PersistentVolumeClaimTemplate `json:"volumeClaimTemplate,omitempty" protobuf:"bytes,1,opt,name=volumeClaimTemplate"` + + // ReadOnly is tombstoned to show why 2 is a reserved protobuf tag. + // ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +func (in *EphemeralVolumeSource) ToAPIObject() *core.EphemeralVolumeSource { + if in == nil { + return nil + } + return &core.EphemeralVolumeSource{ + VolumeClaimTemplate: in.VolumeClaimTemplate.ToAPIObject(), + } +} + +func ConvertVolumes(in []Volume) []core.Volume { + out := make([]core.Volume, 0, len(in)) + for _, v := range in { + out = append(out, *v.ToAPIObject()) + } + return out +} diff --git a/vendor/kmodules.xyz/offshoot-api/api/v1/types.go b/vendor/kmodules.xyz/offshoot-api/api/v1/types.go index 516ada4f..0b374eae 100644 --- a/vendor/kmodules.xyz/offshoot-api/api/v1/types.go +++ b/vendor/kmodules.xyz/offshoot-api/api/v1/types.go @@ -57,70 +57,80 @@ type PodTemplateSpec struct { } type PodSpec struct { - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
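A minimal, illustrative sketch of how the Volume, EphemeralVolumeSource, and PersistentVolumeClaimTemplate types above, together with the ConvertVolumes helper, might be used to build core/v1 pod volumes. The `ofst` import alias, the storage class name, and the claim size are assumptions for the example, not part of this patch:

package main

import (
	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	ofst "kmodules.xyz/offshoot-api/api/v1"
)

func main() {
	sc := "standard" // hypothetical storage class

	vol := ofst.Volume{
		Name: "data",
		VolumeSource: ofst.VolumeSource{
			// Only one member of VolumeSource may be set; here a generic
			// ephemeral volume backed by a PVC template is used.
			Ephemeral: &ofst.EphemeralVolumeSource{
				VolumeClaimTemplate: &ofst.PersistentVolumeClaimTemplate{
					Spec: core.PersistentVolumeClaimSpec{
						AccessModes:      []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
						StorageClassName: &sc,
						Resources: core.ResourceRequirements{
							Requests: core.ResourceList{
								core.ResourceStorage: resource.MustParse("1Gi"),
							},
						},
					},
				},
			},
		},
	}

	// ConvertVolumes (and the ToAPIObject methods it relies on) translates the
	// offshoot-api types into their core/v1 counterparts, e.g. when assembling
	// a core.PodSpec.
	coreVolumes := ofst.ConvertVolumes([]ofst.Volume{vol})
	_ = coreVolumes
}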
- // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,1,opt,name=serviceAccountName"` - - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,2,rep,name=nodeSelector"` - - // If specified, the pod's scheduling constraints + // List of volumes that can be mounted by containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes // +optional - Affinity *core.Affinity `json:"affinity,omitempty" protobuf:"bytes,3,opt,name=affinity"` + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,4,opt,name=schedulerName"` + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge + InitContainers []core.Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` - // If specified, the pod's tolerations. + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down). + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. 
// +optional - Tolerations []core.Toleration `json:"tolerations,omitempty" protobuf:"bytes,5,rep,name=tolerations"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. // +optional - ImagePullSecrets []core.LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,6,rep,name=imagePullSecrets"` + DNSPolicy core.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` - // If specified, indicates the pod's priority. "system-node-critical" and - // "system-cluster-critical" are two special keywords which indicate the - // highest priorities with the former being the highest priority. Any other - // name must be defined by creating a PriorityClass object with that name. - // If not specified, the pod priority will be default or zero if there is no - // default. + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional - PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,7,opt,name=priorityClassName"` + // +mapType=atomic + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional - Priority *int32 `json:"priority,omitempty" protobuf:"varint,8,opt,name=priority"` + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // Host networking requested for this pod. Use the host's network namespace. // If this option is set, the ports that will be used must be specified. // Default to false. // +k8s:conversion-gen=false // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,9,opt,name=hostNetwork"` + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` // Use the host's pid namespace. // Optional: Default to false. // +k8s:conversion-gen=false // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,10,opt,name=hostPID"` + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` // Use the host's ipc namespace. // Optional: Default to false. 
// +k8s:conversion-gen=false // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,11,opt,name=hostIPC"` + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` // Share a single process namespace between all of the containers in a pod. // When this is set containers will be able to view and signal processes from other containers @@ -129,38 +139,70 @@ type PodSpec struct { // Optional: Default to false. // +k8s:conversion-gen=false // +optional - ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,12,opt,name=shareProcessNamespace"` + ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional - SecurityContext *core.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,13,opt,name=securityContext"` + SecurityContext *core.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates stop immediately via - // the kill signal (no opportunity to shut down). - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod // +optional - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,14,opt,name=terminationGracePeriodSeconds"` + // +patchMergeKey=name + // +patchStrategy=merge + ImagePullSecrets []core.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` - // Set DNS policy for the pod. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. + // If specified, the pod's scheduling constraints + // +optional + Affinity *core.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` + + // If specified, the pod's tolerations. + // +optional + Tolerations []core.Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` + + // If specified, indicates the pod's priority. 
"system-node-critical" and + // "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. // +optional - DNSPolicy core.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,15,opt,name=dnsPolicy,casttype=k8s.io/api/core/v1.DNSPolicy"` + PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"` + + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` // Specifies the DNS parameters of a pod. // Parameters specified here will be merged to the generated DNS // configuration based on DNSPolicy. // +optional - DNSConfig *core.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,16,opt,name=dnsConfig"` + DNSConfig *core.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` + + // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + // empty definition that uses the default runtime handler. + // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + // +optional + RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` + + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` // TopologySpreadConstraints describes how a group of pods ought to spread across topology // domains. Scheduler will schedule pods in a way which abides by the constraints. @@ -171,31 +213,7 @@ type PodSpec struct { // +listType=map // +listMapKey=topologyKey // +listMapKey=whenUnsatisfiable - TopologySpreadConstraints []core.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,17,rep,name=topologySpreadConstraints"` - - // List of volumes that can be mounted by containers belonging to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes - // +optional - // +patchMergeKey=name - // +patchStrategy=merge,retainKeys - Volumes []core.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,18,rep,name=volumes"` - - // List of initialization containers belonging to the pod. - // Init containers are executed in order prior to containers being started. If any - // init container fails, the pod is considered to have failed and is handled according - // to its restartPolicy. The name for an init container or normal container must be - // unique among all containers. 
- // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. - // The resourceRequirements of an init container are taken into account during scheduling - // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers - // in a similar fashion. - // Init containers cannot currently be added or removed. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // +patchMergeKey=name - // +patchStrategy=merge - InitContainers []core.Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,19,rep,name=initContainers"` + TopologySpreadConstraints []core.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` //////////////////////////////////////////////////////// // Application (database) Container Specific Settings // @@ -210,16 +228,16 @@ type PodSpec struct { // Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional - Args []string `json:"args,omitempty" protobuf:"bytes,20,rep,name=args"` + Args []string `json:"args,omitempty" protobuf:"bytes,1001,rep,name=args"` // List of environment variables to set in the container. // Cannot be updated. // +optional - Env []core.EnvVar `json:"env,omitempty" protobuf:"bytes,21,rep,name=env"` + Env []core.EnvVar `json:"env,omitempty" protobuf:"bytes,1002,rep,name=env"` // Compute Resources required by the sidecar container. // +optional - Resources core.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,22,opt,name=resources"` + Resources core.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1003,opt,name=resources"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. @@ -228,7 +246,7 @@ type PodSpec struct { // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional - LivenessProbe *core.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,23,opt,name=livenessProbe"` + LivenessProbe *core.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,1004,opt,name=livenessProbe"` // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. @@ -237,25 +255,25 @@ type PodSpec struct { // To ignore defaulting, set the value to empty ReadynessProbe "{}". // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional - ReadinessProbe *core.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,24,opt,name=readinessProbe"` + ReadinessProbe *core.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,1005,opt,name=readinessProbe"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional - Lifecycle *core.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,25,opt,name=lifecycle"` + Lifecycle *core.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,1006,opt,name=lifecycle"` // Security options the pod should run with. 
// More info: https://kubernetes.io/docs/concepts/policy/security-context/ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional - ContainerSecurityContext *core.SecurityContext `json:"containerSecurityContext,omitempty" protobuf:"bytes,26,opt,name=containerSecurityContext"` + ContainerSecurityContext *core.SecurityContext `json:"containerSecurityContext,omitempty" protobuf:"bytes,1007,opt,name=containerSecurityContext"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional // +patchMergeKey=mountPath // +patchStrategy=merge - VolumeMounts []core.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,27,rep,name=volumeMounts"` + VolumeMounts []core.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,1008,rep,name=volumeMounts"` } // ServiceTemplateSpec describes the data a service should have when created from a template diff --git a/vendor/kmodules.xyz/offshoot-api/api/v1/zz_generated.deepcopy.go b/vendor/kmodules.xyz/offshoot-api/api/v1/zz_generated.deepcopy.go index ab4d240e..768c2551 100644 --- a/vendor/kmodules.xyz/offshoot-api/api/v1/zz_generated.deepcopy.go +++ b/vendor/kmodules.xyz/offshoot-api/api/v1/zz_generated.deepcopy.go @@ -87,6 +87,27 @@ func (in *ContainerRuntimeSettings) DeepCopy() *ContainerRuntimeSettings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) { + *out = *in + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(PersistentVolumeClaimTemplate) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource. +func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource { + if in == nil { + return nil + } + out := new(EphemeralVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IONiceSettings) DeepCopyInto(out *IONiceSettings) { *out = *in @@ -221,6 +242,24 @@ func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimTemplate) DeepCopyInto(out *PersistentVolumeClaimTemplate) { + *out = *in + in.PartialObjectMeta.DeepCopyInto(&out.PartialObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimTemplate. +func (in *PersistentVolumeClaimTemplate) DeepCopy() *PersistentVolumeClaimTemplate { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodRuntimeSettings) DeepCopyInto(out *PodRuntimeSettings) { *out = *in @@ -315,6 +354,25 @@ func (in *PodRuntimeSettings) DeepCopy() *PodRuntimeSettings { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string, len(*in)) @@ -322,6 +380,21 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*out)[key] = val } } + if in.ShareProcessNamespace != nil { + in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity *out = new(corev1.Affinity) @@ -334,36 +407,26 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } if in.Priority != nil { in, out := &in.Priority, &out.Priority *out = new(int32) **out = **in } - if in.ShareProcessNamespace != nil { - in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace - *out = new(bool) - **out = **in - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(corev1.PodSecurityContext) - (*in).DeepCopyInto(*out) - } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } if in.DNSConfig != nil { in, out := &in.DNSConfig, &out.DNSConfig *out = new(corev1.PodDNSConfig) (*in).DeepCopyInto(*out) } + if in.RuntimeClassName != nil { + in, out := &in.RuntimeClassName, &out.RuntimeClassName + *out = new(string) + **out = **in + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints *out = make([]corev1.TopologySpreadConstraint, len(*in)) @@ -371,20 +434,6 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]corev1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.Args != nil { in, out := &in.Args, &out.Args *out = make([]string, len(*in)) @@ -552,3 +601,176 @@ func (in *ServiceTemplateSpec) DeepCopy() *ServiceTemplateSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(corev1.HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(corev1.EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(corev1.GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(corev1.AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(corev1.NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(corev1.ISCSIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(corev1.GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(corev1.PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(corev1.RBDVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(corev1.FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(corev1.CinderVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(corev1.CephFSVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(corev1.FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(corev1.DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(corev1.FCVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(corev1.AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(corev1.VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(corev1.QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(corev1.AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PhotonPersistentDisk != nil { + in, out := 
&in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(corev1.PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(corev1.ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(corev1.PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(corev1.ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(corev1.StorageOSVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(corev1.CSIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(EphemeralVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/kmodules.xyz/resource-metrics/Makefile b/vendor/kmodules.xyz/resource-metrics/Makefile index db3218fb..18741c05 100644 --- a/vendor/kmodules.xyz/resource-metrics/Makefile +++ b/vendor/kmodules.xyz/resource-metrics/Makefile @@ -43,7 +43,7 @@ endif ### These variables should not need tweaking. ### -SRC_PKGS := api apps batch core kubedb.com testing +SRC_PKGS := api apps batch core kubedb.com SRC_DIRS := $(SRC_PKGS) *.go # directories which hold app source (not vendored) DOCKER_PLATFORMS := linux/amd64 linux/arm linux/arm64 @@ -53,8 +53,8 @@ BIN_PLATFORMS := $(DOCKER_PLATFORMS) windows/amd64 darwin/amd64 darwin/arm64 OS := $(if $(GOOS),$(GOOS),$(shell go env GOOS)) ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH)) -GO_VERSION ?= 1.19 -BUILD_IMAGE ?= appscode/golang-dev:$(GO_VERSION) +GO_VERSION ?= 1.20 +BUILD_IMAGE ?= ghcr.io/appscode/golang-dev:$(GO_VERSION) OUTBIN = bin/$(BIN)-$(OS)-$(ARCH) ifeq ($(OS),windows) diff --git a/vendor/kmodules.xyz/resource-metrics/calculator.go b/vendor/kmodules.xyz/resource-metrics/calculator.go index 8c507173..90f9f94f 100644 --- a/vendor/kmodules.xyz/resource-metrics/calculator.go +++ b/vendor/kmodules.xyz/resource-metrics/calculator.go @@ -23,6 +23,7 @@ import ( _ "kmodules.xyz/resource-metrics/batch/v1beta1" _ "kmodules.xyz/resource-metrics/core/v1" _ "kmodules.xyz/resource-metrics/kubedb.com/v1alpha2" + _ "kmodules.xyz/resource-metrics/kubevault.com/v1alpha2" core "k8s.io/api/core/v1" ) diff --git a/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/kafka.go b/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/kafka.go new file mode 100644 index 00000000..540afcd7 --- /dev/null +++ b/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/kafka.go @@ -0,0 +1,181 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "fmt" + "reflect" + + "kmodules.xyz/resource-metrics/api" + + "gomodules.xyz/pointer" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func init() { + api.Register(schema.GroupVersionKind{ + Group: "kubedb.com", + Version: "v1alpha2", + Kind: "Kafka", + }, Kafka{}.ResourceCalculator()) +} + +type Kafka struct{} + +func (r Kafka) ResourceCalculator() api.ResourceCalculator { + return &api.ResourceCalculatorFuncs{ + AppRoles: []api.PodRole{api.PodRoleDefault}, + RuntimeRoles: []api.PodRole{api.PodRoleDefault, api.PodRoleExporter}, + RoleReplicasFn: r.roleReplicasFn, + ModeFn: r.modeFn, + UsesTLSFn: r.usesTLSFn, + RoleResourceLimitsFn: r.roleResourceFn(api.ResourceLimits), + RoleResourceRequestsFn: r.roleResourceFn(api.ResourceRequests), + } +} + +func (r Kafka) roleReplicasFn(obj map[string]interface{}) (api.ReplicaList, error) { + result := api.ReplicaList{} + + topology, found, err := unstructured.NestedMap(obj, "spec", "topology") + if err != nil { + return nil, err + } + if found && topology != nil { + // dedicated topology mode + var replicas int64 = 0 + for role, roleSpec := range topology { + roleReplicas, found, err := unstructured.NestedInt64(roleSpec.(map[string]interface{}), "replicas") + if err != nil { + return nil, err + } + if found { + result[api.PodRole(role)] = roleReplicas + replicas += roleReplicas + } + } + result[api.PodRoleDefault] = replicas + } else { + // Combined mode + replicas, found, err := unstructured.NestedInt64(obj, "spec", "replicas") + if err != nil { + return nil, fmt.Errorf("failed to read spec.replicas %v: %w", obj, err) + } + if !found { + result[api.PodRoleDefault] = 1 + } else { + result[api.PodRoleDefault] = replicas + } + } + return result, nil +} + +func (r Kafka) modeFn(obj map[string]interface{}) (string, error) { + topology, found, err := unstructured.NestedFieldNoCopy(obj, "spec", "topology") + if err != nil { + return "", err + } + if found && !reflect.ValueOf(topology).IsNil() { + return "Dedicated", nil + } + return "Combined", nil +} + +func (r Kafka) usesTLSFn(obj map[string]interface{}) (bool, error) { + _, found, err := unstructured.NestedFieldNoCopy(obj, "spec", "enableSSL") + return found, err +} + +func (r Kafka) roleResourceFn(fn func(rr core.ResourceRequirements) core.ResourceList) func(obj map[string]interface{}) (map[api.PodRole]core.ResourceList, error) { + return func(obj map[string]interface{}) (map[api.PodRole]core.ResourceList, error) { + exporter, err := api.ContainerResources(obj, fn, "spec", "monitor", "prometheus", "exporter") + if err != nil { + return nil, err + } + + topology, found, err := unstructured.NestedMap(obj, "spec", "topology") + if err != nil { + return nil, err + } + if found && topology != nil { + var replicas int64 = 0 + var totalResources core.ResourceList + result := map[api.PodRole]core.ResourceList{} + + for role, roleSpec := range topology { + rolePerReplicaResources, roleReplicas, err := KafkaNodeResources(roleSpec.(map[string]interface{}), fn) + if err != nil { + return nil, err + } + + roleResources := api.MulResourceList(rolePerReplicaResources, roleReplicas) + result[api.PodRole(role)] = roleResources + totalResources = api.AddResourceList(totalResources, roleResources) + } + + result[api.PodRoleDefault] = totalResources + 
result[api.PodRoleExporter] = api.MulResourceList(exporter, replicas) + return result, nil + } + + // Kafka Combined + container, replicas, err := api.AppNodeResources(obj, fn, "spec") + if err != nil { + return nil, err + } + + return map[api.PodRole]core.ResourceList{ + api.PodRoleDefault: api.MulResourceList(container, replicas), + api.PodRoleExporter: api.MulResourceList(exporter, replicas), + }, nil + } +} + +type KafkaNode struct { + Replicas *int64 `json:"replicas,omitempty"` + Resources core.ResourceRequirements `json:"resources,omitempty"` + Storage core.PersistentVolumeClaimSpec `json:"storage,omitempty"` +} + +func KafkaNodeResources( + obj map[string]interface{}, + fn func(rr core.ResourceRequirements) core.ResourceList, + fields ...string, +) (core.ResourceList, int64, error) { + val, found, err := unstructured.NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, 0, err + } + + var node KafkaNode + err = runtime.DefaultUnstructuredConverter.FromUnstructured(val.(map[string]interface{}), &node) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse node %#v: %w", node, err) + } + + if node.Replicas == nil { + node.Replicas = pointer.Int64P(1) + } + rr := fn(node.Resources) + sr := fn(node.Storage.Resources) + rr[core.ResourceStorage] = *sr.Storage() + + return rr, *node.Replicas, nil +} diff --git a/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/redis.go b/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/redis.go index 4787e3c7..c3d65097 100644 --- a/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/redis.go +++ b/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/redis.go @@ -123,6 +123,9 @@ func (r Redis) roleResourceFn(fn func(rr core.ResourceRequirements) core.Resourc if err != nil { return nil, err } + // If spec.cluster.replicas = x + // That means in each shard there are (x+1) redis replicas + shardReplicas += 1 replicas = shards * shardReplicas } diff --git a/vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/constant.go b/vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/constant.go new file mode 100644 index 00000000..88f872cd --- /dev/null +++ b/vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/constant.go @@ -0,0 +1,19 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +const VaultModeDefault = "Default" diff --git a/vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/vaultserver.go b/vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/vaultserver.go new file mode 100644 index 00000000..13e2c251 --- /dev/null +++ b/vendor/kmodules.xyz/resource-metrics/kubevault.com/v1alpha2/vaultserver.go @@ -0,0 +1,96 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
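For context, a test-style sketch of the unstructured spec shapes the new Kafka calculator reads. It is assumed to live in the same kubedb.com/v1alpha2 package (roleReplicasFn is unexported); the role names "broker" and "controller" and the helper name are illustrative assumptions:

package v1alpha2

import "kmodules.xyz/resource-metrics/api"

// exampleKafkaReplicas shows the two spec shapes roleReplicasFn understands:
// spec.topology.<role>.replicas (dedicated mode, per-role counts summed into
// PodRoleDefault) or spec.replicas (combined mode, defaulting to 1).
func exampleKafkaReplicas() (api.ReplicaList, error) {
	obj := map[string]interface{}{
		"spec": map[string]interface{}{
			"topology": map[string]interface{}{
				"broker":     map[string]interface{}{"replicas": int64(3)},
				"controller": map[string]interface{}{"replicas": int64(2)},
			},
		},
	}
	// Expected result: broker=3, controller=2, PodRoleDefault=5.
	return Kafka{}.roleReplicasFn(obj)
}

The Redis change in the same hunk follows the same arithmetic: with 3 shards and spec.cluster.replicas = 2, each shard runs 2+1 Redis pods, so the default role counts 3 * 3 = 9 replicas.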
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "fmt" + + "kmodules.xyz/resource-metrics/api" + + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func init() { + api.Register(schema.GroupVersionKind{ + Group: "kubevault.com", + Version: "v1alpha2", + Kind: "VaultServer", + }, VaultServer{}.ResourceCalculator()) +} + +type VaultServer struct{} + +func (r VaultServer) ResourceCalculator() api.ResourceCalculator { + return &api.ResourceCalculatorFuncs{ + AppRoles: []api.PodRole{api.PodRoleDefault}, + RuntimeRoles: []api.PodRole{api.PodRoleDefault, api.PodRoleExporter}, + RoleReplicasFn: r.roleReplicasFn, + ModeFn: r.modeFn, + UsesTLSFn: r.usesTLSFn, + RoleResourceLimitsFn: r.roleResourceFn(api.ResourceLimits), + RoleResourceRequestsFn: r.roleResourceFn(api.ResourceRequests), + } +} + +func (r VaultServer) roleReplicasFn(obj map[string]interface{}) (api.ReplicaList, error) { + replicas, found, err := unstructured.NestedInt64(obj, "spec", "replicas") + if err != nil { + return nil, fmt.Errorf("failed to read spec.replicas %v: %w", obj, err) + } + if !found { + return api.ReplicaList{api.PodRoleDefault: 1}, nil + } + return api.ReplicaList{api.PodRoleDefault: replicas}, nil +} + +func (r VaultServer) modeFn(obj map[string]interface{}) (string, error) { + return VaultModeDefault, nil +} + +func (r VaultServer) usesTLSFn(obj map[string]interface{}) (bool, error) { + _, found, err := unstructured.NestedFieldNoCopy(obj, "spec", "tls") + return found, err +} + +func (r VaultServer) roleResourceFn(fn func(rr core.ResourceRequirements) core.ResourceList) func(obj map[string]interface{}) (map[api.PodRole]core.ResourceList, error) { + return func(obj map[string]interface{}) (map[api.PodRole]core.ResourceList, error) { + exporter, err := api.ContainerResources(obj, fn, "spec", "monitor", "prometheus", "exporter") + if err != nil { + return nil, err + } + + container, replicas, err := api.AppNodeResources(obj, fn, "spec") + if err != nil { + return nil, err + } + + raftBackend, _, err := api.AppNodeResources(obj, fn, "spec", "backend", "raft") + if err != nil { + return nil, err + } + if raftBackend != nil { + container[core.ResourceStorage] = raftBackend[core.ResourceStorage] + } + + return map[api.PodRole]core.ResourceList{ + api.PodRoleDefault: api.MulResourceList(container, replicas), + api.PodRoleExporter: api.MulResourceList(exporter, replicas), + }, nil + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ed296209..5affee7e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -23,18 +23,15 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/Masterminds/semver/v3 v3.1.1 -## explicit; go 1.12 +# github.com/Masterminds/semver/v3 v3.2.1 +## explicit; go 1.18 github.com/Masterminds/semver/v3 # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler -# github.com/PuerkitoBio/purell v1.1.1 -## explicit +# github.com/PuerkitoBio/purell v1.2.0 +## explicit; go 1.19 
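Similarly, a test-style sketch (assumed in-package, hypothetical helper name) of what the new VaultServer calculator reads from an unstructured object:

package v1alpha2

import "kmodules.xyz/resource-metrics/api"

// exampleVaultServerReplicas illustrates roleReplicasFn and usesTLSFn above:
// spec.replicas defaults to 1 when absent, and TLS usage is inferred from the
// mere presence of spec.tls.
func exampleVaultServerReplicas() (api.ReplicaList, bool, error) {
	obj := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": int64(3),
			"tls":      map[string]interface{}{},
		},
	}
	replicas, err := VaultServer{}.roleReplicasFn(obj) // PodRoleDefault: 3
	if err != nil {
		return nil, false, err
	}
	usesTLS, err := VaultServer{}.usesTLSFn(obj) // true: spec.tls is present
	return replicas, usesTLS, err
}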
github.com/PuerkitoBio/purell -# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 -## explicit -github.com/PuerkitoBio/urlesc # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -70,7 +67,7 @@ github.com/fatih/structs # github.com/felixge/httpsnoop v1.0.1 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.5.4 +# github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify # github.com/go-errors/errors v1.0.1 @@ -155,7 +152,7 @@ github.com/grpc-ecosystem/grpc-gateway/utilities # github.com/imdario/mergo v0.3.13 => github.com/imdario/mergo v0.3.6 ## explicit github.com/imdario/mergo -# github.com/inconshreveable/mousetrap v1.0.1 +# github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap # github.com/josharian/intern v1.0.0 @@ -229,7 +226,7 @@ github.com/robfig/cron/v3 # github.com/sergi/go-diff v1.2.0 ## explicit; go 1.12 github.com/sergi/go-diff/diffmatchpatch -# github.com/spf13/cobra v1.6.0 +# github.com/spf13/cobra v1.7.0 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 @@ -245,7 +242,7 @@ github.com/yudai/gojsondiff/formatter # github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 ## explicit github.com/yudai/golcs -# go.bytebuilders.dev/license-verifier v0.12.1 +# go.bytebuilders.dev/license-verifier v0.13.2 ## explicit; go 1.18 go.bytebuilders.dev/license-verifier/apis/licenses go.bytebuilders.dev/license-verifier/info @@ -363,7 +360,7 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.6.0 +# golang.org/x/crypto v0.9.0 ## explicit; go 1.17 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 @@ -373,7 +370,7 @@ golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/salsa20/salsa -# golang.org/x/net v0.7.0 +# golang.org/x/net v0.10.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/http/httpguts @@ -395,17 +392,17 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/singleflight -# golang.org/x/sys v0.5.0 +# golang.org/x/sys v0.8.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.5.0 +# golang.org/x/term v0.8.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.7.0 +# golang.org/x/text v0.9.0 ## explicit; go 1.17 golang.org/x/text/encoding golang.org/x/text/encoding/internal @@ -430,8 +427,8 @@ gomodules.xyz/encoding/json # gomodules.xyz/flags v0.1.3 ## explicit; go 1.16 gomodules.xyz/flags -# gomodules.xyz/jsonpatch/v2 v2.2.0 -## explicit; go 1.12 +# gomodules.xyz/jsonpatch/v2 v2.3.0 +## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 # gomodules.xyz/logs v0.0.6 ## explicit; go 1.16 @@ -445,8 +442,8 @@ gomodules.xyz/sets # gomodules.xyz/wait v0.2.0 ## explicit; go 1.14 gomodules.xyz/wait -# gomodules.xyz/x v0.0.14 -## explicit; go 1.17 +# gomodules.xyz/x v0.0.15 +## explicit; go 1.18 gomodules.xyz/x/version # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 @@ -1177,7 +1174,7 @@ k8s.io/utils/trace kmodules.xyz/authorizer/rbac kmodules.xyz/authorizer/rbac/helpers kmodules.xyz/authorizer/rbac/validation -# kmodules.xyz/client-go v0.25.19 +# kmodules.xyz/client-go v0.25.23 ## explicit; go 1.18 kmodules.xyz/client-go kmodules.xyz/client-go/api/v1 @@ 
-1189,22 +1186,22 @@ kmodules.xyz/client-go/meta kmodules.xyz/client-go/openapi kmodules.xyz/client-go/tools/clientcmd kmodules.xyz/client-go/tools/clusterid -# kmodules.xyz/custom-resources v0.25.0 +# kmodules.xyz/custom-resources v0.25.2 ## explicit; go 1.18 kmodules.xyz/custom-resources/apis/appcatalog kmodules.xyz/custom-resources/apis/appcatalog/install kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1 kmodules.xyz/custom-resources/crds -# kmodules.xyz/objectstore-api v0.25.1-0.20221104003322-f0289b5b6ca2 +# kmodules.xyz/objectstore-api v0.25.1 ## explicit; go 1.18 kmodules.xyz/objectstore-api/api/v1 -# kmodules.xyz/offshoot-api v0.25.0 +# kmodules.xyz/offshoot-api v0.25.4 ## explicit; go 1.18 kmodules.xyz/offshoot-api/api/v1 # kmodules.xyz/prober v0.25.0 ## explicit; go 1.18 kmodules.xyz/prober/api/v1 -# kmodules.xyz/resource-metrics v0.25.0 +# kmodules.xyz/resource-metrics v0.25.2 ## explicit; go 1.18 kmodules.xyz/resource-metrics kmodules.xyz/resource-metrics/api @@ -1213,6 +1210,7 @@ kmodules.xyz/resource-metrics/batch/v1 kmodules.xyz/resource-metrics/batch/v1beta1 kmodules.xyz/resource-metrics/core/v1 kmodules.xyz/resource-metrics/kubedb.com/v1alpha2 +kmodules.xyz/resource-metrics/kubevault.com/v1alpha2 # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 ## explicit; go 1.17 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client @@ -1340,7 +1338,7 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# stash.appscode.dev/apimachinery v0.29.0 +# stash.appscode.dev/apimachinery v0.30.0 ## explicit; go 1.18 stash.appscode.dev/apimachinery/apis/stash stash.appscode.dev/apimachinery/apis/stash/install diff --git a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go index ee600202..dea554dc 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1alpha1/openapi_generated.go @@ -383,11 +383,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/objectstore-api/api/v1.S3Spec": schema_kmodulesxyz_objectstore_api_api_v1_S3Spec(ref), "kmodules.xyz/objectstore-api/api/v1.SwiftSpec": schema_kmodulesxyz_objectstore_api_api_v1_SwiftSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), "kmodules.xyz/offshoot-api/api/v1.PodSpec": 
schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), @@ -395,6 +397,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), "kmodules.xyz/prober/api/v1.FormEntry": schema_kmodulesxyz_prober_api_v1_FormEntry(ref), "kmodules.xyz/prober/api/v1.HTTPPostAction": schema_kmodulesxyz_prober_api_v1_HTTPPostAction(ref), "kmodules.xyz/prober/api/v1.Handler": schema_kmodulesxyz_prober_api_v1_Handler(ref), @@ -17804,6 +17808,12 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. Format: "", }, }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -17855,7 +17865,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } @@ -18798,6 +18808,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. } } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. 
Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19025,6 +19056,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19228,84 +19289,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -19340,86 +19405,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", }, + }, + "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. 
Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -19509,7 +19595,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -19744,6 +19830,381 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer } } +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. 
Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, 
+ "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: 
spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + func schema_kmodulesxyz_prober_api_v1_FormEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/backup_session_types.go b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/backup_session_types.go index d6bd9467..e3e7d7b9 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/backup_session_types.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/backup_session_types.go @@ -151,7 +151,7 @@ type HostBackupStats struct { // Snapshots specifies the stats of individual snapshots that has been taken for this host in current backup session // +optional Snapshots []SnapshotStats `json:"snapshots,omitempty"` - // Duration indicates total time taken to complete backup for this hosts + // Duration indicates total time taken to complete backup for this host // +optional Duration string `json:"duration,omitempty"` // Error indicates string value of error in case of backup failure diff --git 
a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go index 2936d918..82831d33 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/stash/v1beta1/openapi_generated.go @@ -383,11 +383,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/objectstore-api/api/v1.S3Spec": schema_kmodulesxyz_objectstore_api_api_v1_S3Spec(ref), "kmodules.xyz/objectstore-api/api/v1.SwiftSpec": schema_kmodulesxyz_objectstore_api_api_v1_SwiftSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), @@ -395,6 +397,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), "kmodules.xyz/prober/api/v1.FormEntry": schema_kmodulesxyz_prober_api_v1_FormEntry(ref), "kmodules.xyz/prober/api/v1.HTTPPostAction": schema_kmodulesxyz_prober_api_v1_HTTPPostAction(ref), "kmodules.xyz/prober/api/v1.Handler": schema_kmodulesxyz_prober_api_v1_Handler(ref), @@ -17850,6 +17854,12 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. Format: "", }, }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -17901,7 +17911,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. 
}, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } @@ -18844,6 +18854,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. } } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19071,6 +19102,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. 
The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19274,84 +19335,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. 
Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -19386,86 +19451,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", }, + }, + "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. 
Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -19555,7 +19641,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -19790,6 +19876,381 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer } } +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + func schema_kmodulesxyz_prober_api_v1_FormEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -21512,7 +21973,7 @@ func schema_apimachinery_apis_stash_v1beta1_HostBackupStats(ref common.Reference }, "duration": { SchemaProps: spec.SchemaProps{ - Description: "Duration indicates total time taken to complete backup for this hosts", + Description: "Duration indicates total time taken to complete backup for this host", Type: []string{"string"}, Format: "", }, diff --git a/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go b/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go index 4fe9881c..df9f12f5 100644 --- a/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go +++ b/vendor/stash.appscode.dev/apimachinery/apis/ui/v1alpha1/openapi_generated.go @@ -383,11 +383,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/objectstore-api/api/v1.S3Spec": schema_kmodulesxyz_objectstore_api_api_v1_S3Spec(ref), "kmodules.xyz/objectstore-api/api/v1.SwiftSpec": 
schema_kmodulesxyz_objectstore_api_api_v1_SwiftSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref), + "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref), "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref), "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref), "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref), + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref), "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref), "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref), "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref), @@ -395,6 +397,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref), "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref), + "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref), + "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref), "kmodules.xyz/prober/api/v1.FormEntry": schema_kmodulesxyz_prober_api_v1_FormEntry(ref), "kmodules.xyz/prober/api/v1.HTTPPostAction": schema_kmodulesxyz_prober_api_v1_HTTPPostAction(ref), "kmodules.xyz/prober/api/v1.Handler": schema_kmodulesxyz_prober_api_v1_Handler(ref), @@ -17799,6 +17803,12 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. Format: "", }, }, + "appRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to underlying application", + Ref: ref("kmodules.xyz/client-go/api/v1.TypedObjectReference"), + }, + }, "version": { SchemaProps: spec.SchemaProps{ Description: "Version used to facilitate programmatic handling of application.", @@ -17850,7 +17860,7 @@ func schema_custom_resources_apis_appcatalog_v1alpha1_AppBindingSpec(ref common. }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/client-go/api/v1.TypedObjectReference", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.ClientConfig", "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1.SecretTransform"}, } } @@ -18793,6 +18803,27 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common. 
} } +func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ephemeral volume that is handled by a normal storage driver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19020,6 +19051,36 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.Ref } } +func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. 
The same fields as in a PersistentVolumeClaim are also valid here.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"}, + } +} + func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19223,84 +19284,88 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, }, - }, - "nodeSelector": { SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"), }, }, }, }, }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), + Ref: ref("k8s.io/api/core/v1.Container"), }, }, }, }, }, - "imagePullSecrets": { + "terminationGracePeriodSeconds": { SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", + Type: []string{"string"}, + Format: "", + Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, + }, + "nodeSelector": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-map-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "priorityClassName": { + "serviceAccountName": { SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. 
\"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", Type: []string{"string"}, Format: "", }, }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "hostNetwork": { SchemaProps: spec.SchemaProps{ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", @@ -19335,86 +19400,107 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"}}, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "topologySpreadConstraints": { + "imagePullSecrets": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "topologyKey", - "whenUnsatisfiable", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, }, }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", }, + }, + "tolerations": { SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Description: "If specified, the pod's tolerations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, }, }, - "initContainers": { + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. 
The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topologySpreadConstraints": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. 
All topologySpreadConstraints are ANDed.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Container"), + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), }, }, }, @@ -19504,7 +19590,7 @@ func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"}, } } @@ -19739,6 +19825,381 @@ func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.Refer } } +func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + +func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "emptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "downwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "configMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "projected items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + "ephemeral": { + SchemaProps: spec.SchemaProps{ + Description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"}, + } +} + func schema_kmodulesxyz_prober_api_v1_FormEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/stash.appscode.dev/apimachinery/crds/stash.appscode.com_backupsessions.yaml b/vendor/stash.appscode.dev/apimachinery/crds/stash.appscode.com_backupsessions.yaml index 3f67a791..51bfa652 100644 --- a/vendor/stash.appscode.dev/apimachinery/crds/stash.appscode.com_backupsessions.yaml +++ b/vendor/stash.appscode.dev/apimachinery/crds/stash.appscode.com_backupsessions.yaml @@ -252,7 +252,7 @@ spec: properties: duration: description: Duration indicates total time taken to complete - backup for this hosts + backup for this host type: string error: description: Error indicates string value of error in